diff --git "a/1181.jsonl" "b/1181.jsonl" new file mode 100644--- /dev/null +++ "b/1181.jsonl" @@ -0,0 +1,838 @@ +{"seq_id": "22224692881", "text": "import sqlite3, os\nSQLITE_NAME = \"fatpanda.tmp.db\"\n# if os.path.isfile(SQLITE_NAME): os.remove(SQLITE_NAME)\n\n\ndef fpd_raw_connection(db_path=SQLITE_NAME):\n conn = sqlite3.connect(db_path)\n '''Optional processing'''\n return conn\n\nfrom .readers import (\n read_csv,\n concat_csv,\n read_sql_query\n)", "repo_name": "shashfrankenstien/FatPanda", "sub_path": "fatpanda/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 311, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "41185222380", "text": "import json\nfrom flask import Flask\n\napp = Flask(__name__)\n\nwith open('../data/keywords_json.json', 'r') as f:\n jsondata = json.load(f)\n\n\n@app.route('/')\ndef index():\n return jsondata\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "secantsquared/flaskreactapp", "sub_path": "server/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 243, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "json.load", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "10697609667", "text": "import json\nd = {\n \"vezetekNev\": \"Kovacs\",\n \"keresztNev\": \"Janos\",\n \"kor\": 25,\n \"cim\":\n {\n \"utcaHazszam\": \"2. utca 21.\",\n \"varos\": \"New York\",\n \"allam\": \"NY\",\n \"iranyitoSzam\": \"10021\"\n }\n}\nprint(json.dumps(d))\nwith open(\"dump.txt\", \"w\") as f:\n\tjson.dump(d,f)\n\n", "repo_name": "Gero4884/Gero4884", "sub_path": "json1.py", "file_name": "json1.py", "file_ext": "py", "file_size_in_byte": 316, "program_lang": "python", "lang": "hu", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "40233180338", "text": "from __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport os\n\n# NB(zundel): these definitions are a part of the source from https://github.com/pantsbuild/pants\nfrom pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary\nfrom pants.base.exceptions import TargetDefinitionException\nfrom pants.base.payload import Payload\nfrom pants.base.payload_field import PrimitiveField\n\n\nlogger = logging.getLogger(__name__)\n\nclass JaxWsLibrary(ExportableJvmLibrary):\n \"\"\"Generates a Java library from JAX-WS wsdl files.\"\"\"\n\n def __init__(self,\n payload=None,\n vm_args=None,\n xjc_args=None,\n extra_args=None,\n **kwargs):\n \"\"\"Generates a Java library from WSDL files using JAX-WS.\n\n :param list vm_args: Additional arguments for the JVM.\n :param list xjc_args: Additional arguments to xjc.\n :param list extra_args: Additional arguments for the CLI.\n \"\"\"\n payload = payload or Payload()\n payload.add_fields({\n 'vm_args': PrimitiveField(vm_args or ()),\n 'xjc_args': PrimitiveField(xjc_args or ()),\n 'extra_args': PrimitiveField(extra_args or ()),\n })\n super(JaxWsLibrary, self).__init__(payload=payload, **kwargs)\n self.add_labels('codegen')\n", 
"repo_name": "ericzundel/mvn2pants", "sub_path": "src/python/squarepants/plugins/jax_ws/targets/jax_ws_library.py", "file_name": "jax_ws_library.py", "file_ext": "py", "file_size_in_byte": 1380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "pants.backend.jvm.targets.exportable_jvm_library.ExportableJvmLibrary", "line_number": 16, "usage_type": "name"}, {"api_name": "pants.base.payload.Payload", "line_number": 31, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 33, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 34, "usage_type": "call"}, {"api_name": "pants.base.payload_field.PrimitiveField", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "42167978913", "text": "import argparse\n\nfrom game import Runner\n\nTRAPS = [(2, 2), (3, 3), (4, 4), (5, 5)]\nWIND = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]\nSTART = (3, 0)\nGOAL = (3, 7)\n\nparser = argparse.ArgumentParser(description='My script')\nparser.add_argument('-a', '--actions', help='Number of actions that agent can take (4, 8, 9')\nparser.add_argument('-e', '--episodes', help='Number of training episodes')\nparser.add_argument('-v', '--verbose', help='verbose')\nparser.add_argument('-t', '--traps', help='Number of traps 0,1,2,3,4')\n\nargs = parser.parse_args()\nactions_list = [4, 8, 9]\nif args.actions:\n actions_list = [int(args.actions)]\nepisodes = int(args.episodes or 200)\nverbose = int(args.verbose or 10)\ntraps = min(4, int(args.traps or 0))\nfor actions in actions_list:\n if actions not in [4, 8, 9]:\n raise Exception('Invalid \"-a/--actions\"')\n\nfor actions in actions_list:\n game = Runner(num_actions=actions,\n start=START,\n goal=GOAL,\n verbose=verbose,\n episodes=episodes,\n rows=7,\n cols=10,\n gamma=1,\n wind=WIND,\n traps=TRAPS[:traps],\n render_interval=[51, 50])\n game.train()\n game.test(pause=3)\n", "repo_name": "shadi-danhash/q-learning-simulation", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1267, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "game.Runner", "line_number": 28, "usage_type": "call"}, {"api_name": "game.train", "line_number": 39, "usage_type": "call"}, {"api_name": "game.test", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "37253410841", "text": "from django.core.management.base import BaseCommand\nfrom optparse import make_option\n\nfrom synnefo import quotas\n\n\nclass Command(BaseCommand):\n help = \"Detect and resolve pending commissions to Quotaholder\"\n output_transaction = True\n option_list = BaseCommand.option_list + (\n make_option(\"--fix\", dest=\"fix\",\n action='store_true',\n default=False,\n help=\"Fix pending commissions\"\n ),\n )\n\n def handle(self, *args, **options):\n fix = options['fix']\n\n accepted, rejected = quotas.resolve_pending_commissions()\n\n if accepted:\n self.stdout.write(\"Pending accepted commissions:\\n %s\\n\"\n % list_to_string(accepted))\n\n if rejected:\n self.stdout.write(\"Pending rejected commissions:\\n %s\\n\"\n % list_to_string(rejected))\n\n if fix and (accepted or rejected):\n self.stdout.write(\"Fixing pending commissions..\\n\")\n quotas.resolve_commissions(accept=accepted, 
reject=rejected,\n strict=False)\n\n\ndef list_to_string(l):\n return \",\".join([str(x) for x in l])\n", "repo_name": "mpastyl/websocket-console", "sub_path": "synnefo/snf-cyclades-app/synnefo/quotas/management/commands/reconcile-commissions-cyclades.py", "file_name": "reconcile-commissions-cyclades.py", "file_ext": "py", "file_size_in_byte": 1218, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 7, "usage_type": "name"}, {"api_name": "django.core.management.base.BaseCommand.option_list", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 10, "usage_type": "name"}, {"api_name": "optparse.make_option", "line_number": 11, "usage_type": "call"}, {"api_name": "synnefo.quotas.resolve_pending_commissions", "line_number": 21, "usage_type": "call"}, {"api_name": "synnefo.quotas", "line_number": 21, "usage_type": "name"}, {"api_name": "synnefo.quotas.resolve_commissions", "line_number": 33, "usage_type": "call"}, {"api_name": "synnefo.quotas", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "32933771056", "text": "from django.contrib import admin\nfrom . import models\nfrom django.utils.safestring import mark_safe\n\n\n# Register your models here.\n\nclass TagAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass CategorieAdmin(admin.ModelAdmin):\n list_display = (\n 'titre',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'statut',\n 'date_add',\n 'date_update',\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n\n fieldsets = [\n ('Info ', {'fields': ['titre', ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass CommentaireAdmin(admin.ModelAdmin):\n\n def affiche_image(self, obj):\n if obj.cover:\n return mark_safe(''.format(url=obj.cover.url))\n\n list_display = (\n 'article',\n 'nom',\n 'email',\n 'message',\n 'affiche_image',\n 'statut',\n 'date_add',\n 'date_update'\n )\n\n list_filter = (\n 'article',\n 'statut',\n 'date_add',\n 'date_update'\n )\n search_fields = (\n 'message',\n 'date_add'\n )\n readonly_fields = ['affiche_image']\n fieldsets = [\n ('Info ', {'fields': ['article', 'nom', 'email', 'message', ]\n }),\n ('Image', {'fields': [\n 'cover',\n 'affiche_image'\n ]}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n\nclass ArticleAdmin(admin.ModelAdmin):\n list_display = (\n 'auteur',\n 'titre',\n 'affiche_image',\n 'statut',\n 'date_add',\n 'date_update'\n )\n list_filter = (\n 'categorie',\n 'statut',\n 'tags'\n )\n search_fields = (\n 'titre',\n )\n list_per_pages = 50\n date_hierarchy = 'date_add'\n readonly_fields = ['affiche_image']\n\n fieldsets = [\n ('Info ', {'fields': [\n 'auteur',\n 'titre',\n 'categorie',\n 'tags',\n 'contenu',\n 'resume'\n ]\n }),\n ('Image', {'fields': ['cover', 'affiche_image']}),\n ('Statut et Activations', {'fields': ['statut', ]}),\n ]\n\n def affiche_image(self, obj):\n return mark_safe(''.format(url=obj.cover.url))\n\n\n\n\n\ndef _register(model, admin_class):\n admin.site.register(model, 
admin_class)\n\n\n_register(models.Article, ArticleAdmin)\n_register(models.Commentaire, CommentaireAdmin)\n_register(models.Categorie, CategorieAdmin)\n_register(models.Tag, TagAdmin)\n\n\n", "repo_name": "paulemxx/Orgo", "sub_path": "blog/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 3041, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 32, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 60, "usage_type": "call"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 95, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 95, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 131, "usage_type": "call"}, {"api_name": "django.contrib.admin.site.register", "line_number": 138, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 138, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "38191438054", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 25 13:56:35 2018\n\n@author: galaz\n\"\"\"\n\nimport pyedflib\nimport numpy as np\nimport matplotlib as plt\nf = pyedflib.EdfReader(\"3-1-Schlucktest_Leitfaehigkeit_edited_triggerMarker_edited.bdf\")\nn = f.signals_in_file\nsignal_labels = f.getSignalLabels()\nsigbufs = np.zeros((n, f.getNSamples()[0]))\nfor i in np.arange(n):\n sigbufs[i, :] = f.readSignal(i)\n \nBI = sigbufs[0] \nEMG = sigbufs[1] \nannotations = f.readAnnotations() \nsample_frequency= 4000 \n\ndef segment(t_after,t_befor,sample_frequency,annotations,BI,EMG):\n BI_segment_list = []\n EMG_segment_list = []\n for i in range (annotations[0].size):\n BI_segment = []\n EMG_segment = []\n swallow_index= int(sample_frequency*annotations[0][i])\n segment_start =swallow_index-int(sample_frequency*t_befor)\n segment_end = swallow_index+int(sample_frequency*t_after)\n segment_length = segment_end-segment_start\n for j in range (segment_length):\n BI_segment.append(BI[segment_start+j])\n EMG_segment.append(EMG[segment_start+j])\n BI_segment_list.append(BI_segment) \n EMG_segment_list.append(EMG_segment) \n return [BI_segment_list, EMG_segment_list] \n \nresult=segment(2,0.5,4000,annotations,BI,EMG) \n\nfig = plt.pyplot.figure()\nax = fig.add_subplot(111)\nnumberofsegment='123456'\n\nfor i in range(annotations[0].size):\n ax = fig.add_subplot(2,annotations[0].size,i+1)\n ax.plot(result[0][i])\n ax = fig.add_subplot(2,annotations[0].size,(annotations[0].size+i+1))\n ax.plot(result[1][i])\n ax.set_title('The: %s st'%numberofsegment[i])\n\n\n", "repo_name": "Gamil-Farea/Schluckerkennung", "sub_path": "Code/Gamil_test_V1.py", "file_name": "Gamil_test_V1.py", "file_ext": "py", "file_size_in_byte": 1642, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pyedflib.EdfReader", "line_number": 11, "usage_type": "call"}, 
{"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "1640022022", "text": "from functools import lru_cache\n\n\nclass Solution:\n def minimumDistance(self, word: str) -> int:\n word_length = len(word)\n\n def distance(char_a: str, char_b: str) -> int:\n if not char_a or not char_b:\n # return 0 for the first letter\n return 0\n\n index_a = ord(char_a) - ord('A')\n index_b = ord(char_b) - ord('A')\n\n return abs(index_a // 6 - index_b // 6) + abs(index_a % 6 - index_b % 6)\n\n @lru_cache(maxsize=None)\n def find(ind: int, key_a: str, key_b: str) -> int:\n # boundary condition\n if ind == word_length:\n return 0\n\n char = word[ind]\n\n return min(\n find(ind + 1, key_a, char) + distance(key_b, char),\n find(ind + 1, char, key_b) + distance(key_a, char)\n )\n\n return find(0, None, None)\n\n\nif __name__ == '__main__':\n word = \"A\" * 300\n print(Solution().minimumDistance(word))", "repo_name": "amogchandrashekar/Leetcode", "sub_path": "Hard/Minimum Distance to Type a Word Using Two Fingers.py", "file_name": "Minimum Distance to Type a Word Using Two Fingers.py", "file_ext": "py", "file_size_in_byte": 997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "functools.lru_cache", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "29223279208", "text": "import random\nfrom time import sleep, time\nimport starkbank\nfrom datetime import datetime, timedelta\nfrom src.authentication import user\n\nstarkbank.user = user\n\n\ninvoices = starkbank.invoice.create([\n starkbank.Invoice(\n amount=248,\n descriptions=[{'key': 'Arya', 'value': 'Not today'}],\n discounts=[{'percentage': 10, 'due': datetime.now()+timedelta(days=10)}],\n due=datetime.now()+timedelta(days=10),\n expiration=123456789,\n fine=2.5,\n interest=1.3,\n name=\"Arya Stark\",\n tags=['New sword', 'Invoice #1234'],\n tax_id=\"29.176.331/0001-69\"\n )\n])\n\nbreakpoint()", "repo_name": "RodrigoNavarroNogueira/apisdk", "sub_path": "src/teste.py", "file_name": "teste.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "starkbank.user", "line_number": 7, "usage_type": "attribute"}, {"api_name": "src.authentication.user", "line_number": 7, "usage_type": "name"}, {"api_name": "starkbank.invoice.create", "line_number": 10, "usage_type": "call"}, {"api_name": "starkbank.invoice", "line_number": 10, "usage_type": "attribute"}, {"api_name": "starkbank.Invoice", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "28928388087", "text": "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\nimport uuid\nimport random\nimport subprocess\n\nfrom 
argparse import ArgumentParser\n\n\ndef list_files(path_to_annotations, file_extension):\n \"\"\"Get list of files in a given directory\"\"\"\n file_list = []\n for file in os.listdir(path_to_annotations):\n if file.endswith('.' + file_extension):\n file_list.append(file)\n return file_list\n\n\ndef pick_random_images(background_dir):\n \"\"\"Return paths to randomly chosen fore/background images\"\"\"\n background_list = list_files(background_dir, 'jpg')\n background_image = os.path.join(background_dir, random.choice(background_list))\n\n return background_image\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--input_dir', '-i', type=str, help='directory of foreground images')\n parser.add_argument('--background_dir', '-b', type=str, help='directory of background images')\n parser.add_argument('--out_dir', '-o', type=str, help='output directory')\n args = parser.parse_args()\n\n sequence_names = []\n for item in os.listdir(args.input_dir):\n if os.path.isdir(item):\n sequence_names.append(item)\n\n for sequence in sequence_names:\n input = os.path.join(args.input_dir, sequence)\n background = pick_random_images(args.background_dir)\n output = os.path.join(args.out_dir, sequence)\n\n cmd = ['python', 'composite_video.py',\n '--input', input,\n '--background', background,\n '--output', output]\n\n subprocess.check_call(cmd)\n", "repo_name": "atomicguy/simulants", "sub_path": "simulants/legacy/batch_comp_videos.py", "file_name": "batch_comp_videos.py", "file_ext": "py", "file_size_in_byte": 1657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 31, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "subprocess.check_call", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "32404734265", "text": "#pylint:disable=E1101\n\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.distributions import Categorical\nimport numpy as np\nimport pandas as pd\nimport numpy as numpy\nimport matplotlib.pyplot as plt\nimport argparse\nfrom models import MyModel\nfrom math_dataset import MyDataset\n\n\ndef main():\n _i, _j, _k = 2,3,3\n dataset = MyDataset(_i,_j,_k)\n\n dtype = torch.float\n device = torch.device(\"cpu\")\n # device = torch.device(\"cuda:0\")\n\n #batch, input, hidden, output\n N, D_in, H, D_out = 10, _i+_j+_k, 16, _i*_j*_k\n msg_len = 10\n\n x, y = dataset.get_frame()\n x = torch.tensor(x, dtype=dtype, device=device)\n #x = torch.cat((x,x,x,x,x),0)\n y = torch.tensor(y, dtype=torch.long, device=device).squeeze()\n #y = torch.cat((y,y,y,y,y),0)\n print(x.size(), y.size())\n #x = 
torch.zeros(N, D_in, device=device, dtype=dtype)\n #y = torch.zeros(N, device=device, dtype=dtype)\n\n model = MyModel(D_in, H, D_out)\n #model = torch.nn.Linear(D_in, D_out)\n\n loss_fn = torch.nn.CrossEntropyLoss(reduce=None)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n\n for t in range(10001):\n if True: #reinforce\n y_pred = model(x)\n probs = F.softmax(y_pred, dim=1)\n m = Categorical(probs)\n action = m.sample()\n reward = torch.eq(action, y).to(torch.float)\n reward = (reward - reward.mean())\n loss = -m.log_prob(action) * reward\n model.zero_grad()\n loss.sum().backward()\n #loss.backward(loss)\n optimizer.step()\n \n elif True:\n y_pred = model(x)\n \n else: # supervised\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n model.zero_grad()\n loss.backward()\n optimizer.step()\n\n if t % 100 == 0:\n with torch.no_grad():\n y_pred = model(x)\n eq = torch.eq(torch.argmax(y_pred, dim=1), y)\n print(\"t: {}, acc: {}/{} = {}\".format(t, torch.sum(eq).item(), eq.numel(), torch.sum(eq).item() / eq.numel()))\n\n\n torch.save({'epoch': t,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss\n }, \"checkpoints.tar\")\n\nif __name__ == \"__main__\":\n main()\n \n\n\n\n # model3 = MyModel(D_in, H, D_out)\n # optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n # checkpoint = torch.load(\"checkpoints.tar\")\n # model.load_state_dict(checkpoint['model_state_dict'])\n # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n # epoch = checkpoint['epoch']\n # loss = checkpoint['loss']\n\n # print(model.state_dict())\n # print(optimizer.state_dict())\n\n # PATH = \"model.pt\"\n # torch.save(model.state_dict(), PATH)\n\n # model2 = MyModel(D_in, H, D_out)\n # model.load_state_dict(torch.load(PATH))\n # model.eval() # for dropout and BN", "repo_name": "parkjunsoo91/number-communication", "sub_path": "supervised.py", "file_name": "supervised.py", "file_ext": "py", "file_size_in_byte": 3095, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "math_dataset.MyDataset", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.MyModel", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 43, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.softmax", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.distributions.Categorical", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.eq", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 72, "usage_type": "call"}, {"api_name": 
"torch.sum", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "11966980442", "text": "import boto3\nimport time\nfrom datetime import date\nimport decimal\nfrom boto3.dynamodb.conditions import Key, Attr\n\nCONN = None\nTRANSACTIONS_TABLE_NAME = None\nUSERS_TABLE_NAME = None\nTRANSACTIONS_TABLE = None\nUSERS_TABLE = None\n\n\ndef open_connections_and_tables():\n global CONN\n global TRANSACTIONS_TABLE\n global USERS_TABLE\n CONN = boto3.resource('dynamodb')\n TRANSACTIONS_TABLE = CONN.Table(TRANSACTIONS_TABLE_NAME)\n USERS_TABLE = CONN.Table(USERS_TABLE_NAME)\n\n\ndef resolve_names_to_ids(names):\n ids = []\n for name in names:\n field_to_use = 'username' if name[0] == \"@\" else 'name'\n value_to_use = name[1:] if name[0] == \"@\" else name\n items = USERS_TABLE.scan(\n FilterExpression=Attr(field_to_use).eq(value_to_use)\n )[\"Items\"]\n if len(items) != 0:\n ids.append(items.pop()[\"id\"])\n else:\n ids.append(None)\n return dict(zip(names, ids))\n\n\ndef resolve_ids_to_names(ids):\n names = []\n for id in ids:\n items = USERS_TABLE.query(\n KeyConditionExpression=Key('id').eq(id),\n )[\"Items\"]\n if len(items) > 0:\n names.append(\"@{}\".format(items[0][\"username\"]) if \"username\" in items[0] else items[0][\"name\"])\n else:\n names.append(None)\n return names\n\n\ndef debit_transaction(gid, sender, receiver, amt, *, msg_id=None, description=\"\", on_hold=False):\n with decimal.localcontext(boto3.dynamodb.types.DYNAMODB_CONTEXT) as ctx:\n ctx.traps[decimal.Inexact] = False\n ctx.traps[decimal.Rounded] = False\n timestamp = ctx.create_decimal_from_float(time.time())\n item = {\n 'group_id': gid,\n 'id': msg_id,\n 'from': sender,\n 'to': receiver,\n 'amt': -abs(amt),\n 'timestamp': round(timestamp, 2),\n 'on_hold': on_hold\n }\n if description is not None and description.strip() != \"\":\n item[\"description\"] = description.strip()\n TRANSACTIONS_TABLE.put_item(Item=item)\n\n\ndef credit_transaction(gid, sender, receiver, amt, *, msg_id=None, description=\"\", on_hold=False):\n with decimal.localcontext(boto3.dynamodb.types.DYNAMODB_CONTEXT) as ctx:\n ctx.traps[decimal.Inexact] = False\n ctx.traps[decimal.Rounded] = False\n timestamp = ctx.create_decimal_from_float(time.time())\n item = {\n 'group_id': gid,\n 'id': msg_id,\n 'from': sender,\n 'to': receiver,\n 'amt': abs(amt),\n 'timestamp': round(timestamp, 2),\n 'on_hold': on_hold\n }\n if description is not None and description.strip() != \"\":\n item[\"description\"] = description.strip()\n TRANSACTIONS_TABLE.put_item(Item=item)\n\n\ndef view_account(gid, user):\n gid = int(gid)\n user = int(user)\n response = TRANSACTIONS_TABLE.query(\n Select='ALL_ATTRIBUTES',\n KeyConditionExpression=Key('group_id').eq(int(gid)) & Key('timestamp').gt(0),\n FilterExpression=(Attr('from').eq(int(user)) | Attr('to').eq(int(user))) & Attr('on_hold').ne(True)\n )\n entries = response[\"Items\"]\n account = {}\n for entry in entries:\n is_payment = entry[\"amt\"] > 0\n is_receiver = entry[\"to\"] == user\n print(entry, is_payment, is_receiver)\n\n dict_key = entry[\"from\"] if is_receiver else entry[\"to\"]\n if dict_key not in account:\n account[dict_key] = 0\n account[dict_key] = account[dict_key] + (entry[\"amt\"] * (-1 if is_receiver else 1))\n return account\n\n\ndef view_logs(gid, user_id, filter_id, esk=None):\n gid = int(gid)\n uid = int(user_id)\n fid = int(filter_id) if filter_id is not None else None\n if fid is not None:\n filter_expression = 
(Attr('from').eq(uid) & Attr('to').eq(fid)) | (Attr('from').eq(fid) & Attr('to').eq(uid))\n else:\n filter_expression = Attr('from').eq(uid) | Attr('to').eq(uid)\n entries = []\n while len(entries) < 0:\n response = TRANSACTIONS_TABLE.query(\n Select='ALL_ATTRIBUTES',\n KeyConditionExpression=Key('group_id').eq(int(gid)) & Key('timestamp').gt(0),\n FilterExpression=filter_expression,\n Limit=10,\n ExclusiveStartKey=esk\n )\n entries.extend(response[\"Items\"])\n lek = response.get(\"LastEvaluatedKey\", None)\n # break into timeframes\n\n def destructive_filter(list, callback):\n pass\n # this week\n this_week = [e for e in entries if date.fromtimestamp(e[\"timestamp\"]).isocalendar()[1] == date.today().isocalendar()[1]]\n # last week\n last_week = [e for e in entries if date.fromtimestamp(e[\"timestamp\"]).isocalendar()[1] == date.today().isocalendar()[1] - 1]\n # this month\n # last month\n\n\ndef register_user(uid, *, username=\"\", name=\"\"):\n data = {\n \"id\": int(uid)\n }\n username = username.strip()\n name = name.strip()\n if username != \"\":\n data[\"username\"] = username\n if name != \"\":\n data[\"name\"] = name\n USERS_TABLE.put_item(Item=data)\n\n\ndef find_transaction(gid, mid):\n response = TRANSACTIONS_TABLE.query(\n KeyConditionExpression=Key('group_id').eq(gid),\n FilterExpression=Attr('id').eq(mid)\n )\n try:\n return response[\"Items\"].pop()\n except IndexError:\n return None\n\n\ndef update_transaction(transaction):\n TRANSACTIONS_TABLE.put_item(Item=transaction)\n", "repo_name": "chesnutcase/ledger_bot", "sub_path": "utils/tableutils.py", "file_name": "tableutils.py", "file_ext": "py", "file_size_in_byte": 5439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "boto3.resource", "line_number": 18, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 29, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 42, "usage_type": "call"}, {"api_name": "decimal.localcontext", "line_number": 52, "usage_type": "call"}, {"api_name": "boto3.dynamodb", "line_number": 52, "usage_type": "attribute"}, {"api_name": "decimal.Inexact", "line_number": 53, "usage_type": "attribute"}, {"api_name": "decimal.Rounded", "line_number": 54, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 55, "usage_type": "call"}, {"api_name": "decimal.localcontext", "line_number": 71, "usage_type": "call"}, {"api_name": "boto3.dynamodb", "line_number": 71, "usage_type": "attribute"}, {"api_name": "decimal.Inexact", "line_number": 72, "usage_type": "attribute"}, {"api_name": "decimal.Rounded", "line_number": 73, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 74, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 94, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 95, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 116, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 118, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.date.fromtimestamp", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 135, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.date.fromtimestamp", "line_number": 137, 
"usage_type": "call"}, {"api_name": "datetime.date", "line_number": 137, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 137, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Key", "line_number": 157, "usage_type": "call"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "4160611138", "text": "import csv\nfrom fileinput import filename\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nfilename = 'Chapter16/csv/Chicago.csv' #assign the csv file to filename\nwith open(filename) as file: #we assign the object of filename to file\n reader = csv.reader(file) #we call csv.reader and pass it the file object as an argument to create a reader oobject\n header_row = next(reader) #Store data from the first row with next function\n print(header_row)\n\n for index, column_header in enumerate(header_row): #Use this for loop/enumerate to find the indeces of date and temp min. (4,6)\n print(index, column_header)\n \n dates, highs, lows = [], [], [] # create a list\n for row in reader: # find the rows in reader\n if row[5] == '': # if the values in it are blank\n blank = (row[5]) #assign them to a variable that doesn't matter\n elif row[6] == '':\n blank = (row[6])\n else:\n low = int(row[6])\n lows.append(low)\n high = int(row[5]) #change the string to an int if its not blank and assign to variable high\n highs.append(high) #add the aformentioned variable to the list\n \n for row in reader:\n current_date = datetime.strptime(row[4], '%Y-%m-%d')\n dates.append(current_date)\n for i in dates:\n print(i) # I cannot figure out why this doesnt work sadly.\n print(highs) #print them to make sure they work (they do, just numbers now)\n print(lows)\n print(len(highs)) # find the number of values in the list (599, or roughly )\n\n\n\n\n #plotting the high temperatures\n plt.style.use('seaborn')\n fig, ax = plt.subplots()\n ax.plot(highs, c='orange')\n ax.plot(lows, c='blue')\n\n #Format plot\n ax.set_title(\"Daily high and low Temperatures, Chicago, 2022\", fontsize = 24)\n ax.set_xlabel('', fontsize =16)\n ax.set_ylabel(\"Temperature (F)\", fontsize = 16)\n ax.tick_params(axis = \"both\", which= \"major\", labelsize=16)\n\n plt.show()\n\n\n", "repo_name": "RiggityRussell/CIT228", "sub_path": "Chapter16/chicago_csv.py", "file_name": "chicago_csv.py", "file_ext": "py", "file_size_in_byte": 2007, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "fileinput.filename", "line_number": 6, "usage_type": "name"}, {"api_name": "fileinput.filename", "line_number": 7, "usage_type": "argument"}, {"api_name": "csv.reader", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 40, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "9582120465", "text": "#!/usr/bin/env python3\n# 
-*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 23 12:38:50 2019\n\n@author: michaelboles\n\"\"\"\n\n# set up working directory\nimport os\nos.chdir('/Users/michaelboles/Michael/Coding/2019/Realestate') # Mac\n#os.chdir('C:\\\\Users\\\\bolesmi\\\\Lam\\\\Coding\\\\Python\\\\2019\\\\Realestate') # PC\n\n# import data\nimport pandas as pd\ndata = pd.read_csv('./Data/listings/data_all_price_predictions.csv')\n\n# remind myself what the column names are\ndata.columns\n\n# create in-memory sqlite database, add dataframe\nfrom sqlalchemy import create_engine\nengine = create_engine('sqlite://', echo = False)\ndata.to_sql('Realestate', con=engine)\n\n# query database\nengine.execute(\"SELECT * FROM Realestate\").fetchall() # gets everything\nengine.execute('SELECT * FROM Realestate WHERE Zip = 94618').fetchall() # matches a zipcode\n\n# create a list from sql query \n# returns list of rowproxy objects, omits column names - why is this so hard\nrockridge = engine.execute('SELECT * FROM Realestate WHERE Zip = 94618').fetchall() # matches a zipcode\n", "repo_name": "mboles01/Realestate", "sub_path": "Old/SQL/sqlite.py", "file_name": "sqlite.py", "file_ext": "py", "file_size_in_byte": 1029, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.chdir", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "37541203182", "text": "print('='*6, 'ANO BISSEXTO', '='*6)\nprint('')\nfrom datetime import date # Biblioteca para capturar o ano atual do sistema.\na = int(input(\"Qual ano quer analisar? Ou digite 0 para o ano atual: \"))\n# Se o ano for divisível por 4 e tiver resto igual 0,\n# ou divisível por 100 tiver resto diferente de 0,\n# ou divisível por 400 tiver resto igual a 0.\nif a == 0:\n a = date.today().year # Para capturar o ano atual com o usuário digitando 0.\nif a % 4 == 0 and a % 100 != 0 or a % 400 == 0:\n print('O Ano {} é BISSEXTO.'.format(a))\nelse:\n print('O Ano {} NÃO é BISSEXTO.'.format(a))\n\n\n\n", "repo_name": "Edcarlos-Oliveira/PythonMundo1", "sub_path": "des032AnoBi.py", "file_name": "des032AnoBi.py", "file_ext": "py", "file_size_in_byte": 595, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.date.today", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "73708577794", "text": "#!/usr/bin/env python3\n\nimport argparse, re, os\nimport kmertools as kt\t\t#Available at https://github.com/jtladner/Modules\nimport fastatools as ft\t\t#Available at https://github.com/jtladner/Modules\nimport inout as io\t\t#Available at https://github.com/jtladner/Modules\nimport pandas as pd\nimport seaborn as sns\n\nfrom matplotlib import pyplot as plt\ntypeface='Arial'\n\n\n#Example command: coverage_per_seq_violinplot.py -d /Users/colleenung/Documents/197911_InfluenzavirusA/HA/SW_SC_noC/t0.200/197911_id_70_9_SWSC-x9-y30-t0.200.fasta -c /Users/colleenung/Documents/197911_InfluenzavirusA/HA/197911_id_70_9 -k 9 -t 0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.95 --swCtoS -o 197911_id_70_9_coverage_per_seq_violinplot.png -s 197911_id_70_9_coverage_per_seq_stats.txt\n\nparser = argparse.ArgumentParser(description='''A script that will generate violin plot(s) to visualize the distribution of kmer coverage 
\n\t\t\t\t\t\tin the design on a per sequence basis. Can generate multiple violin plots, with each representing a different Xmer \n\t\t\t\t\t\tthreshold.''')\n\nparser.add_argument(\"-d\", \"--design\", metavar='\\b', help=\"Input design file. If looking at multiple Xmer thresholds, only provide path to one of the design files. Assuming designs share the same naming structure and are located in a directory containing subdirectories for each Xmer target threshold.\")\nparser.add_argument(\"-c\", \"--cluster\", metavar='\\b', help=\"Input cluster file to look at kmer coverage on a per sequence basis. Note, cluster names must end with cluster number.\")\n\nparser.add_argument(\"-k\", \"--ksize\", default=9, type=int, metavar='\\b', help=\"Size of kmer to use for looking at kmer coverage in the design [default: 9].\")\nparser.add_argument(\"-t\", \"--targets\", default=\"0.5,0.75,1\", metavar='\\b', help=\"Target thresholds to generate violin plots for. [default: 0.5,0.75,1]\")\nparser.add_argument(\"-o\", \"--output\", default=\"coverage_per_seq_violinplot.png\", metavar='\\b', help=\"Name of output PNG file with violin plot(s). [default: coverage_per_seq_violinplot.png]\")\nparser.add_argument(\"-s\", \"--statsoutput\", default=\"coverage_per_seq_violinplot.txt\", metavar='\\b', help=\"Name of output txt file with descriptive statistics. [default: coverage_per_seq_violinplot.txt]\")\nparser.add_argument(\"--swCtoS\", default=False, action=\"store_true\", help=\"Use this flag if Cysteine residues were converted to Serine residues in the SW portion of the design.\")\nparser.add_argument(\"-b\", \"--batchMode\", default=None, metavar='\\b', help=\"You can use this flag to run the script in batch mode. If used, it should be followed by the path to a tsv file with two columns and one row per design. The first column should correspond to --design and the second to --cluster. In this mode, the output filenames will be generated based on the input file names. [default: None]\")\n\n#New argument group to underscore that these arguments are required despite being provided with flags\n#reqArgs = parser.add_argument_group(\"required arguments\")\n\nargs = parser.parse_args()\n\n\n#Parsing target thresholds\ntargetThresh = sorted(list(set([float(x) for x in args.targets.split(\",\")])))\n\n#Prep for batch mode\nif args.batchMode:\n\tinputD = io.fileDict(args.batchMode, header=False)\nelse:\n\tinputD = {args.design:args.cluster}\n\n# Step through each design/cluster pair\nfor design, cluster in inputD.items():\n\t\n\t# Specify output names if running in batch mode\n\tif args.batchMode:\n\t\targs.output = \"%s_%s_vp.png\" % (os.path.basename(cluster), args.targets)\n\t\targs.statsoutput = \"%s_%s_vpStats.tsv\" % (os.path.basename(cluster), args.targets)\n\t\n\t#Reading in fasta file (in this case, cluster file). 
Returns two lists, the first containing seq names and the second containing its sequences.\n\tnames, seqs = ft.read_fasta_lists(cluster)\n\n\txthrList=[]\n\tcoverageperseqList=[]\n\tfor thr in targetThresh:\n\t\t#Using path of input design file to find design files for other desired target threshold(s), if applicable\n\t\tsearchstr= \".*/t([\\d.]*)/.*\"\n\t\tregexresult= re.search(searchstr, design)\n\t\tdesignPath= re.sub(str(regexresult.group(1)), (\"%.3f\" % (thr)), design)\n\n\t\t#Creating set of all unique kmers within design\n\t\tdesignkSet= kt.kmerSetFasta(designPath, args.ksize, filter=[])\n\n\t\tfor s in seqs:\n\t\t\tif args.swCtoS:\n\t\t\t\ts = s.replace(\"C\", \"S\")\n\t\t\t#Creating set of all unique kmers within sequence\n\t\t\tsSet = kt.kmerSet(s, args.ksize, filter=[\"X\"])\n\t\t\tif len(sSet)>0:\n\t\t\t\txmersCovered= sSet.intersection(designkSet)\n\t\t\t\tpercentCovered= (len(xmersCovered) / len(sSet))*100\n\t\t\t\txthrList.append((\"%.3f\" % (thr)))\n\t\t\t\tcoverageperseqList.append(percentCovered)\n\n\tlabelY= \"%% %dmers covered per sequence\" % args.ksize\n\tdataDict= {\"Xmer Threshold\":xthrList, labelY:coverageperseqList}\n\t#Creating pandas dataframe from dictionary\n\tdf = pd.DataFrame(dataDict)\n\n\n\t#Generating violin plot from pandas dataframe using Seaborn\n\tfig, ax = plt.subplots(1,1,figsize=(10,10),facecolor='w')\n\tsns.violinplot(x=df[\"Xmer Threshold\"], y=df[labelY], palette=\"Set3\", ax=ax)\n\tax.set_ylabel(labelY)\n\tax.set_ylim(0,100)\n\tax.set_xlabel(\"Xmer Threshold\")\n\tfig.savefig(args.output, bbox_inches='tight', dpi=200)\n\tplt.close(fig=fig)\n\n\n\t#Writing out file with descriptive statistics\n\twith open(args.statsoutput, \"w\") as fout:\n\t\tline1= \"\\tMaximum\\tQ3\\tMedian\\tQ1\\tMinimum\\tIQR\"\n\t\tfout.write(line1)\n\t\n\t\tfor thr in targetThresh:\n\t\t\tthrDF= df.loc[df[\"Xmer Threshold\"] == (\"%.3f\" % (thr))]\n\t\t\n\t\t\tmaximum= thrDF[labelY].max()\n\t\t\tq3= thrDF[labelY].quantile(q=0.75, interpolation='midpoint')\n\t\t\tmedian= thrDF[labelY].quantile(q=0.5, interpolation='midpoint')\n\t\t\tq1= thrDF[labelY].quantile(q=0.25, interpolation='midpoint')\n\t\t\tminimum= thrDF[labelY].min()\n\t\t\tIQR= q3-q1\n\t\t\n\t\tline2= \"\\n%.3f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\" % (thr,maximum,q3,median,q1,minimum,IQR)\n\t\tfout.write(line2)", "repo_name": "LadnerLab/Library-Design", "sub_path": "extensions/coverage_per_seq_violinplot.py", "file_name": "coverage_per_seq_violinplot.py", "file_ext": "py", "file_size_in_byte": 5732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "inout.fileDict", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "fastatools.read_fasta_lists", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 61, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 62, "usage_type": "call"}, {"api_name": "kmertools.kmerSetFasta", "line_number": 65, "usage_type": "call"}, {"api_name": "kmertools.kmerSet", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 81, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "seaborn.violinplot", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "16943469616", "text": "from django.contrib.auth.models import AnonymousUser\n\nfrom .models import User\n\nclass DummyAuthBackend:\n \"\"\" \"\"\"\n\ndef get_user(get_response):\n def middleware(request):\n email = request.headers.get('user')\n if email:\n request.user = User.objects.filter(email=email).first()\n if request.user is None:\n raise ValueError(\"User not found\")\n else:\n request.user = AnonymousUser()\n\n return get_response(request)\n\n return middleware\n", "repo_name": "osohq/oso-django-integration", "sub_path": "oso_tutorial/expenses/authorization.py", "file_name": "authorization.py", "file_ext": "py", "file_size_in_byte": 510, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.User.objects.filter", "line_number": 12, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.AnonymousUser", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "74348435395", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nimport librosa\n\nfrom postprocess_utils import seg_metrics\nfrom utils import extract_features_melspec\n\naudio_filename = \"./samples/seg-test16.wav\"\nfeatures_filename = \"./samples/seg-test_features.npy\"\n# predictions_filename = \"samples/predictions_2018-05-24_17-48.npy\"\n\naudio, sr = librosa.load(audio_filename, sr=16000)\n# predictions = np.load(predictions_filename)\n# features = np.load(features_filename)\nfeatures = extract_features_melspec(audio, sr)\n\nprint(\"AUDIO\", audio.shape)\n# print(\"PREDICTIONS\", predictions.shape)\nprint(\"FEATURES\", features.shape)\n\ntimeseries_length = 100\nhop_length = 25\n\n# preds = deoverlap_predictions(predictions, features, hop_length)\n# norm_preds = defragment_vad(preds)\n\n# reference = [(6.42, 6.85), (13.49, 13.78)]\nreference = [(0, 6.42), (6.42, 13.49), (13.49, 20.43)]\n\n# lium = [(13.55, 13.67)]\nlium = [(0, 13.55), (13.55, 20.43)]\n\nref_plot = [0.1 for _ in range(len(audio))]\nfor r in reference:\n sr = 16000\n (start, end) = librosa.core.time_to_samples(r, sr=sr)\n start = max((0, start))\n end = min((len(audio), end))\n print(\"REF\", start, end)\n ref_plot[start:end] = [0.9 for _ in range(end - start)]\nprint(len(ref_plot))\n\n\nlium_seg = [0 for _ in range(len(audio))]\nfor l in lium:\n sr = 16000\n (start, end) = librosa.core.time_to_samples(l, sr=sr)\n start = max((0, start))\n end = min((len(audio), end))\n print(\"LIUM\", start, end)\n lium_seg[start:end] = [1 for _ in range(end - start)]\nprint(len(lium_seg))\n\nseg_metrics(lium, reference)\n\nfig, (\n (ax1),\n (ax2),\n # (ax3)\n) = plt.subplots(2, 1)\n\nax1.plot(audio)\nax1.set_title('skaņas līkne', fontsize='large')\n\nax2.plot(lium_seg)\nax2.plot(ref_plot)\nax2.set_title('LIUM rezultāti', fontsize='large')\n\n# ax3.plot(norm_preds)\n# ax3.plot(ref_plot)\n# ax3.set_title('normalizēti rezultāti', fontsize='large')\n\nplt.show()\n\n\n\n", "repo_name": "dmednis/speaker-segmenter", "sub_path": 
"test_seg.py", "file_name": "test_seg.py", "file_ext": "py", "file_size_in_byte": 1908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "librosa.load", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.extract_features_melspec", "line_number": 15, "usage_type": "call"}, {"api_name": "librosa.core.time_to_samples", "line_number": 36, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 36, "usage_type": "attribute"}, {"api_name": "librosa.core.time_to_samples", "line_number": 47, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 47, "usage_type": "attribute"}, {"api_name": "postprocess_utils.seg_metrics", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "5406651356", "text": "from django.shortcuts import render\nfrom pymongo import MongoClient\nfrom models import *\n\n# Create your views here.\n\ndef saveRoute():\n client = MongoClient(\"localhost\", 27017)\n db = client.scrapping\n cursor = db.route_data.find()\n for each in cursor:\n r = RouteData(\n depTimeString=str(each.get('depTimeString')),\n maxUpperColumns=str(each.get('maxUpperColumns')),\n fromCity=str(each.get('FromCity')),\n maxLowerColumns=str(each.get('maxLowerColumns')),\n maxLowerRows=str(each.get('maxLowerRows')),\n DPInformationList=each.get('DPInformationList'),\n toCity=str(each.get('ToCity')),\n maxUpperRows=str(each.get('maxUpperRows')),\n vehicleType=str(each.get('vehicleType')),\n BPInformationList=each.get('BPInformationList'),\n travelDate=str(each.get('travelDate')),\n busType=str(each.get('busType')),\n MPax=str(each.get('MPax')),\n serviceName=str(each.get('serviceName')),\n seatList=str(each.get('seatlist')),\n toCityId=str(each.get('ToCityId')),\n operatorId=str(each.get('operatorId')),\n amenities=str(each.get('amenties')),\n notes=str(each.get('Notes')),\n dateOFJourney=str(each.get('DateOfJourney')),\n routeId=str(each.get('RouteId')),\n travels=str(each.get('Travels')),\n arrTime=str(each.get('arrTime')),\n arrTimeString=str(each.get('arrTimeString')),\n serviceNumber=str(each.get('serviceNo')),\n aes=str(each.get('aes')),\n mxSPrTxn=str(each.get('mxSPrTxn')),\n depTime=str(each.get('depTime')),\n isBPMapLinkShown=str(each.get('isBPMapLinkShown')),\n fromCityId=str(each.get('FromCityId')),\n param42=each.get('param42')\n )\n r.save()\n # break\n\n\ndef saveTrip():\n client = MongoClient(\"localhost\", 27017)\n db = client.scrapping\n cursor = db.trip.find()\n\n for each in cursor:\n data = each.get('data')\n t = TripData(\n status=str(each.get('status')),\n defaultSorting=each.get('DefaultSorting'),\n amenitiesData=str(each.get('amenitiesData')),\n message=str(each.get('message'))\n )\n t.save()\n try:\n for singleData in data:\n try:\n tsd = TripSingleData(\n tripData=t,\n DPList=singleData.get('DPLst'),\n vt=str(singleData.get('vt')),\n busType=str(singleData.get('BsTp')),\n Tips=str(singleData.get('Tips')),\n BsSvid=str(singleData.get('BsSvId')),\n Sort=str(singleData.get('Sort')),\n IsDPA=str(singleData.get('IsDPA')),\n NSA=str(singleData.get('NSA')),\n params42=singleData.get('param42'),\n serviceName=str(singleData.get('serviceName')),\n 
giry=str(singleData.get('Glry')),\n RbPrefCode=str(singleData.get('RbPrefCode')),\n WnSt=str(singleData.get('WnSt')),\n DpTm=str(singleData.get('DpTm')),\n IsAC=str(singleData.get('IsAc')),\n IsNAc=str(singleData.get('IsNAc')),\n RtId=str(singleData.get('RtId')),\n IsSpF=str(singleData.get('IsSpF')),\n IsSlpr=str(singleData.get('IsSlpr')),\n serviceId=str(singleData.get('serviceId')),\n FareList=singleData.get('FrLst'),\n Ament=singleData.get('Ament'),\n OpId=str(singleData.get('OpId')),\n BPList=singleData.get('BPLst'),\n IsMTE=str(singleData.get('IsMTE')),\n Rtg=singleData.get('Rtg'),\n IsBpDpSearch=str(singleData.get('IsBpDpSearch')),\n jDur=str(singleData.get('jDur')),\n isStr=str(singleData.get('IsStr')),\n Tvs=str(singleData.get('Tvs')),\n Cmpg=singleData.get('Cmpg'),\n BsSt=str(singleData.get('BsSt')),\n ArTm=str(singleData.get('ArTm'))\n )\n tsd.save()\n except:\n pass\n except:\n pass\n # break\n # break\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "ankittube/dbtransfer", "sub_path": "transferdb/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4638, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pymongo.MongoClient", "line_number": 8, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "40037380553", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nimport telegram\nfrom mensa import fetch_all_menus, overwrite_current_menus\nfrom time import sleep\nimport sys\nfrom datetime import datetime\nfrom config import Config\nfrom utils import format_menus\nimport asyncio\n\n\nasync def send_menus(bot, config):\n \"\"\"Run the bot.\"\"\"\n date = datetime.today()\n overwrite_current_menus(config)\n mensa_menus = fetch_all_menus(config, date)\n users_mensas = config.get_database().get_all_user_and_mensas()\n print(\"Sending menus in %d messages\" % (len(users_mensas)))\n for cid, mensa in users_mensas:\n menus = mensa_menus[mensa]\n if not menus:\n continue\n await send_message(bot, cid, format_menus(mensa, menus, date))\n\n\nasync def send_message_to_all(bot, users, msg):\n print(\"Sending message to all %d users\" % len(users))\n for cid in users:\n await send_message(bot, cid, msg)\n\n\nasync def send_message(bot, chat_id, message):\n try:\n await bot.send_message(chat_id=chat_id, text=message,\n parse_mode='HTML')\n except Exception as ex:\n print(\"Could not send message to\", chat_id, str(ex))\n sleep(0.05) # avoiding flood limits\n\n\nasync def main():\n if len(sys.argv) == 1:\n print(f\"Usage: python3 {__file__} [message to all]\")\n sys.exit()\n config = Config(sys.argv[1])\n bot = telegram.Bot(config.get_token())\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - '\n '%(message)s')\n async with bot:\n if len(sys.argv) > 2:\n await send_message_to_all(bot, config.get_database().get_users(),\n \" \".join(sys.argv[2:]))\n else:\n await send_menus(bot, config)\n\nif __name__ == '__main__':\n asyncio.run(main())\n", "repo_name": "dnrhead/mensa_bot", "sub_path": "Bot/send_messages.py", "file_name": "send_messages.py", "file_ext": "py", "file_size_in_byte": 1861, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "datetime.datetime.today", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": 
"mensa.overwrite_current_menus", "line_number": 17, "usage_type": "call"}, {"api_name": "mensa.fetch_all_menus", "line_number": 18, "usage_type": "call"}, {"api_name": "config.get_database", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.format_menus", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 47, "usage_type": "attribute"}, {"api_name": "telegram.Bot", "line_number": 48, "usage_type": "call"}, {"api_name": "config.get_token", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 52, "usage_type": "attribute"}, {"api_name": "config.get_database", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "asyncio.run", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "4710100071", "text": "import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\nimport plotly.graph_objects as go\n\n\nyears = list(range(1985, 2011))\n\nst.title('Gender Pay Gap')\nst.subheader('Data Analytics Project')\n\nst.cache_data()\ndef load_dataset():\n df=pd.read_csv(\"cleaned_gpg_v2.csv\", usecols=['year','region','relate','sex','race','marst','occ','ind','classwkr',\n 'hrswork','incwage','annhrs','hrwage','inflate','expendbase10','perconexp',\n 'potexp','potexp2','o_occ1990','o_occ1950','o_ind1950','o_ind1990'] )\n df.set_index('year',inplace=True)\n return df\n\nwith st.spinner('Loading data...'):\n df = load_dataset()\n\n\nyears = df.index.unique().tolist()\nselectyear = st.sidebar.selectbox('Select a year', years)\nst.info(f'You selected {selectyear}')\n\nst.write(df.shape)\nr = df.race.unique().tolist()\ns = df.sex.unique().tolist()\noccupation = df.occ.unique().tolist()\nindustry = df.ind.unique().tolist()\n\nrace = st.sidebar.selectbox('select a race', r )\nsex = st.sidebar.selectbox('select a sex', s )\noccup = st.sidebar.selectbox('select a occupation', occupation )\nindus = st.sidebar.selectbox('select a industry', industry )\n\nif st.sidebar.checkbox('Show raw data'):\n st.dataframe(df[(df['race']== race) & (df['sex'] == sex)][:1000])\n st.dataframe(df[(df['occ']== occup) & (df['ind'] == indus)][:1000])\n\ndf_year = df[df.index == selectyear]\n\nfig1 = px.area(x=df.index, y=df['incwage'], title=f'INCOME WAGE')\nfig2 = px.scatter( x=df.index, y=df['occ'], title=f'OCCUPATION')\nfig3 = px.bar(x=df.index, y=df['ind'], title=f'INDUSTRY')\nfig4 = px.box(x=df.index, y=df['hrswork'], title=f'HOURS WORKED')\nif st.checkbox('Show income wage'):\n st.plotly_chart(fig1, use_container_width=True)\nif st.checkbox('Show occupation'):\n st.plotly_chart(fig2, use_container_width=True)\nif st.checkbox('Show industry'):\n st.plotly_chart(fig3, use_container_width=True)\nif st.checkbox('Show hours worked'):\n st.plotly_chart(fig4, use_container_width=True)\n\nfig5 = px.scatter(df, x=\"incwage\", y=\"hrswork\", color=\"sex\", marginal_y=\"violin\", title=f'INCOME WAGE VS HOURS WORKED')\nif st.checkbox('Show income wage vs hours worked'):\n st.plotly_chart(fig5, use_container_width=True)\n\nif st.checkbox('Show group analysis'):\n fig7 = px.sunburst(df, path=['classwkr','sex'], 
values='incwage', title=f'CLASS OF WORKERS AND THEIR INCOME WAGE')\n st.plotly_chart(fig7, use_container_width=True)\n fig11 = px.sunburst(df,path=['marst','sex'],values='annhrs',title=f'MARITAL STATUS AND NO.OF HOURS WORKED')\n st.plotly_chart(fig11, use_container_width=True)\n fig14 = px.treemap(df,names=[''])\nfig8 = px.bar(df, x=\"incwage\", y=\"expendbase10\", color=\"sex\", title=f'INCOME WAGE VS EXPENDITURE')\nif st.checkbox('Show income wage vs expenditure'):\n st.plotly_chart(fig8, use_container_width=True)\nfig9 = px.histogram(df,x=\"o_occ1990\",y=\"sex\",title=f'OCCUPATION IN 1990')\nfig10 = px.histogram(df,x='o_occ1950',y='sex',title=f'OCCUPATION IN 1950')\nif st.checkbox('Show difference in occupation'):\n st.plotly_chart(fig9, use_container_width=True)\n st.plotly_chart(fig10, use_container_width=True)\nfig12 = px.violin(df,x='hrswork',y='incwage',title=f'INCOME WAGE VS HOURS WORKED')\nfig13 = px.violin(df,x='annhrs',y='hrwage',title=f'HOURLY WAGE VS NO.OF HOURS WORKED')\nif st.checkbox('Show comparison between income wage and hours wage'):\n st.plotly_chart(fig12, use_container_width=True)\n st.plotly_chart(fig13, use_container_width=True)\n", "repo_name": "Pranshirastogi/Data-analytics-python-course-", "sub_path": "major project/work/dap.py", "file_name": "dap.py", "file_ext": "py", "file_size_in_byte": 3575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "streamlit.title", "line_number": 10, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 11, "usage_type": "call"}, {"api_name": "streamlit.cache_data", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "streamlit.spinner", "line_number": 21, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 26, "usage_type": "attribute"}, {"api_name": "streamlit.info", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 35, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 36, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 37, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 38, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.checkbox", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 40, "usage_type": "attribute"}, {"api_name": "streamlit.dataframe", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 42, "usage_type": "call"}, {"api_name": "plotly.express.area", "line_number": 46, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 46, "usage_type": "name"}, {"api_name": "plotly.express.scatter", "line_number": 47, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 47, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 48, "usage_type": "call"}, {"api_name": 
"plotly.express", "line_number": 48, "usage_type": "name"}, {"api_name": "plotly.express.box", "line_number": 49, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 49, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 52, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 54, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 55, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 56, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 57, "usage_type": "call"}, {"api_name": "plotly.express.scatter", "line_number": 59, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 59, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 61, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 63, "usage_type": "call"}, {"api_name": "plotly.express.sunburst", "line_number": 64, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 64, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 65, "usage_type": "call"}, {"api_name": "plotly.express.sunburst", "line_number": 66, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 66, "usage_type": "name"}, {"api_name": "streamlit.plotly_chart", "line_number": 67, "usage_type": "call"}, {"api_name": "plotly.express.treemap", "line_number": 68, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 68, "usage_type": "name"}, {"api_name": "plotly.express.bar", "line_number": 69, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 69, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 71, "usage_type": "call"}, {"api_name": "plotly.express.histogram", "line_number": 72, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 72, "usage_type": "name"}, {"api_name": "plotly.express.histogram", "line_number": 73, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 73, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 74, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 75, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 76, "usage_type": "call"}, {"api_name": "plotly.express.violin", "line_number": 77, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 77, "usage_type": "name"}, {"api_name": "plotly.express.violin", "line_number": 78, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 78, "usage_type": "name"}, {"api_name": "streamlit.checkbox", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.plotly_chart", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "5868672374", "text": "from collections import deque\n\n\nclass MovingAverage:\n\n def __init__(self, size: int):\n self.q = deque()\n self.s = size\n\n def next(self, val: int) -> float:\n if len(self.q) >= self.s:\n self.q.pop()\n self.q.appendleft(val)\n return sum(self.q) / 
len(self.q)\n\n# Your MovingAverage object will be instantiated and called as such:\n# obj = MovingAverage(size)\n# param_1 = obj.next(val)", "repo_name": "vramanrs/Leetcode-python", "sub_path": "moving-average-from-data-stream.py", "file_name": "moving-average-from-data-stream.py", "file_ext": "py", "file_size_in_byte": 430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "collections.deque", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "70418661635", "text": "\"\"\"\nCollection of all business logic the application must be able to\nperform.\n\"\"\"\nfrom contextlib import contextmanager\nfrom sqlalchemy.exc import IntegrityError\nfrom database import Session\nfrom models import User\n\n\n@contextmanager\ndef session_scope():\n \"\"\"\n Context for dealing with sessions. This allows the developer not to have to\n worry per se about closing and creating the session.\n \"\"\"\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n\ndef name_registered(name):\n \"\"\"\n Confirm or deny the uniqueness of the given user in the database.\n \"\"\"\n with session_scope() as session:\n if session.query(User).filter(User.name == name).one_or_none():\n return True\n return False\n\n\ndef email_registered(email):\n \"\"\"\n Confirm or deny the uniqueness of the given email in the database.\n \"\"\"\n with session_scope() as session:\n if session.query(User).filter(User.email == email).one_or_none():\n return True\n return False\n\n\ndef register_user(name, email):\n \"\"\"\n Register a user in the database by a name and email.\n \"\"\"\n with session_scope() as session:\n new_user = User(name, email)\n session.add(new_user)\n try:\n session.commit()\n except IntegrityError:\n session.rollback()\n raise\n else:\n return new_user.id\n ", "repo_name": "Drvanon/FlaskBoilerplate", "sub_path": "controllers.py", "file_name": "controllers.py", "file_ext": "py", "file_size_in_byte": 1499, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "database.Session", "line_number": 17, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 11, "usage_type": "name"}, {"api_name": "models.User", "line_number": 33, "usage_type": "argument"}, {"api_name": "models.User.name", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 43, "usage_type": "argument"}, {"api_name": "models.User.email", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "14958532073", "text": "\"\"\"\nA chef has collected data on the satisfaction level of his n dishes. Chef can cook any dish in 1 unit of time.\n\nLike-time coefficient of a dish is defined as the time taken to cook that dish including previous dishes multiplied\nby its satisfaction level i.e. 
time[i] * satisfaction[i].\n\nReturn the maximum sum of like-time coefficients that the chef can obtain after dishes preparation.\n\nDishes can be prepared in any order and the chef can discard some dishes to get this maximum value.\n\n\n\nExample 1:\n\nInput: satisfaction = [-1,-8,0,5,-9]\nOutput: 14\nExplanation: After removing the second and last dish, the maximum\ntotal like-time coefficient will be equal to (-1*1 + 0*2 + 5*3 = 14). Each dish is prepared in one unit of time.\nExample 2:\n\nInput: satisfaction = [4,3,2]\nOutput: 20\nExplanation: Dishes can be prepared in any order, (2*1 + 3*2 + 4*3 = 20)\nExample 3:\n\nInput: satisfaction = [-1,-4,-5]\nOutput: 0\nExplanation: People do not like the dishes. No dish is prepared.\n\n\nConstraints:\n\nn == satisfaction.length\n1 <= n <= 500\n-1000 <= satisfaction[i] <= 1000\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def maxSatisfaction(self, satisfaction: List[int]) -> int:\n # Like-time coefficient of a dish is defined as the time taken to cook that dish including previous dishes\n # multiplied by its satisfaction level i.e. time[i] * satisfaction[i].\n\n max_satisfaction = sorted(satisfaction, reverse=True)\n\n # result -> running total of like-time coefficients for the dishes that are kept\n # like_time -> cumulative satisfaction of the dishes chosen so far; adding it to result\n # once per iteration is equivalent to multiplying each dish by its cooking position\n result, like_time = 0, 0\n\n # loop through the dishes in descending order of satisfaction\n for dish_time in max_satisfaction:\n\n # accumulate the current dish into the running satisfaction total\n like_time += dish_time\n\n # once the running total turns negative, no remaining dish can improve the result\n if like_time < 0:\n break\n\n # add the running total to the result (prefix-sum trick)\n result += like_time\n\n return result\n\n\nsolution = Solution()\nassert 14 == solution.maxSatisfaction(satisfaction=[-1, -8, 0, 5, -9])\nassert 20 == solution.maxSatisfaction(satisfaction=[4, 3, 2])\nassert 0 == solution.maxSatisfaction(satisfaction=[-1, -4, -5])\n", "repo_name": "ImSakunthala/leetcode", "sub_path": "Advance_level/reducing_dishes.py", "file_name": "reducing_dishes.py", "file_ext": "py", "file_size_in_byte": 2450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "typing.List", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "35825932627", "text": "from collections import OrderedDict\nimport logging\n\n\nfrom bitcoin.core import str_money_value, b2lx, b2x, x\nfrom bitcoin.wallet import CBitcoinAddress, CBitcoinAddressError\nfrom bitcoin.rpc import unhexlify, hexlify\nfrom bitcoin.core import COutPoint\n\nfrom .exceptions import ChainError, BacktrackError\n\nlogger = logging.getLogger(__name__) # module logger used by BlockFactory below\n\nCOINBASE_TX = b'\\x00'*32\n\ndef bitcoin_to_string(value):\n \"\"\"Convert bitcoin value to a string\"\"\"\n #TODO: Append zeroes up to standard length\n bitcoin_str = str_money_value(abs(value))\n if value < 0:\n return '- '+bitcoin_str\n else:\n return bitcoin_str\n\n\n\nclass TxOut(object):\n \"\"\"Transaction output\"\"\"\n __slots__ = ('tx', 'nout', 'addr', 'value')\n\n def __init__(self, tx, nout, addr=None, value=0):\n \"\"\"\n Arguments:\n tx (string): Transaction hash\n nout (int): Transaction output number\n addr (string):\n value (int): Output value\n \"\"\"\n self.tx = tx\n self.nout = nout\n self.addr = addr\n self.value = value\n\n @staticmethod\n def addr_from_script(script):\n \"\"\"Generate output address from scriptPubKey\"\"\"\n 
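# NOTE (added): scripts with no standard address form make CBitcoinAddress raise CBitcoinAddressError below, and the output address is left as None\n 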
try:\n addr = str(CBitcoinAddress.from_scriptPubKey(script))\n except CBitcoinAddressError:\n addr = None\n \n return addr\n\n @classmethod\n def from_tx(cls, tx, nout):\n \"\"\"\n WARNING: This is not efficient to process all the transaction outputs\n because GetTxid() does not cache the result.\n\n Arguments:\n tx (bitcoin.CTransaction): Transaction\n nout (int): Output number\n\n Returns:\n Initialized TxOut\n\n Exceptions:\n CBitcoinAddressError: Couldn't convert transaction output scriptPubKey \n to address\n IndexError: The requested output doesn't exist\n \"\"\"\n # GetTxid instead of GetHash for segwit support (bip-0141)\n txhash = tx.GetTxid()\n cout = tx.vout[nout]\n addr = TxOut.addr_from_script(cout.scriptPubKey)\n return cls(txhash, nout, addr, value=cout.nValue)\n\n def __hash__(self):\n return hash((self.tx, self.nout))\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return False\n return self.tx == other.tx and self.nout == other.nout\n \n def __repr__(self):\n return \"TxOut({}, {}, {}, {})\".format(\n self.tx, \n self.nout, \n self.addr, \n self.value)\n\n def __str__(self): \n return \"TxOut({}, {}, {}, {})\".format(\n b2x(self.tx), \n self.nout, \n self.addr, \n str_money_value(self.value))\n\n\n\nclass Block(object):\n\n __slots__=('block_hash', 'height', 'vin', 'vout')\n\n def __init__(self, block_hash, height, vin=None, vout=None):\n \n self.block_hash = block_hash\n self.height = height\n if not vin:\n vin = []\n if not vout:\n vout = []\n\n self.vin = list(vin)\n self.vout = list(vout)\n\n def __hash__(self):\n return hash(self.block_hash)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.block_hash==other.block_hash\n else:\n return False\n\n def __repr__(self):\n return \"{}({},{},{},{})\".format(self.__class__.__name__,\n self.block_hash,\n self.height,\n self.vin,\n self.vout)\n\n def __str__(self):\n return \"{}: {} ({})\".format(self.__class__.__name__,\n self.block_hash,\n self.height)\n\n def check_balance(self):\n \"\"\"Check block input value sum is equal to output value sum\"\"\"\n input_value=0\n output_value=0\n\n for vin in self.vin:\n input_value += vin.value\n\n for vout in self.vout:\n output_value += vout.value\n\n return input_value == output_value\n\n\nclass TxOutCache(object):\n \n def __init__(self, proxy, size=500000):\n \"\"\"\n Arguments:\n size (int): max cache size\n proxy (proxy.BitcoindProxy)\n \"\"\"\n self._proxy = proxy\n self._max_size = size\n\n self._txout_cache = OrderedDict()\n\n self._cache_miss = 0\n self._cache_hit = 0\n\n def del_txout(self, txout):\n \"\"\"Remove txout from cache\"\"\"\n self._txout_cache.pop(txout, None)\n \n def add_txout(self, txout):\n \"\"\"Add TxOut to cache\"\"\"\n if len(self._txout_cache)>=self._max_size:\n self._txout_cache.popitem(last=False)\n \n self._txout_cache[txout] = txout\n\n def purge_cache(self):\n \"\"\"Purge complete cache\"\"\"\n self._txout_cache = OrderedDict()\n\n
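 # Usage sketch (added for illustration; assumes an initialized BitcoindProxy):\n #   cache = TxOutCache(proxy)\n #   out = cache.get_txout(txhash, 0)   # first call queries bitcoind, repeat calls hit the cache\n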
 def get_txout(self, txhash, nout):\n \"\"\"\n Get TxOut from cache or if not available query bitcoind_proxy\n \n Arguments:\n txhash (str): Transaction hash\n nout (int): Output number\n \"\"\"\n try:\n txout = self._txout_cache[TxOut(txhash, nout)]\n self._cache_hit += 1\n return txout\n except KeyError:\n pass\n\n self._cache_miss += 1\n\n with self._proxy as proxy: \n try:\n tx = proxy.get_transaction(txhash)\n except ConnectionError:\n raise\n except Exception:\n raise ChainError(\"Unknown TxOut {} {}\".format(txhash, nout))\n \n # Manually initialize TxOut so there is no need to generate the transaction\n # hash a second time. (faster than: txout = TxOut.from_tx(rawtx, nout))\n for out, cout in enumerate(tx.vout):\n addr = TxOut.addr_from_script(cout.scriptPubKey)\n self.add_txout(TxOut(txhash, out, addr, value=cout.nValue))\n\n # Now txout must be in cache\n self._cache_hit -= 1 # Fix hit/miss counter\n return self.get_txout(txhash, nout)\n \n\nclass BlockFactory(object):\n\n def __init__(self, proxy, size=1000000):\n \"\"\"\n Arguments:\n size (int): max cache size\n proxy (proxy.BitcoindProxy)\n \"\"\"\n self._proxy = proxy\n self._max_size = size\n \n self._cache = TxOutCache(proxy, size)\n\n def purge_cache(self):\n \"\"\"Completely purge cache\"\"\"\n self._cache.purge_cache()\n\n def _transaction_inputs(self, tx):\n \"\"\"Generate transaction inputs from source transaction outputs\"\"\" \n inputs = []\n txhash = tx.GetTxid()\n \n for vin in tx.vin:\n txin = vin.prevout\n \n if txin.hash == COINBASE_TX:\n continue\n\n txout = self._cache.get_txout(txin.hash, txin.n)\n if txout is None:\n logger.error(\"Unable to find TxOut {} {}\".format(\n txin.hash, txin.n))\n else:\n inputs.append(txout)\n\n return inputs\n\n def _transaction_outputs(self, tx):\n \"\"\"Generate transaction TxOut\"\"\" \n outputs = []\n\n # GetTxid instead of GetHash for segwit support (bip-0141)\n txhash = tx.GetTxid()\n\n for n, utxo in enumerate(tx.vout): \n \n addr = TxOut.addr_from_script(utxo.scriptPubKey)\n out = TxOut(txhash, n, addr, value=utxo.nValue)\n outputs.append(out)\n\n return outputs\n\n def _block_outputs(self, block):\n \"\"\"Generate the TxOut for all the block outputs\"\"\"\n block_txouts = []\n\n for tx in block.vtx:\n block_txouts.extend(self._transaction_outputs(tx))\n \n return block_txouts\n\n def _block_inputs(self, block):\n \"\"\"Generate the TxOut for all the block inputs\"\"\"\n block_inputs = []\n\n for tx in block.vtx:\n block_inputs.extend(self._transaction_inputs(tx))\n\n return block_inputs\n\n def build_block(self, block, height=None):\n \"\"\"Build Block from bitcoin.CBlock\"\"\"\n blockhash = block.GetHash()\n \n \n outputs = self._block_outputs(block)\n \n # Add outputs to cache, because the outputs from a transaction\n # can be used as inputs for other transactions in the same block\n for txout in outputs:\n if txout.value > 0:\n self._cache.add_txout(txout)\n\n # Generate inputs \n inputs = self._block_inputs(block)\n #TODO: Remove outputs added to cache if input generation fails???\n\n\n # With the complete block remove used inputs from cache to save space\n #for txout in inputs:\n # self._cache.del_txout(txout)\n\n block = Block(blockhash, height, inputs, outputs)\n return block\n", "repo_name": "secnot/bitcoin-balance", "sub_path": "bitbalance/primitives.py", "file_name": "primitives.py", "file_ext": "py", "file_size_in_byte": 8913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "bitcoin.core.str_money_value", "line_number": 16, "usage_type": "call"}, {"api_name": "bitcoin.wallet.CBitcoinAddress.from_scriptPubKey", "line_number": 45, "usage_type": "call"}, {"api_name": "bitcoin.wallet.CBitcoinAddress", "line_number": 45, "usage_type": "name"}, {"api_name": "bitcoin.wallet.CBitcoinAddressError", "line_number": 46, "usage_type": "name"}, {"api_name": "bitcoin.core.b2x", "line_number": 92, "usage_type": "call"}, {"api_name": "bitcoin.core.str_money_value", "line_number": 95, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 
161, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 179, "usage_type": "call"}, {"api_name": "exceptions.ChainError", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "43038497878", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\ndef final_depth_integrate(finname,foutname):\n \n #jupyter nbconvert --to script final_depth_integrate.ipynb \n # Use the above script in a Terminal Window to convert to a .py file\n\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import statistics as st\n import time as time\n\n from IPython.core.interactiveshell import InteractiveShell\n InteractiveShell.ast_node_interactivity = \"last\"\n #other options include 'none', 'last', 'last_expr'\n\n df1=pd.read_csv(finname) \n\n df2=df1.copy(deep=True)\n df2.drop(df2[df2[' Layer']!=2].index, inplace=True)\n df2=df2.reset_index(drop=True)\n \n df1=df1.replace([-9999.0, 9999.0, -999.0, 999.0], np.nan)\n df2=df2.replace([-9999.0, 9999.0, -999.0, 999.0], np.nan)\n #df1=df1.replace(9999.0, np.nan)\n #df2=df2.replace(-9999.0, np.nan)\n #df2=df2.replace(9999.0, np.nan)\n\n df1_interval=np.nanmax(df1[' Interval'])\n df2_interval=np.nanmax(df2[' Interval'])\n mx_interval_int=int(df1_interval)\n\n #Check that the number or instances with data (# of intervals) equals the number of rows in df2\n if len(np.unique(df1[' Interval'])) != len(df2.index):\n print('Mismatch in Length of Files!!! ' +finname+ ' NOT processed') \n else: \n bad_value=-9998\n tic=time.time()\n cnt=0 #Counter is needed in case interval is not sequential in the original csv file\n for i in range (mx_interval_int+1):\n if any(df1[' Interval']==i):\n #print(i)\n loar=df1[' Interval']==i\n #idx=loar[loar==True].index[-1] #Maybe Not Needed\n #df2[' NASC'][i]=sum((df1[' NASC'])[loar]) #THis Created Warnings! 
Better to use iloc like below\n\n df2.iloc[cnt,df2.columns.get_loc(' Sv_mean')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' NASC')]=sum((df1[' NASC'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Sv_max')]=np.nanmax((df1[' Sv_max'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Sv_min')]=np.nanmin((df1[' Sv_min'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Sv_noise')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' NASC_noise')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Height_mean')]=sum((df1[' Height_mean'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Depth_mean')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Samples')]=sum((df1[' Samples'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Layer_depth_max')]=np.nanmax((df1[' Layer_depth_max'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Layer_depth_min')]=np.nanmin((df1[' Layer_depth_min'])[loar])\n\n df2.iloc[cnt,df2.columns.get_loc(' Standard_deviation')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Skewness')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Kurtosis')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' ABC')]=sum((df1[' ABC'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Area_Backscatter_Strength')]=bad_value\n\n df2.iloc[cnt,df2.columns.get_loc(' Thickness_mean')]=sum((df1[' Thickness_mean'])[loar])\n df2.iloc[cnt,df2.columns.get_loc(' Range_mean')]=bad_value\n df2.iloc[cnt,df2.columns.get_loc(' Beam_volume_sum')]=sum((df1[' Beam_volume_sum'])[loar])\n cnt=cnt+1\n #tmp_date=df[' Date_M'][loar]\n #f_time.append((df[' Time_M'])[loar])\n toc=time.time()\n elapsed=toc-tic\n #print(elapsed)\n \n df2=df2.fillna(value=-9999.0)\n df2\n df2.to_csv (foutname, index = False, header=True)\n print('Writing ' +foutname+ ' with ' +str(len(df2.index))+ ' rows.') \n print('Processing took ' +str(elapsed)+ ' seconds.')\n print('')\n \n #import csv\n #csvData=[f_lon, f_lat, f_nasc]\n\n #zipped=zip(f_date,f_time,f_lon, f_lat, f_nasc)\n #zipped=zip(f_time,f_lon, f_lat, f_nasc)\n\n #with open('test.csv', 'w') as csvFile:\n # writer=csv.DictWriter(csvFile, fieldnames=[\"Time\",\"Lon_M\",\"Lat_M\",\"NASC\"])\n # writer.writeheader()\n # writer = csv.writer(csvFile)\n #writer.writeheader\n # writer.writerows(zipped)\n\n #csvFile.close()\n\n #type(f_lon)\n \n\n", "repo_name": "jeffdorman/krill_biomass", "sub_path": "programs/krill_biomass_processing/final_depth_integrate.py", "file_name": "final_depth_integrate.py", "file_ext": "py", "file_size_in_byte": 4369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "IPython.core.interactiveshell.InteractiveShell.ast_node_interactivity", "line_number": 19, "usage_type": "attribute"}, {"api_name": "IPython.core.interactiveshell.InteractiveShell", "line_number": 19, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.nanmax", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 39, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 65, "usage_type": "call"}, {"api_name": 
"numpy.nanmin", "line_number": 66, "usage_type": "call"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "15858724367", "text": "import pygame, sys, random\r\nimport numpy as np\r\n\r\nclass Main:\r\n def __init__(self, fps=60, screen_resolution=()):\r\n self.fps = fps\r\n pygame.init()\r\n self.screen = pygame.display.set_mode((1080,720))\r\n self.clock = pygame.time.Clock()\r\n self.display_width, self.display_height = pygame.display.Info().current_w, pygame.display.Info().current_h\r\n self.bool_pos = 0\r\n self.x = 5\r\n self.conter = 0\r\n #0 - Левое Крыло; 2 - Правое крыло; 1 - Основа; 3 - Заднее Левое Крыло; 4 - Заднее Правое Крыло\r\n self.list_0_coordinates = [[215, 100], [255, 100],\r\n [365, 307], [265, 310]]\r\n self.list_2_coordinates = [[215, 560], [255, 560],\r\n [365, 353], [265, 350]]\r\n self.list_3_coordinates = [[0, 250], [20, 250],\r\n [90, 330], [30, 330]]\r\n self.list_4_coordinates = [[0, 410], [20, 410],\r\n [90, 330], [30, 330]]\r\n self.list_1_coordinates = [[505, 330], [495, 345], [480, 360],\r\n [50, 345], [30, 330],\r\n [50, 315], [480, 300],[495, 315]]\r\n self.p_list = [self.list_0_coordinates, self.list_1_coordinates, self.list_2_coordinates, self.list_3_coordinates, self.list_4_coordinates]\r\n #Тут Отрисовка перед циклом\r\n\r\n def run_while(self):\r\n while True:\r\n self.conter += 1\r\n if not self.bool_pos:\r\n self.drawing_in_a_loop()\r\n self.event_handler()\r\n pygame.display.flip()\r\n self.clock.tick(self.fps)\r\n\r\n def event_handler(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN :\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.key == pygame.K_SPACE:\r\n self.bool_pos = 0\r\n if event.type == pygame.MOUSEBUTTONDOWN :\r\n self.pos = event.pos\r\n for p in self.p_list:\r\n if self.point_in_polygon(p, self.pos):\r\n self.fire()\r\n self.bool_pos = not self.bool_pos\r\n break\r\n #print(self.pos)\r\n #if self.pos\r\n\r\n def drawing_in_a_loop(self):\r\n if self.list_4_coordinates[0][0] >= pygame.display.Info().current_w:\r\n self.list_0_coordinates = [[-290, 100], [-250, 100], [-140, 307], [-240, 310]]\r\n self.list_2_coordinates = [[-290, 560], [-250, 560], [-140, 353], [-240, 350]]\r\n self.list_3_coordinates = [[-505, 250], [-485, 250], [-415, 330], [-475, 330]]\r\n self.list_4_coordinates = [[-505, 410], [-485, 410], [-415, 330], [-475, 330]]\r\n self.list_1_coordinates = [[0, 330], [-10, 345], [-25, 360], [-455, 345],\r\n [-475, 330], [-455, 315], [-25, 300], [-10, 315]]\r\n self.p_list = [self.list_0_coordinates, self.list_1_coordinates, self.list_2_coordinates, self.list_3_coordinates, self.list_4_coordinates]\r\n\r\n self.screen.fill((125,249,255))\r\n for i in range(len(self.list_0_coordinates)):\r\n self.list_0_coordinates[i][0] += self.x\r\n for i in range(len(self.list_1_coordinates)):\r\n self.list_1_coordinates[i][0] += self.x\r\n for i in range(len(self.list_2_coordinates)):\r\n self.list_2_coordinates[i][0] += self.x\r\n for i in range(len(self.list_3_coordinates)):\r\n self.list_3_coordinates[i][0] += self.x\r\n for i in range(len(self.list_4_coordinates)):\r\n self.list_4_coordinates[i][0] += self.x\r\n\r\n pygame.draw.lines(self.screen, \"black\", True, self.list_0_coordinates, 5)\r\n pygame.draw.lines(self.screen, \"black\", True, self.list_2_coordinates, 5)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_0_coordinates)\r\n 
pygame.draw.polygon(self.screen, (181,184,187), self.list_2_coordinates)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_3_coordinates)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_4_coordinates)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_3_coordinates, 5)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_4_coordinates, 5)\r\n pygame.draw.polygon(self.screen, (181,184,187), self.list_1_coordinates)\r\n pygame.draw.aalines(self.screen, \"black\", True, self.list_1_coordinates, 5)\r\n\r\n def point_in_polygon(self, p, point):\r\n result = False\r\n size = len(p)\r\n j = size - 1\r\n for i in range(size):\r\n if (p[i][1] < point[1] and p[j][1] >= point[1] or p[j][1] < point[1]\r\n and p[i][1] >= point[1]) and (p[i][0] + (point[1] - p[i][1]) / (p[j][1] - p[i][1]) * (p[j][0] - p[i][0]) < point[0]):\r\n result = not result\r\n j = i\r\n return result\r\n\r\n def fire(self):\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 3)\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 7, 1)\r\n pygame.draw.circle(self.screen, \"red\", self.pos, 9, 1)\r\n\r\nMain(24).run_while()\r\n", "repo_name": "fevzifevziev/Computer_Graphics", "sub_path": "lab_5/5.02.py", "file_name": "5.02.py", "file_ext": "py", "file_size_in_byte": 5480, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pygame.init", "line_number": 7, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.display.Info", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.K_SPACE", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.display.Info", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.draw.lines", "line_number": 81, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pygame.draw.lines", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 83, "usage_type": "attribute"}, 
{"api_name": "pygame.draw.polygon", "line_number": 84, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pygame.draw.aalines", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.draw.aalines", "line_number": 88, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pygame.draw.aalines", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 104, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 105, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 106, "usage_type": "attribute"}]} +{"seq_id": "33563734214", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 20 12:54:50 2019\n\n@author: mpanaggio\n\"\"\"\n\n\nimport learn_kuramoto_files as lk\nimport numpy as np\nimport importlib as imp\nimport pandas as pd\nimport time\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimp.reload(lk)\n\n##############################################################################\n## define model parameters\nnum_osc=10\nmu_freq=0.0 # mean natural frequency\nsigma_freq=0.01 # std natural frequency\np_erdos_renyi=0.9 # probability of connection for erdos renyi\nrandom_seed=-1 # -1 to ignore\ncoupling_function=lambda x: np.sin(x)#+0.1*np.sin(2*(x+0.2)) # Gamma from kuramoto model\n#coupling_function=lambda x: np.sin(x-0.2)+0.1*np.cos(2*x) # Gamma from kuramoto model\n\n##############################################################################\n## define numerical solution parameters\ndt=0.1 # time step for numerical solution\ntmax=1000*dt # maximum time for numerical solution\nnoise_level=0.0 # post solution noise added\ndynamic_noise_level=0.00 # post solution noise added\nnum_repeats=1#10 # number of restarts for numerical solution\nnum_attempts=1#5 # number of times to attempt to learn from data for each network\nnum_networks=1#10 # number of different networks for each parameter value\nmethod='euler' #'rk2','rk4','euler',\nwith_vel=False\n## Note: the loop parameter value will overwrite the value above\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfor network in range(1,num_networks+1):\n## create parameter dictionaries\n system_params={'w': lk.random_natural_frequencies(num_osc,mu=mu_freq,sigma=sigma_freq,seed=random_seed),\n 'A': lk.random_erdos_renyi_network(num_osc,p_value=p_erdos_renyi,seed=random_seed),\n 'K': 1.0,\n 'Gamma': coupling_function,\n 'other': str(parameter),\n #'IC': np.random.rand(num_osc)*np.pi*2, # fixed initial condition for each repeat\n 'IC': {'type': 'reset', # reset (set phase to 0) or random\n 'selection': 
'fixed', #fixed or random\n 'num2perturb': 1, # integer used only when selection is random\n 'indices': [0], # list of integers, used only when selection='fixed' \n 'size': 2, # float, used only when type='random'\n 'IC': 0*np.random.rand(num_osc)*np.pi*2} # initical condition for first repeat\n }\n \n solution_params={'dt':dt,\n 'tmax':tmax,\n 'noise': noise_level,\n 'dynamic noise': dynamic_noise_level,\n 'ts_skip': 1, # don't skip timesteps\n 'num_repeats': num_repeats\n }\n \n learning_params={'learning_rate': 0.005,\n 'n_epochs': 300, #400\n 'batch_size':500,#500,\n 'n_oscillators':num_osc,\n 'dt': dt,\n 'n_coefficients': 20,\n 'reg':0.0001,\n 'prediction_method': method,\n 'velocity_fit': with_vel\n }\n t=np.arange(0,tmax,dt)[:-1].reshape(-1,1)\n phases,vel=lk.generate_data_vel(system_params,solution_params)\n n_ts=t.shape[0]\n \n \n figsize=(12,4)\n fontsize=16\n plt.figure(figsize=figsize) \n for rep in range(num_repeats):\n \n cur_t=t+rep*tmax\n cur_phases=phases[rep*n_ts:(rep+1)*n_ts]\n #lk.plot_ode_results(t,phases[rep*n_ts:(rep+1)*n_ts],figsize=(20,5),fontsize=16)\n R,Psi=lk.get_op(cur_phases)\n plt.subplot(1,3,1)\n plt.plot(cur_t,cur_phases)\n plt.title('Phases',fontsize=fontsize)\n plt.xlabel('time',fontsize=fontsize)\n plt.ylabel('phases',fontsize=fontsize)\n plt.subplot(1,3,2)\n plt.plot(cur_t,R,'b')\n plt.title('Order parameter',fontsize=fontsize)\n plt.xlabel('time',fontsize=fontsize)\n plt.ylabel('R(t)=|Z(t)|',fontsize=fontsize)\n plt.ylim(0,1.1)\n plt.subplot(1,3,3)\n plt.plot(cur_t,Psi,'b')\n plt.title('Order parameter',fontsize=fontsize)\n plt.xlabel('time',fontsize=fontsize)\n plt.ylabel(r'$\\Psi(t)=arg(Z(t))$',fontsize=fontsize)\n plt.ylim(-np.pi,np.pi)\n if rep>=1:\n for subplot in range(1,4):\n ax=plt.subplot(1,3,subplot)\n ylim=ax.get_ylim()\n ax.axvline(x=rep*tmax,ymin=ylim[0],ymax=ylim[1],color='k',linestyle='--')\n plt.show()\n", "repo_name": "mpanaggio/coupled_oscillator_network_model_reconstruction", "sub_path": "test_data_generation.py", "file_name": "test_data_generation.py", "file_ext": "py", "file_size_in_byte": 4489, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "61", "api": [{"api_name": "importlib.reload", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 25, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 43, "usage_type": "call"}, {"api_name": "learn_kuramoto_files.random_natural_frequencies", "line_number": 47, "usage_type": "call"}, {"api_name": "learn_kuramoto_files.random_erdos_renyi_network", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 79, "usage_type": "call"}, {"api_name": "learn_kuramoto_files.generate_data_vel", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "learn_kuramoto_files.get_op", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 109, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "59773328", "text": "#PROJECT: MODELING TORONTO BIKESHARE NETWORK\r\n\r\n#Notes: \r\n\r\n#station info JSON: https://tor.publicbikesystem.net/ube/gbfs/v1/en/station_information\r\n\r\n#--------------------------------------------------#\r\n\r\n#1) IMPORT LIBRARIES\r\n\r\n#Computation and Structuring:\r\n\r\nimport pandas as pd\r\nimport json\r\nfrom pandas.io.json import json_normalize\r\n\r\n#Modeling:\r\n\r\nimport networkx as nx\r\n\r\n#Visualization:\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n#--------------------------------------------------#\r\n\r\n#1) DATA IMPORT AND PREP\r\n\r\n#First we load the node data from a JSON file containing all of the station's in the 
Toronto bike network:\r\n\r\n#The JSON was in a deep embedded format and not working with Pandas read_json, so needed to take a more manual approach (i.e. can't use pd.read_json):\r\n\r\ndef unpack_json(filename):\r\n \"\"\"function to unpack the JSON file format provided by the Toronto bikeshare network \"\"\"\r\n \r\n with open(filename) as json_file: \r\n inter_data = json.load(json_file)\r\n \r\n inter_data = json_normalize(inter_data['data'])\r\n inter_data = list(inter_data.values.flatten()) #creates a list of a list of dictionaries\r\n inter_data = inter_data[0] #unpacks so it is a list of dictionaries since all the data was in a list object at index[0]\r\n inter_data_df = pd.DataFrame(inter_data) #convert the list of dictionaries into a df, which is now properly formatted\r\n \r\n return inter_data_df\r\n\r\nnode_data_function = unpack_json('station_info.json') #gets information on stations and locations\r\nnode_data_final = node_data_function[['address','capacity','lat','lon','name','station_id']] #only keep relevant columns, this is our final cleaned node data set we can use to build the graph\r\n\r\n#Now we load the edge data, which consists of an excel file with ride level data:\r\n\r\nedge_data = pd.read_excel('2016_Bike_Share_Toronto_Ridership_Q4.xlsx')\r\n\r\n#clean edge data and join to station id information from the node_data file:\r\n\r\ndef clean_edge_data(df1, df2):\r\n \"\"\"cleans and reformats the edge data set so that node information is included\"\"\"\r\n \r\n edge_data_final = pd.merge(df1,df2[['name','station_id']].rename(columns={'name':'from_station_name'}),how='left',on='from_station_name') #add station_id from the node data to the trip level data \r\n edge_data_final = edge_data_final.rename(columns={'station_id':'station_id_from'}) #rename station_id column\r\n edge_data_final = pd.merge(edge_data_final,df2[['name','station_id']].rename(columns={'name':'to_station_name'}),how='left',on='to_station_name') #add station_id from the node data to the trip level data \r\n edge_data_final = edge_data_final.rename(columns={'station_id':'station_id_to'}) #rename station_id column\r\n edge_data_final = edge_data_final.dropna(subset=['station_id_to', 'station_id_from']) #drops edges where station id info is missing\r\n edge_data_final['station_id_from'] = pd.to_numeric(edge_data_final['station_id_from'], downcast='integer') #match to format of station_id in node data set\r\n edge_data_final['station_id_to'] = pd.to_numeric(edge_data_final['station_id_to'], downcast='integer') #match to format of station_id in node data set\r\n \r\n return edge_data_final\r\n\r\nedge_data_final2 = clean_edge_data(edge_data, node_data_final) #creates final cleaned edge data set ready for creating the network\r\n\r\n#--------------------------------------------------#\r\n\r\n#2) Structure the Bikeshare network as a NetworkX Graph:\r\n\r\nNG = nx.MultiDiGraph() #creates empty directed graph\r\n\r\n#create nodes in the graph from station_id and give them a position that is equal to their lat-lon coordinates\r\n\r\nfor i, j, k in zip(node_data_final['station_id'], node_data_final['lon'], node_data_final['lat']):\r\n NG.add_node(i,pos=(j,k)) #iterates through the node data file and adds each station as a node\r\n\r\npos= nx.get_node_attributes(NG, 'pos') #set position attribute for drawing\r\nprint(pos) #check the dictionary format is correct\r\n\r\n#loop through the edge pairs and add to graph:\r\nfor i, j in zip(edge_data_final2['station_id_from'], edge_data_final2['station_id_to']):\r\n NG.add_edge(i,j) #iterates through edge_data and adds edges to the graph\r\n
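#(added note) because NG is a MultiDiGraph, each trip becomes its own edge, so repeated\r\n#trips between the same two stations are kept as parallel edges rather than collapsed\r\n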
 \r\n#--------------------------------------------------#\r\n\r\n#3) Analysis and Visualization: \r\n \r\n#Some high level stats for the network:\r\n \r\nprint('# of edges: {}'.format(NG.number_of_edges())) #~147k\r\nprint('# of nodes: {}'.format(NG.number_of_nodes())) #336 nodes, matches number of stations\r\nprint(NG.degree(node_data_final['station_id'])) #look at most important nodes in network\r\nprint(nx.in_degree_centrality(NG)) #computes the in-degree centrality for nodes in the directed network\r\nprint(nx.out_degree_centrality(NG)) #computes the out-degree centrality for nodes in the directed network\r\n\r\n#visualization of the network in physical space (using the lat-lon coordinate attributes):\r\n\r\nplt.axis('off')\r\nnx.draw(NG,pos,node_size=20,node_color='blue',alpha=0.5,width=0.5)\r\n\r\n\r\n", "repo_name": "7cb15/Modeling-Toronto-Bikeshare-Network", "sub_path": "BikeShareModeling.py", "file_name": "BikeShareModeling.py", "file_ext": "py", "file_size_in_byte": 4953, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.io.json.json_normalize", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 64, "usage_type": "call"}, {"api_name": "networkx.MultiDiGraph", "line_number": 74, "usage_type": "call"}, {"api_name": "networkx.get_node_attributes", "line_number": 81, "usage_type": "call"}, {"api_name": "networkx.in_degree_centrality", "line_number": 97, "usage_type": "call"}, {"api_name": "networkx.out_degree_centrality", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "networkx.draw", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "20752848565", "text": "from functools import lru_cache\nfrom fastapi import FastAPI, UploadFile, File, Depends, HTTPException\nfrom fastapi import responses\nfrom fastapi.responses import FileResponse, JSONResponse\nfrom typing import List, Optional\nimport pytesseract\nimport pathlib\nfrom os import getcwd\nimport os\nimport io\nimport uuid\nimport shutil\nfrom PIL import Image\nimport sys\nimport logging\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseSettings, BaseModel\nfrom random import randint\n\napp = FastAPI()\n\norigins = [\n \"http://localhost:3000\",\n]\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\npytesseract.pytesseract.tesseract_cmd ='C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n\n\nBASE_DIR = pathlib.Path(__file__).parent\nUPLOAD_DIR = BASE_DIR / \"uploads\"\n\nclass Settings(BaseSettings):\n debug: bool = False\n echo_active: bool = False\n\n class Config:\n env_file = \".env\"\n\nclass PredictionResponse(BaseModel):\n filename: str\n contentype: Optional[str] = None \n likely_class: Optional[str] = 
None\n\n@lru_cache\ndef get_settings():\n return Settings()\n\nsettings = get_settings()\nDEBUG = settings.debug\n\nprint(DEBUG)\n\n@app.post(\"/upload\", response_class=FileResponse, responses={200: {\"Description\": \"Uploading Images\"}})\nasync def upload_file(file: UploadFile = File(...), settings: Settings=Depends(get_settings)):\n if not settings.echo_active:\n raise HTTPException(detail=\"Invalid endpoint\", status_code=400)\n UPLOAD_DIR.mkdir(exist_ok=True)\n bytes_str = io.BytesIO(await file.read())\n #img = Image.open(bytes_str) #opencv can be used here, also called cv2\n try:\n img = Image.open(bytes_str)\n except:\n raise HTTPException(detail=\"Invalid image\", status_code=400)\n fname = pathlib.Path(file.filename)\n fext = fname.suffix # .jpg, .txt\n dest = UPLOAD_DIR / f\"{file.filename}\"\n # {uuid.uuid1()}{fext}\n with open(str(dest), 'wb') as out:\n out.write(bytes_str.read())\n img.save(dest)\n print(settings.debug)\n return dest\n\n@app.post(\"/predictions\") # http POST\nasync def prediction_view(file:UploadFile = File(...), settings:Settings = Depends(get_settings)):\n \n try:\n contents = await file.read()\n image = Image.open(io.BytesIO(contents)).convert('RGB')\n\n predicted_class = pytesseract.image_to_string(image)\n predictions = [x for x in predicted_class.split(\"\\n\")]\n \n logging.info(f\"Predicted Class: {predictions}\")\n\n # --\n\n bytes_str = io.BytesIO(contents)\n try:\n img = Image.open(bytes_str)\n except:\n raise HTTPException(detail=\"Invalid image\", status_code=400)\n \n try:\n img.save(getcwd() + f\"/images/{file.filename}\")\n except FileExistsError:\n pass\n\n # --\n\n # Save to file\n \n try:\n my_file_location = getcwd() + f\"/images/{file.filename}.txt\"\n my_file = open(my_file_location, \"w\")\n\n print(\"begin write\")\n\n for text in predictions:\n my_file.write(f\"{str(text)}\\n\")\n\n print(\"END WRITE\")\n my_file.close()\n print(\"close success\")\n\n except Exception as e:\n print(\"error\", e)\n \n # ---\n\n\n return {\n \"filename\": file.filename, \n \"contentype\": file.content_type, \n \"likely_class\": predictions,\n \"text_link\": f\"http://127.0.0.1:8000/file/{file.filename}.txt\",\n \"link\": f\"http://127.0.0.1:8000/file/{file.filename}\"\n }\n except Exception as error:\n logging.exception(error)\n e = sys.exc_info()[1]\n raise HTTPException(status_code=500, detail=str(e))\n\n\n@app.post(\"/predict/\", response_model=PredictionResponse)\nasync def predict(file: UploadFile = File(...)): \n # if file.content_type.startswith('/images/') is False:\n # raise HTTPException(status_code=400, detail=f'File \\'{file.filename}\\' is not an image.') \n\n try:\n contents = await file.read()\n image = Image.open(io.BytesIO(contents)).convert('RGB')\n\n predicted_class = pytesseract.image_to_string(image)\n predictions = [x for x in predicted_class.split(\"\\n\")]\n \n logging.info(f\"Predicted Class: {predictions}\")\n return {\n \"filename\": file.filename, \n \"contentype\": file.content_type, \n \"likely_class\": predicted_class,\n }\n except Exception as error:\n logging.exception(error)\n e = sys.exc_info()[1]\n raise HTTPException(status_code=500, detail=str(e))\n\n@app.post(\"/uploads\")\nasync def upload_files(file: UploadFile = File(...)):\n with open(file.filename, 'wb') as image:\n content = await file.read()\n image.write(content)\n image.close()\n return JSONResponse(content={\"filename\": file.filename},\nstatus_code=200)\n\n@app.post(\"/img\")\nasync def upload_img(files: List[UploadFile] = File(...)):\n # 
UPLOAD_DIR.mkdir(exist_ok=True)\n for img in files:\n with open(f'{img.filename}', \"wb\") as buffer:\n shutil.copyfileobj(img.file, buffer)\n\n return {\"file_name\" : \"Images Uploaded\"}\n\n@app.post(\"/upload-file/\")\nasync def create_upload_file(uploaded_file: UploadFile = File(...)):\n print(\"execute\")\n\n file_location = f\"images/{uploaded_file.filename}\"\n with open(file_location, \"wb+\") as file_object:\n shutil.copyfileobj(uploaded_file.file, file_object) \n return {\"info\": f\"file '{uploaded_file.filename}' saved at '{file_location}'\",\n \"link\": f\"http://127.0.0.1:8000/file/{uploaded_file.filename}\" }\n\n@app.get(\"/images/\")\nasync def read_random_file():\n\n # get a random file from the image directory\n files = os.listdir(UPLOAD_DIR)\n random_index = randint(0, len(files) - 1)\n\n path = f\"{UPLOAD_DIR}/{files[random_index]}\"\n \n # notice you can use FileResponse now because it expects a path\n return FileResponse(path)\n\n@app.get(\"/file/{name_file}\")\ndef get_file(name_file: str):\n return FileResponse(path=getcwd() + \"/images/\" + name_file)\n\n\n\"\"\"\n@app.post(\"/test/\")\nasync def get_file(uploaded_file: UploadFile = File(...)):\n print(\"receive\", uploaded_file.filename)\n return JSONResponse({\"state\": \"success\"})\n\"\"\"\n", "repo_name": "Angelvicks/vision-ai", "sub_path": "Backend/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": 
"logging.info", "line_number": 91, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 95, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 97, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 99, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 102, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 137, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 138, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 139, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 143, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 143, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 149, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 149, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 149, "usage_type": "call"}, {"api_name": "pytesseract.image_to_string", "line_number": 151, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 154, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 161, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 162, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 163, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 166, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 166, "usage_type": "call"}, {"api_name": "fastapi.responses.JSONResponse", "line_number": 171, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 175, "usage_type": "name"}, {"api_name": "fastapi.UploadFile", "line_number": 175, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 175, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 179, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 184, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 184, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 189, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 197, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 198, "usage_type": "call"}, {"api_name": "fastapi.responses.FileResponse", "line_number": 203, "usage_type": "call"}, {"api_name": "fastapi.responses.FileResponse", "line_number": 207, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 207, "usage_type": "call"}]} +{"seq_id": "8545020554", "text": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.9.1\n# kernelspec:\n# display_name: hcp7t_fv_sleep_env\n# language: python\n# name: hcp7t_fv_sleep_env\n# ---\n\n# # Data Quality Assurance - Part 1\n#\n# This notebook will perform the following steps:\n#\n# 1. Load a list of subjects of interest (i.e., those with at least one resting-state scan at 7T)\n# 2. Load motion estimates and compute Framewise Displacement (saves FD to disk on each run folder)\n# 3. Attempt loading of ET files for each run (and mark those that are defective)\n# 4. 
Construct a dataframe with the following information per run: correct number of TRs, correct spatial resolution, correct number of volumes, ET available, ET can be loaded\n#\n# A summary of this QA is saved to disk in ${RESOURCES_DIR}/QA_Part1_Results.pkl\n# ***\n\n# +\n# %%time\nfrom utils.basics import get_7t_subjects, load_motion_info\nfrom utils.variables import RUNS, DATA_DIR, ProjectFiles_DF_Path, QA1_Results_DF_Path\nfrom utils.ParseEyeLinkAsc import ParseEyeLinkAsc\n\nimport numpy as np\nimport pandas as pd\nimport os.path as osp\nimport nibabel as nib\n\nVERBOSE=False\n# -\n\n# *** \n# ## 1. Check the Dataframe with information about available files\n\nProjectFiles_DF = pd.read_pickle(ProjectFiles_DF_Path)\nprint('++ INFO: Shape of ProjectFiles_DF is %s' % str(ProjectFiles_DF.shape))\n\nprint('++ INFO: Number of Runs with ET(asc) file available: %d Runs' % (ProjectFiles_DF.shape[0] - ProjectFiles_DF['ET_ASC'].isna().sum()))\nprint('++ INFO: Number of Runs with ET(csv) file available: %d Runs' % (ProjectFiles_DF.shape[0] - ProjectFiles_DF['ET_CSV'].isna().sum()))\n\n# ***\n# ## 2. Load List of Subjects of interest\n\n# Load List of Subjects with at least one resting-state scan\nsbjs = get_7t_subjects()\nprint('++ Number of available subjects: %d' % len(sbjs))\n\n# ***\n# ## 4. Load Motion Information and Compute Framewise Displacement\n# This will generate a file per run with the traces of framewise displacement for that particular run\n\n# %%time\n# Load Motion Information for all subjects available and create FD data frame for each run\nmot_df = load_motion_info(sbjs, write_FD=True, fillnan=False, verbose=VERBOSE)\n\nprint('++ INFO: Shape of mot_df is %s' % str(mot_df.shape))\nmot_df.head()\n\n# ***\n# ## 5. Check the Integrity of Eye Tracker Data Files & See if FD is low\n#\n# Unfortunately, not all eye tracking data files can be loaded properly. 
\n#\n# During this initial QA, we will test whether or not a given ET file (e.g., that of one run) can be properly loaded or not\n#\n# In addition we will also store the previously computed Mean and Max Framewise Displacement\n\n# +\n# %%time\n# Create empty DataFrame with the following columns:\n# * Sbj = Subject ID\n# * Run = Run ID\n# * Dir Avail = Does the directory for this run exist on our system?\n# * Mot Avail = Is the motion file for this run available on our system?\n# * ET Avail = Are both ET files for this run available on our system?\n# * ET_OK = Are we able to load (e.g., file is uncorrupted) the main ET File\ndf = pd.DataFrame(columns=['Sbj','Run','Dir Avail','Mot Avail','ET Avail', 'ET_OK'])\n\n# For all subjects\nfor s,sbj in enumerate(sbjs):\n # For all possible runs\n for run in RUNS:\n # Create the path to this run directory (should it exist)\n drun_path = osp.join(DATA_DIR,str(sbj),run)\n if osp.exists(drun_path):\n # Create the path to the motion file (should it exist)\n mot_path = osp.join(drun_path,'{run}_Movement_Regressors.txt'.format(run=run))\n # Create the paths to the ET files\n et_asc_path = osp.join(drun_path,'{run}_eyetrack.asc'.format(run=run))\n et_csv_path = osp.join(drun_path,'{run}_eyetrack_summary.csv'.format(run=run))\n # Try loading the ET file without causing any type of exception\n et_ok = False # ensures et_ok is defined even when the ET file is missing\n if osp.exists(et_asc_path):\n try:\n dfTrial,dfMsg,dfFix,dfSacc,dfBlink,dfSamples = ParseEyeLinkAsc(et_asc_path)\n et_ok = True\n except Exception: # If there was any issue (e.g., an exception), then set et_ok to False\n et_ok = False\n # Update the dataframe with the information about this run\n df = df.append({'Sbj':sbj,\n 'Run':run,\n 'Dir Avail':osp.exists(drun_path),\n 'Mot Avail':osp.exists(mot_path),\n 'ET Avail':osp.exists(et_asc_path) & osp.exists(et_csv_path),\n 'ET_OK': et_ok}, \n ignore_index=True)\n if VERBOSE:\n print('INFO: Just finished with subject {sbj} run {run}'.format(sbj=sbj, run=run))\n else: \n print('WARNING: Subject {sbj} run {run} does not exist'.format(sbj=sbj, run=run))\ndf = df.infer_objects()\n# -\n\n# ***\n# ## 6. Check the spatial resolution and length of the scans\n\nrun_list = [str(row['Sbj'])+'_'+row['Run'] for r,row in df.iterrows() ]\n\n# %%time\ndf['Spatial Resolution OK'] = None\ndf['Nacq OK'] = None\ndf['TR OK'] = None\nprint('++ INFO: Number of items to iter [%d]' % len(run_list))\nprint(' + ',end='')\nfor i,item in enumerate(run_list):\n sbj,run = item.split('_',1)\n file_path = osp.join(DATA_DIR,sbj,run,run+'_mPP.nii.gz')\n if np.mod(i,50)==0:\n print('%i..' 
% i, end='')\n if not osp.exists(file_path):\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = False\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = False\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = False\n else:\n file_img = nib.load(file_path)\n [dx, dy, dz, tr] = file_img.header.get_zooms()\n \n if np.isclose(dx,1.60) & np.isclose(dy,1.60) & np.isclose(dz,1.60):\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = True\n else:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Spatial Resolution OK')] = False\n \n if np.isclose(tr,1.0):\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = True\n else:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'TR OK')] = False\n \n if file_img.shape[3] == 900:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = True\n else:\n df.loc[((df['Sbj']==sbj) & (df['Run']==run),'Nacq OK')] = False\nprint('')\ndf.head()\n\nprint(\"++ INFO: Number of Runs with directory available: %d\" % df[df['Dir Avail']==True].shape[0])\nprint(\"++ INFO: Number of Runs with ET available: %d\" % df[df['ET Avail']==True].shape[0])\nprint(\"++ INFO: Number of Runs with ET OK: %d\" % df[df['ET_OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with correct spatial resolution: %d\" % df[df['Spatial Resolution OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with correct number of acquisitions: %d\" % df[df['Nacq OK']==True].shape[0])\nprint(\"++ INFO: Number of Runs with expected TR: %d\" % df[df['TR OK']==True].shape[0])\nprint(\"++ ===============================================================\")\nprint(\"++ INFO: Number of Runs with all controls OK: %d\" % df[(df['Dir Avail']==True) & \n (df['ET Avail']==True) & \n (df['ET_OK']==True) & \n (df['Spatial Resolution OK']==True) &\n (df['Nacq OK']==True) &\n (df['TR OK']==True)].shape[0])\n\n# ***\n# ## Save the summary of this first QA part to disk\n\ndf.to_pickle(QA1_Results_DF_Path)\n\nprint('++ INFO: Number of runs missing ET files = %d RUNS' % (df[df['ET Avail']==False].shape[0]))\nprint('++ INFO: Number of runs with ET files available but unreadable = %d RUNS' % (df[df['ET_OK']==False].shape[0]))\n\n# ***\n#\n# ### Clean up space\n#\n# Scans that will not be used because the ET is not available will be removed from disk\n\ndf = pd.read_pickle(QA1_Results_DF_Path)\n\ndf = df[df['ET Avail']==False]\n\ncommand_file = open('./N01_QA_RemoveScansWithBadET.sh','w+')\nfor r,row in df.iterrows():\n command_file.write('rm -rf /data/SFIMJGC_HCP7T/HCP7T/{sbj}/{run} \\n'.format(sbj=row['Sbj'],run=row['Run']))\ncommand_file.close()\n", "repo_name": "nimh-sfim/hcp7t_fv_sleep", "sub_path": "Notebooks/N01_QA.py", "file_name": "N01_QA.py", "file_ext": "py", "file_size_in_byte": 8746, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_pickle", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.variables.ProjectFiles_DF_Path", "line_number": 45, "usage_type": "argument"}, {"api_name": "utils.basics.get_7t_subjects", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.basics.load_motion_info", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 87, "usage_type": "call"}, {"api_name": "utils.variables.RUNS", "line_number": 92, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "utils.variables.DATA_DIR", "line_number": 94, "usage_type": "argument"}, 
{"api_name": "os.path", "line_number": 94, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "name"}, {"api_name": "utils.ParseEyeLinkAsc.ParseEyeLinkAsc", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "utils.variables.DATA_DIR", "line_number": 136, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 136, "usage_type": "name"}, {"api_name": "numpy.mod", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "name"}, {"api_name": "nibabel.load", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.isclose", "line_number": 152, "usage_type": "call"}, {"api_name": "utils.variables.QA1_Results_DF_Path", "line_number": 181, "usage_type": "argument"}, {"api_name": "pandas.read_pickle", "line_number": 192, "usage_type": "call"}, {"api_name": "utils.variables.QA1_Results_DF_Path", "line_number": 192, "usage_type": "argument"}]} +{"seq_id": "14677063857", "text": "import argparse\nimport numpy as np\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nimport matplotlib.pyplot as plt\nimport cv2\nimport PIL.Image\n\n# from models import *\nimport models\n\n# Prune settings\nparser = argparse.ArgumentParser(description='PyTorch Slimming CIFAR prune')\nparser.add_argument('--dataset', type=str, default='cifar10',\n help='training dataset (default: cifar10)')\nparser.add_argument('--val-batch-size', type=int, default=256, metavar='N',\n help='input batch size for validatin (default: 256)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--depth', type=int, default=16,\n help='depth of the vgg')\nparser.add_argument('--arch', default='vgg_16', type=str,\n help='architecture to use')\n# parser.add_argument('--model', default='', type=str, metavar='PATH',\n# help='path to the model (default: none)')\nparser.add_argument('--save', default='./cleanresult/1/EB-30-29.pth.tar', type=str, metavar='PATH',\n help='path to save pruned model (default: none)')\nparser.add_argument('--save_1', default='./poisonresult_2/2/EB-30-28.pth.tar', type=str, metavar='PATH',\n help='path to save pruned model (default: 
none)')\n\n# parser.add_argument('--save_2', default='./poisonresult_2/3/EB-30-27.pth.tar', type=str, metavar='PATH',\n# help='path to save pruned model (default: none)')\n# parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='manual start epoch number')\n# parser.add_argument('--end_epoch', default=160, type=int, metavar='N', help='manual end epoch number')\n\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\nprint('Experiment Starting... Check critical information below carefully!')\nprint('Training Phase: Calculate Difference of Two Masks;')\nprint('Dataset:{};'.format(args.dataset))\n# print('Dataset:{};\\tStart Epoch:{};\\tEnd Epoch:{};'.format(args.dataset, args.start_epoch, args.end_epoch)) #\nprint('Network Architecture:{};\\tDepth:{};'.format(args.arch, args.depth)) #\nprint('First Mask Path:{};'.format(args.save))\nprint('Second Mask Path:{};'.format(args.save_1))\nprint('-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\n\nsetting_perc = 0.3\n\nif not os.path.exists(args.save):\n os.makedirs(args.save)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)\nmodel_bd = models.__dict__[args.arch](dataset=args.dataset, depth=args.depth)\n\nif args.cuda:\n model.cuda()\n model_bd.cuda()\n\n\ndef pruning(model, percent):\n total = 0\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n total += m.weight.data.shape[0]\n\n bn = torch.zeros(total)\n index = 0\n for m in model.modules():\n if isinstance(m, nn.BatchNorm2d):\n size = m.weight.data.shape[0]\n bn[index:(index + size)] = m.weight.data.abs().clone()\n index += size\n\n y, i = torch.sort(bn)\n thre_index = int(total * percent)\n thre = y[thre_index]\n mask2 = bn.gt(thre).float().view(-1)\n return mask2\n\n\ndef get_mask(path: str, default_percent=0.3):\n print(f'==> Mask from {path} ... ')\n checkpoint = torch.load(path)\n best_epoch = checkpoint['epoch']\n print('EarlyBird Emerging Epoch: ', best_epoch)\n model.load_state_dict(checkpoint['state_dict'])\n percent = 0.3 if 'EB-30' in path else 0.5 if 'EB-50' in path else 0.7 if 'EB-70' in path else default_percent\n mask = pruning(model, percent)\n print('Remanent Percent: {}%.\\n'.format(int(torch.sum(mask == 1) * 100. / mask.size(0))))\n return mask\n\n# get clean EB\nprint('==> resuming from {} ... '.format(args.save))\ncheckpoint = torch.load(args.save)\nbest_epoch = checkpoint['epoch']\nprint('EarlyBird Emerging Epoch: ', best_epoch)\nmodel.load_state_dict(checkpoint['state_dict'])\n\n# get backdoor EB and mask\nprint('==> resuming from {} ... '.format(args.save_1))\ncheckpoint_bd = torch.load(args.save_1)\nbest_epoch_bd = checkpoint_bd['epoch']\nprint('EarlyBird Emerging Epoch: ', best_epoch_bd)\nmodel_bd.load_state_dict(checkpoint_bd['state_dict'])\npercent_2 = 0.3 if 'EB-30' in args.save_1 else 0.5 if 'EB-50' in args.save_1 else 0.7 if 'EB-70' in args.save_1 else setting_perc\nbest_mask_bd = pruning(model_bd, percent_2)\n\nX = []\nY = []\n\nfor percent_set in np.arange(0.3, 1, 0.05): # [0.3, 0.35, 0.4, ... 
, 1]:\n X.append(percent_set)\n print(\"\\nclean prune percent:\", percent_set)\n best_mask = pruning(model, percent_set) # get clean mask /key neurons\n\n in_num = 0\n for i in range(best_mask.size(0)):\n if best_mask[i] == 1 and best_mask_bd[i] == 1: # key neuron exists in both cl & bd\n in_num += 1 \n Y.append(in_num / int(torch.sum(best_mask)))\n print(\"both exist percent:\", in_num / int(torch.sum(best_mask)))\nprint(X)\nprint(Y)", "repo_name": "zeyuanyin/LTH-Backdoor", "sub_path": "plot/key_neuron_rate.py", "file_name": "key_neuron_rate.py", "file_ext": "py", "file_size_in_byte": 5281, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "61", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.__dict__", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.__dict__", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 78, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.sort", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "72831479234", "text": "\"\"\"\nFor description of the script, see the README.md\n\"\"\"\n\nimport numpy as np\nfrom printind.printind_function import printi\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\n\n\nclass ComputeForce(object):\n def __init__(self, verbose=0, rho=1000.0, added_AOA_camber_deg=0.0, radius_sphere=0.010):\n self.verbose = verbose\n self.rho = rho\n self.added_AOA_camber_deg = added_AOA_camber_deg\n self.radius_sphere = radius_sphere\n\n def set_vertical_velocity(self, vertical_velocity):\n self.vertical_velocity = vertical_velocity\n\n def set_angular_frequency(self, angular_frequency):\n self.angular_frequency = angular_frequency\n self.angular_rate = 2 * np.pi * self.angular_frequency\n\n def set_interpolator_Cl(self, interpolator_Cl):\n self.interpolator_Cl = interpolator_Cl\n\n def set_interpolator_Cd(self, interpolator_Cd):\n self.interpolator_Cd = interpolator_Cd\n\n def set_seed_profile(self, wing_instance):\n self.wing_instance = wing_instance\n\n def set_wing_pitch(self, 
wing_pitch):\n self.wing_pitch = wing_pitch\n\n def set_wing_chord(self, wing_chord):\n self.wing_chord = wing_chord\n\n def compute_force_all_elements(self):\n self.list_crrt_R = []\n self.list_projected_vertical_velocity = []\n self.list_AOA_deg = []\n self.list_total_velocity = []\n self.list_wind_angle_rad = []\n self.list_drag = []\n self.list_lift = []\n self.list_Cd = []\n self.list_Cl = []\n self.list_forward_force = []\n self.list_vertical_force = []\n self.list_forward_force_lift = []\n self.list_forward_force_drag = []\n self.list_vertical_force_lift = []\n self.list_vertical_force_drag = []\n self.list_base_coeff = []\n self.list_crrt_rotation_velocity = []\n self.list_forward_moments = []\n\n for (crrt_size, crrt_R, crrt_phi) in zip(self.wing_instance.list_size_element, self.wing_instance.list_R, self.wing_instance.list_phi):\n # horizontal velocity due to rotation of the seed\n crrt_rotation_velocity = self.angular_rate * crrt_R\n\n # angle of attack\n projected_vertical_velocity = self.vertical_velocity * np.cos(crrt_phi * np.pi / 180.0)\n wind_angle_rad = np.arctan2(projected_vertical_velocity, crrt_rotation_velocity)\n crrt_AOA_deg = wind_angle_rad * 180.0 / np.pi - self.wing_pitch\n\n # total velocity magnitude\n total_velocity = np.sqrt(crrt_rotation_velocity**2 + projected_vertical_velocity**2)\n\n # base coefficient for computation of lift and drag\n base_coeff = 0.5 * self.rho * (total_velocity**2) * self.wing_chord * crrt_size\n\n # compute lift and drag; careful about the orientation of crrt_AOA_deg! This is because of\n # direction of rotation vs. the sketches\n Cd = self.interpolator_Cd.return_interpolated(crrt_AOA_deg + self.added_AOA_camber_deg)\n Cl = self.interpolator_Cl.return_interpolated(crrt_AOA_deg + self.added_AOA_camber_deg)\n crrt_lift = base_coeff * Cl\n crrt_drag = base_coeff * Cd\n crrt_forward = np.sin(wind_angle_rad) * crrt_lift \\\n - np.cos(wind_angle_rad) * crrt_drag\n crrt_vertical = np.cos(wind_angle_rad) * crrt_lift \\\n + np.sin(wind_angle_rad) * crrt_drag\n\n crrt_forward_projected = crrt_forward\n crrt_vertical_projected = crrt_vertical * np.cos(crrt_phi * np.pi / 180.0)\n\n self.list_crrt_R.append(crrt_R)\n self.list_projected_vertical_velocity.append(projected_vertical_velocity)\n self.list_crrt_rotation_velocity.append(crrt_rotation_velocity)\n self.list_wind_angle_rad.append(wind_angle_rad)\n self.list_total_velocity.append(total_velocity)\n self.list_AOA_deg.append(crrt_AOA_deg)\n self.list_base_coeff.append(base_coeff)\n self.list_drag.append(crrt_drag)\n self.list_lift.append(crrt_lift)\n self.list_Cd.append(Cd)\n self.list_Cl.append(Cl)\n self.list_forward_force.append(crrt_forward_projected)\n self.list_vertical_force.append(crrt_vertical_projected)\n self.list_forward_moments.append(crrt_forward_projected * crrt_R)\n\n def compute_resultant_force(self):\n self.resultant_forward_moment = sum(self.list_forward_moments)\n self.resultant_vertical_force = sum(self.list_vertical_force)\n\n def display_reduced_information(self, title_base=None, gray_region=None):\n linewidth = 3.0\n color = (0.5, 0.5, 0.5, 0.5)\n \n \n \n # figure with profile and angle of attack ------------------------------\n fig, ax1 = plt.subplots()\n \n if gray_region is not None:\n rect = Rectangle((gray_region[0], -0.002), gray_region[1] - gray_region[0], 0.06, color=color)\n ax1.add_patch(rect)\n \n ax1.plot(self.wing_instance.list_R, self.wing_instance.list_h, 'b', linewidth=linewidth * 2.0, label=\"seed wing\", linestyle=\"-\")\n ax1.set_xlabel('R [m]')\n # 
Make the y-axis label, ticks and tick labels match the line color.\n ax1.set_ylabel('h [m]', color='b')\n ax1.tick_params('y', colors='b')\n plt.ylim([-0.002, 0.052])\n plt.xlim([-0.002, 0.052])\n\n ax2 = ax1.twinx()\n ax2.plot(self.wing_instance.list_R, self.list_AOA_deg, 'r', linewidth=linewidth, linestyle=\"--\", label=\"Angle of attack\")\n ax2.set_ylabel('local angle of attack [deg]', color='r')\n ax2.tick_params('y', colors='r')\n \n ax2.plot([], [], 'b', linewidth=linewidth, linestyle=\"-\", label=\"seed wing\")\n \n plt.legend(loc=\"lower right\")\n\n fig.tight_layout()\n \n plt.savefig(title_base + \"combined_fig_1.pdf\")\n # done -----------------------------------------------------------------\n \n \n \n # figure with vertical force and moment --------------------------------\n fig, ax1 = plt.subplots()\n \n if gray_region is not None:\n rect = Rectangle((gray_region[0], -0.5), gray_region[1] - gray_region[0], 1.0, color=color)\n ax1.add_patch(rect)\n \n ax1.plot(self.wing_instance.list_R, np.array(self.list_vertical_force) / np.array(self.wing_instance.list_size_element), 'b', linewidth=linewidth, linestyle=\"-\", label=\"vertical force\")\n ax1.plot([0.0, 0.049], [0.0, 0.0], 'k', linewidth=linewidth * 0.75)\n ax1.set_xlabel('R [m]')\n # Make the y-axis label, ticks and tick labels match the line color.\n ax1.set_ylabel('Vertical force distribution [N/m]', color='b')\n ax1.tick_params('y', colors='b')\n plt.ylim([-0.3, 0.3])\n plt.xlim([0.00, 0.049])\n\n ax2 = ax1.twinx()\n ax2.plot(self.wing_instance.list_R, np.array(self.list_forward_moments) / np.array(self.wing_instance.list_size_element), 'r', linewidth=linewidth, linestyle=\"--\", label=\"forward moment\")\n ax2.set_ylabel('Forward moment distribution [N.m / m]', color='r')\n ax2.tick_params('y', colors='r')\n plt.ylim([-0.0044, 0.0045])\n \n ax2.plot([], [], 'b', linewidth=linewidth, linestyle=\"-\", label=\"vertical force\")\n \n plt.legend(loc=\"lower left\")\n\n fig.tight_layout()\n \n plt.savefig(title_base + \"combined_fig_2.pdf\")\n # done -----------------------------------------------------------------\n \n \n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, np.array(self.list_vertical_force) / np.array(self.wing_instance.list_size_element), linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"Vertical force distribution [N/m]\")\n plt.ylim([-0.3, 0.4])\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_forceDistribution.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, np.array(self.list_forward_force) / np.array(self.wing_instance.list_size_element), linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.ylim([-0.2, 0.1])\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"Forward force distribution [N/m]\")\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_forwardForceDistribution.pdf\")\n plt.show()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, 
np.array(self.list_forward_moments) / np.array(self.wing_instance.list_size_element), linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"Forward moment distribution [N.m / m]\")\n plt.ylim([-0.008, 0.004])\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_momentDistribution.pdf\")\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n if gray_region is not None:\n rect = Rectangle((gray_region[0], -100), gray_region[1] - gray_region[0], 200, color=color)\n ax.add_patch(rect)\n plt.plot(self.wing_instance.list_R, self.list_AOA_deg, linewidth=linewidth)\n # plt.legend()\n plt.grid()\n plt.xlabel(\"R [m]\")\n plt.ylabel(\"local angle of attack [deg]\")\n plt.ylim([-20, 50])\n plt.tight_layout()\n if title_base is not None:\n plt.savefig(title_base + \"_AOADistribution.pdf\")\n \n \n \n \n \n plt.show()\n\n def return_forward_moment(self):\n return(self.resultant_forward_moment)\n\n def return_vertical_force(self):\n return(self.resultant_vertical_force)\n\n def display_all_results(self):\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_AOA_deg, label=\"AOA\") # list_AOA_deg is already in degrees\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_wind_angle_rad) * 180.0 / np.pi, label=\"wind angle\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_total_velocity, label=\"total_velocity\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_crrt_rotation_velocity, label=\"crrt_rotation_velocity\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_Cl, label=\"Cl\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_Cd, label=\"Cd\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_base_coeff, label=\"base_coeff\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_lift, label=\"lift\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_drag, label=\"drag\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_lift) / np.array(self.list_drag), label=\"lift/drag\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, self.list_forward_force_lift, label=\"forward lift\")\n plt.plot(self.wing_instance.list_R, self.list_forward_force_drag, label=\"forward drag\")\n plt.plot(self.wing_instance.list_R, self.list_vertical_force_lift, label=\"vertical lift\")\n plt.plot(self.wing_instance.list_R, self.list_vertical_force_drag, label=\"vertical drag\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_vertical_force) / np.array(self.wing_instance.list_size_element), label=\"Vertical force\")\n plt.legend()\n plt.show()\n\n plt.figure()\n plt.plot(self.wing_instance.list_R, np.array(self.list_forward_force) / np.array(self.wing_instance.list_size_element), label=\"Forward force\")\n plt.legend()\n plt.show()\n\n\n\"\"\"\nnote that in compute_force_on_seed, both the rotation and the vertical velocity are positive for 'normal seed'. 
I.e.\npositive vertical velocity means seed going down, positive frequency means seed rotating as in experiments.\n\"\"\"\n", "repo_name": "jerabaul29/EffectFoldAngleAutorotatingSeeds", "sub_path": "model/compute_force_on_seed.py", "file_name": "compute_force_on_seed.py", "file_ext": "py", "file_size_in_byte": 13161, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "numpy.pi", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 87, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": 
"matplotlib.patches.Rectangle", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 253, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 258, 
"usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 262, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 262, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 264, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.figure", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 290, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 300, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 303, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 307, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 307, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 
312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}]} +{"seq_id": "19365984025", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport os\nimport socket\nimport psana\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom pickle import dump\n\nfrom benchmarking import Event,\\\n set_defaults,\\\n event_here, start, stop, log, event_log\n\n\n\n\n#\n# PSANA2 BENCHMARK, based on CCTBX's XTC_PROCESS pipeline.\n# COMMENT: I've started with cctbx_project/xfel/xtc_process.py and stripped\n# out all the things that I don't think are relevant to this benchmark\n#\n\n\n\n\n@log\ndef get_calib_file_path(env, address, run):\n \"\"\" Findes the path to the SLAC metrology file stored in a psana\n environment object's calibration store\n @param env psana environment object\n @param address address string for a detector\n @param run psana run object or run number\n \"\"\"\n\n from psana import Detector\n\n\n #\n # try to get it from the detector interface\n #\n\n try:\n start(\"load geometry from detector\")\n psana_det = Detector(address, run.env())\n ret = psana_det.pyda.geoaccess(run.run()).path\n stop(\"load geometry from detector\")\n\n return ret\n except Exception as e:\n pass\n\n\n #\n # try to get it from the calib store directly\n #\n\n from psana import ndarray_uint8_1, Source\n\n start(\"load geometry from calib store\")\n cls = env.calibStore()\n src = Source('DetInfo(%s)'%address)\n path_nda = cls.get(ndarray_uint8_1, src, 'geometry-calib')\n stop(\"load geometry from calib store\")\n\n if path_nda is None:\n return None\n return ''.join(map(chr, path_nda))\n\n\n\n@log\ndef env_dxtbx_from_slac_metrology(run, address):\n \"\"\" Loads a dxtbx cspad cbf header only object from the metrology path\n stored in a psana run object's calibration store\n @param env psana run object\n @param address address string for a detector\n \"\"\"\n\n start(\"load geometry data from detector\")\n det = run.Detector(address)\n geometry = det.raw.geometry()\n stop(\"load geometry data from detector\")\n\n if geometry is None:\n metro_path = get_calib_file_path(run.env(), address, run)\n elif geometry.valid:\n metro_path = None\n else:\n raise RuntimeError(f\"Could not read geometry, hostname: {socket.gethostname()}\")\n\n if metro_path is None and geometry is None:\n return None\n\n\n return None\n\n\n\n@log\ndef get_psana_corrected_data(psana_det, evt, use_default=False, dark=True,\n common_mode=None, apply_gain_mask=True,\n gain_mask_value=None, per_pixel_gain=False,\n gain_mask=None, additional_gain_factor=None):\n \"\"\"\n Given a psana Detector object, apply corrections as 
appropriate and return\n the data from the event\n @param psana_det psana Detector object\n @param evt psana event\n @param use_default If true, apply the default calibration only, using the\n psana algorithms. Otherwise, use the corrections specified by the rest of\n the flags and values passed in.\n @param dark Whether to apply the detector dark, bool or numpy array\n @param common_mode Which common mode algorithm to apply. None: apply no\n algorithm. Default: use the algorithm specified in the calib folder.\n Otherwise should be a list as specified by the psana documentation for\n common mode customization\n @param apply_gain_mask Whether to apply the common mode gain mask correction\n @param gain_mask_value Multiplier to apply to the pixels, according to the\n gain mask\n @param per_pixel_gain If available, use the per pixel gain deployed to the\n calibration folder\n @param gain_mask gain mask showing which pixels to apply gain mask value\n @param additional_gain_factor Additional gain factor. Pixel counts are\n divided by this number after all other corrections.\n @return Numpy array corrected as specified.\n \"\"\"\n\n # order is pedestals, then common mode, then gain mask, then per pixel gain\n\n # HACK: Force psana v2 behaviour\n PSANA2_VERSION = True\n\n\n start(\"psana_det.raw\")\n if PSANA2_VERSION:\n # in psana2, data are stored as raw, fex, etc so the selection\n # has to be given here when the detector interface is used.\n # for now, assumes cctbx uses \"raw\".\n psana_det = psana_det.raw\n stop(\"psana_det.raw\")\n\n\n if use_default:\n start(\"psana_det.calib\")\n ret = psana_det.calib(evt) # applies psana's complex run-dependent calibrations\n stop(\"psana_det.calib\")\n return ret\n\n\n start(\"psana_det.raw_data(evt)\")\n data = psana_det.raw_data(evt)\n stop(\"psana_det.raw_data(evt)\")\n if data is None:\n return\n\n\n start(\"subtract psana_det.pedestals()\")\n data = data.astype(np.float64)\n if isinstance(dark, bool):\n if dark:\n if PSANA2_VERSION:\n data -= psana_det.pedestals()\n else:\n data -= psana_det.pedestals(evt)\n elif isinstance( dark, np.ndarray ):\n data -= dark\n stop(\"subtract psana_det.pedestals()\")\n\n\n if common_mode is not None and common_mode != \"default\":\n print(\"Applying common mode\")\n\n start(\"psana_det.common_mode_apply(data, common_mode)\")\n if common_mode == 'cspad_default':\n common_mode = (1,25,25,100,1) # default parameters for CSPAD images\n psana_det.common_mode_apply(data, common_mode)\n elif common_mode == 'unbonded':\n common_mode = (5,0,0,0,0) # unbonded pixels used for correction\n psana_det.common_mode_apply(data, common_mode)\n else: # this is how it was before.. 
Though I think common_mode would need to be a tuple..\n psana_det.common_mode_apply(data, common_mode)\n stop(\"psana_det.common_mode_apply(data, common_mode)\")\n else:\n print(\"Not applying common mode\")\n \n\n if apply_gain_mask:\n print(\"Applying gain mask\")\n\n start(\"apply gain mask\")\n if gain_mask is None: # TODO: consider try/except here\n gain_mask = psana_det.gain_mask(evt) == 1\n if gain_mask_value is None:\n try:\n gain_mask_value = psana_det._gain_mask_factor\n except AttributeError:\n print(\"No gain set for psana detector, using gain value of 1, consider disabling gain in your phil file\")\n gain_mask_value = 1\n data[gain_mask] = data[gain_mask]*gain_mask_value\n stop(\"apply gain mask\")\n else:\n print(\"Not applying gain mask\")\n\n\n if per_pixel_gain: # TODO: test this\n start(\"applying psana_det.gain()\")\n data *= psana_det.gain()\n stop(\"applying psana_det.gain()\")\n\n\n if additional_gain_factor is not None:\n data /= additional_gain_factor\n\n\n return data\n\n\n\n@log\ndef process_event(run, evt, psana_det):\n \"\"\"\n Process a single event from a run\n @param run psana run object\n @param evt psana event object\n @param psana_det psana Detector object\n \"\"\"\n\n\n # HACK: Force psana v2 behaviour\n PSANA2_VERSION = True\n\n start(\"construct event timestamp\")\n if PSANA2_VERSION:\n sec = evt._seconds\n nsec = evt._nanoseconds\n else:\n time = evt.get(psana.EventId).time()\n fid = evt.get(psana.EventId).fiducials()\n sec = time[0]\n nsec = time[1]\n\n ts = Event.as_timestamp(sec, nsec/1e6)\n stop(\"construct event timestamp\")\n\n print(\"Accepted\", ts)\n\n # HACK: these parameters have been extracted from a xtc_process run\n data = get_psana_corrected_data(psana_det, evt, use_default=False,\n dark=True, common_mode=None,\n apply_gain_mask=True, gain_mask_value=6.85,\n per_pixel_gain=False,\n additional_gain_factor=None)\n\n\n if data is None:\n print(\"ERROR! 
No data\")\n        return\n\n\n    timestamp = t = ts\n    s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]\n    print(\"Loaded shot\", s)\n\n\n\n@log\ndef test_xtc_read(ds, comm, det_name):\n\n    for run in ds.runs():\n\n        start(f\"run.Detector({ds.det_name})\")\n        det = run.Detector(ds.det_name)\n        stop(f\"run.Detector({ds.det_name})\")\n\n        # TODO: fix flex dependency\n        # if comm.Get_rank() == 0:\n        #     PS_CALIB_DIR = os.environ.get('PS_CALIB_DIR')\n        #     assert PS_CALIB_DIR\n        #     dials_mask = easy_pickle.load(params.format.cbf.invalid_pixel_mask)\n        # else:\n        #     dials_mask = None\n        # dials_mask = comm.bcast(dials_mask, root=0)\n\n        start(\"for evt in run.events()\")\n        for evt in run.events():\n            env_dxtbx_from_slac_metrology(run, det_name)\n\n            process_event(run, evt, det)\n        stop(\"for evt in run.events()\")\n\n\n\n\nif __name__ == \"__main__\":\n\n    # Default data\n    default_parameters = {\n        \"exp\" : \"cxid9114\",\n        \"run\" : 1,\n        \"dir\" : \"/img/data/xtc_test\",\n        \"max_events\" : 0,\n        \"det_name\" : \"cspad\"\n    }\n\n\n    # Input args allowed by psana.DataSource\n    psana_args = [\"exp\", \"run\", \"dir\", \"max_events\", \"det_name\", \"batch_size\"]\n\n\n    #\n    # Parse input arguments\n    #\n\n    parser = ArgumentParser()\n\n    for arg in psana_args:\n        parser.add_argument(f\"--{arg}\", help=\"psana.DataSource kwarg\")\n\n    parser.add_argument(\"--of\",\n                        help=\"Log dir -- every rank will write its own log file\")\n\n    # Get the args as a dict; set_defaults below fills in unset (None) values\n    args = vars(parser.parse_args())\n\n    output_name = args[\"of\"]\n    del args[\"of\"]  # don't pass this to psana\n\n    psana_kwargs = set_defaults(args, default_parameters)\n\n\n\n    #\n    # Initialize MPI\n    #\n\n    start(\"INIT MPI\")\n    from mpi4py import MPI\n    comm = MPI.COMM_WORLD\n    stop(\"INIT MPI\")\n\n    rank = comm.Get_rank()  # each process in MPI has a unique id, 0-indexed\n\n\n    #\n    # Run Benchmark\n    #\n\n    if rank == 0:\n        print(\"MPI Initialized, Running xtc_read Benchmark\")\n\n    start(f\"psana.DataSource({psana_kwargs})\")\n    ds = psana.DataSource(**psana_kwargs)\n    stop(f\"psana.DataSource({psana_kwargs})\")\n\n    test_xtc_read(ds, comm, psana_kwargs[\"det_name\"])\n\n\n    #\n    # Save log files\n    #\n\n    if rank == 0:\n        print(\"Writing logs\")\n\n    log_path = os.path.join(output_name, f\"debug_{rank}.txt\")\n    with open(log_path, \"w\") as f:\n        for entry in event_log(cctbx_fmt=True):\n            print(entry, file=f)\n", "repo_name": "JBlaschke/psana2_benchmarks", "sub_path": "opt/benchmark_xtc_read.py", "file_name": "benchmark_xtc_read.py", "file_ext": "py", "file_size_in_byte": 10502, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "benchmarking.start", "line_number": 45, "usage_type": "call"}, {"api_name": "psana.Detector", "line_number": 46, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 48, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 61, "usage_type": "call"}, {"api_name": "psana.Source", "line_number": 63, "usage_type": "call"}, {"api_name": "psana.ndarray_uint8_1", "line_number": 64, "usage_type": "argument"}, {"api_name": "benchmarking.stop", "line_number": 65, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 28, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 81, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 84, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 91, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 
73, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 136, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 142, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 146, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 148, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 152, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 154, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 167, "usage_type": "attribute"}, {"api_name": "benchmarking.stop", "line_number": 169, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 175, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 184, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 192, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 202, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 208, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 210, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 101, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 233, "usage_type": "call"}, {"api_name": "psana.EventId", "line_number": 238, "usage_type": "attribute"}, {"api_name": "psana.EventId", "line_number": 239, "usage_type": "attribute"}, {"api_name": "benchmarking.Event.as_timestamp", "line_number": 243, "usage_type": "call"}, {"api_name": "benchmarking.Event", "line_number": 243, "usage_type": "name"}, {"api_name": "benchmarking.stop", "line_number": 244, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 221, "usage_type": "name"}, {"api_name": "benchmarking.start", "line_number": 272, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 274, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 285, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 290, "usage_type": "call"}, {"api_name": "benchmarking.log", "line_number": 267, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 315, "usage_type": "call"}, {"api_name": "benchmarking.set_defaults", "line_number": 329, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 337, "usage_type": "call"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 339, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 339, "usage_type": "name"}, {"api_name": "benchmarking.stop", "line_number": 340, "usage_type": "call"}, {"api_name": "benchmarking.start", "line_number": 352, "usage_type": "call"}, {"api_name": "psana.DataSource", "line_number": 353, "usage_type": "call"}, {"api_name": "benchmarking.stop", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path", "line_number": 366, "usage_type": "attribute"}, {"api_name": "benchmarking.event_log", "line_number": 368, "usage_type": "call"}]} +{"seq_id": "40041528526", "text": "\"\"\"train_config.py: Parse training arguments and create config dictionnary.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport sys\nimport six\n\nfrom nmt_chainer.utilities import 
argument_parsing_tools\n\nlogging.basicConfig()\nlog = logging.getLogger(\"rnns:train_config\")\nlog.setLevel(logging.INFO)\n\n_CONFIG_SECTION_TO_DESCRIPTION = {\"model\": \"Model Description\",\n \"training\": \"Training Parameters\",\n \"training_management\": \"Training Management and Monitoring\"}\n\n\ndef define_parser(parser):\n parser.add_argument(\"data_prefix\", nargs=\"?\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"prefix of the training data created by make_data.py\")\n parser.add_argument(\"save_prefix\", nargs=\"?\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"prefix to be added to all files created during the training\")\n\n model_description_group = parser.add_argument_group(_CONFIG_SECTION_TO_DESCRIPTION[\"model\"])\n model_description_group.add_argument(\"--Ei\", type=int, default=640, help=\"Source words embedding size.\")\n model_description_group.add_argument(\"--Eo\", type=int, default=640, help=\"Target words embedding size.\")\n model_description_group.add_argument(\"--Hi\", type=int, default=1024, help=\"Source encoding layer size.\")\n model_description_group.add_argument(\"--Ho\", type=int, default=1024, help=\"Target hidden layer size.\")\n model_description_group.add_argument(\"--Ha\", type=int, default=1024, help=\"Attention Module Hidden layer size.\")\n model_description_group.add_argument(\"--Hl\", type=int, default=512, help=\"Maxout output size.\")\n model_description_group.add_argument(\"--encoder_cell_type\", default=\"lstm\", help=\"cell type of encoder. format: type,param1:val1,param2:val2,...\") # where type is in [%s]\"%(\" \".join(rnn_cells.cell_dict.keys())))\n model_description_group.add_argument(\"--decoder_cell_type\", default=\"lstm\", help=\"cell type of decoder. format same as for encoder\")\n model_description_group.add_argument(\"--lexical_probability_dictionary\", help=\"lexical translation probabilities in zipped JSON format. 
Used to implement https://arxiv.org/abs/1606.02006\")\n model_description_group.add_argument(\"--lexicon_prob_epsilon\", default=1e-3, type=float, help=\"epsilon value for combining the lexical probabilities\")\n model_description_group.add_argument(\"--use_deep_attn\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--use_accumulated_attn\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--init_orth\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--use_bn_length\", default=0, type=int)\n model_description_group.add_argument(\"--use_goto_attention\", default=False, action=\"store_true\")\n \n model_description_group.add_argument(\"--use_ff_model\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--ff_d_model\", type=int, default=512, help=\"FF model d_model\")\n model_description_group.add_argument(\"--ff_n_heads\", type=int, default=8, help=\"FF model number of attention heads\")\n model_description_group.add_argument(\"--ff_nb_layers_src\", type=int, default=6, help=\"FF model number of source layers\")\n model_description_group.add_argument(\"--ff_nb_layers_tgt\", type=int, default=6, help=\"FF model number of target layers\")\n model_description_group.add_argument(\"--ff_dropout\", type=float, help=\"FF model dropout\")\n model_description_group.add_argument(\"--ff_d_ff\", type=int, default=2048, help=\"FF model d_ff\")\n model_description_group.add_argument(\"--ff_use_exp_relu\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--ff_residual_mode\", default=\"normal\", choices=\"normal none after\".split())\n model_description_group.add_argument(\"--ff_no_normalize\", default=False, action=\"store_true\")\n model_description_group.add_argument(\"--use_own_layer_normalization\", default=False, action=\"store_true\")\n \n training_paramenters_group = parser.add_argument_group(_CONFIG_SECTION_TO_DESCRIPTION[\"training\"])\n training_paramenters_group.add_argument(\"--mb_size\", type=int, default=64, help=\"Minibatch size\")\n training_paramenters_group.add_argument(\"--nb_batch_to_sort\", type=int, default=20, help=\"Sort this many batches by size.\")\n training_paramenters_group.add_argument(\"--noise_on_prev_word\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--l2_gradient_clipping\", type=float, default=1, help=\"L2 gradient clipping. 0 for None\")\n training_paramenters_group.add_argument(\"--hard_gradient_clipping\", type=float, nargs=2, help=\"hard gradient clipping.\")\n training_paramenters_group.add_argument(\"--weight_decay\", type=float, help=\"Weight decay value. 
\")\n training_paramenters_group.add_argument(\"--optimizer\", choices=[\"sgd\", \"rmsprop\", \"rmspropgraves\",\n \"momentum\", \"nesterov\", \"adam\", \"scheduled_adam\", \"adagrad\", \"adadelta\"],\n default=\"adam\", help=\"Optimizer type.\")\n training_paramenters_group.add_argument(\"--learning_rate\", type=float, default=0.01, help=\"Learning Rate\")\n training_paramenters_group.add_argument(\"--momentum\", type=float, default=0.9, help=\"Momentum term\")\n training_paramenters_group.add_argument(\"--randomized_data\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--no_shuffle_of_training_data\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--use_reinf\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--use_previous_prediction\", default=0, type=float)\n training_paramenters_group.add_argument(\"--curiculum_training\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--reverse_src\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--reverse_tgt\", default=False, action=\"store_true\")\n \n training_paramenters_group.add_argument(\"--use_soft_prediction_feedback\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--use_gumbel_for_soft_predictions\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--temperature_for_soft_predictions\", type=float, default=1.0)\n\n\n training_paramenters_group.add_argument(\"--dynamic_batching\", default=False, action=\"store_true\")\n training_paramenters_group.add_argument(\"--dynamic_batching_max_elems\", type=int, default=10000)\n training_paramenters_group.add_argument(\"--dynamic_batching_nb_sent_to_sort\", type=int, default=5000)\n \n training_paramenters_group.add_argument(\"--load_initial_source_embeddings\")\n training_paramenters_group.add_argument(\"--load_initial_target_embeddings\")\n\n training_monitoring_group = parser.add_argument_group(_CONFIG_SECTION_TO_DESCRIPTION[\"training_management\"])\n training_monitoring_group.add_argument(\"--config\", help=\"load a training config file\")\n training_monitoring_group.add_argument(\"--data_prefix\", dest=\"data_prefix\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"same as positional argument --data_prefix\")\n training_monitoring_group.add_argument(\"--save_prefix\", dest=\"save_prefix\",\n action=argument_parsing_tools.ArgumentActionNotOverwriteWithNone,\n help=\"same as positional argument --save_prefix\")\n training_monitoring_group.add_argument(\"--gpu\", type=int, help=\"specify gpu number to use, if any\")\n training_monitoring_group.add_argument(\"--load_model\", help=\"load the parameters of a previously trained model\")\n training_monitoring_group.add_argument(\"--load_optimizer_state\", help=\"load previously saved optimizer states\")\n training_monitoring_group.add_argument(\"--load_trainer_snapshot\", help=\"load previously saved trainer states\")\n training_monitoring_group.add_argument(\"--use_memory_optimization\", default=False, action=\"store_true\",\n help=\"Experimental option that could strongly reduce memory used.\")\n training_monitoring_group.add_argument(\"--max_nb_iters\", type=int, default=None, help=\"maximum number of iterations\")\n training_monitoring_group.add_argument(\"--max_nb_epochs\", type=int, default=None, help=\"maximum number of epochs\")\n 
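# Illustrative command line (the prefixes below are hypothetical paths):\n    #   python train_config.py /data/corpus /models/run1 --optimizer adam \\\n    #       --learning_rate 0.01 --mb_size 64 --max_nb_epochs 10\n    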
training_monitoring_group.add_argument(\"--max_src_tgt_length\", type=int, help=\"Limit length of training sentences\")\n    training_monitoring_group.add_argument(\"--report_every\", type=int, default=200, help=\"report every x iterations\")\n    training_monitoring_group.add_argument(\"--no_resume\", default=False, action=\"store_true\")\n    training_monitoring_group.add_argument(\"--no_report_or_save\", default=False, action=\"store_true\")\n    training_monitoring_group.add_argument(\"--sample_every\", default=200, type=int)\n    training_monitoring_group.add_argument(\"--save_ckpt_every\", default=4000, type=int)\n    training_monitoring_group.add_argument(\"--save_initial_model_to\", help=\"save the initial model parameters to given file in npz format\")\n    training_monitoring_group.add_argument(\"--reshuffle_every_epoch\", default=False, action=\"store_true\", help=\"reshuffle training data at the end of each epoch\")\n    training_monitoring_group.add_argument(\"--resume\", default=False, action=\"store_true\", help=\"resume training from checkpoint config\")\n    training_monitoring_group.add_argument(\"--timer_hook\", default=False, action=\"store_true\", help=\"activate timer hook for profiling\")\n    training_monitoring_group.add_argument(\"--force_overwrite\", default=False, action=\"store_true\", help=\"Do not ask before overwriting existing files\")\n    training_monitoring_group.add_argument(\"--description\", help=\"Optional message to be stored in the configuration file\")\n\n    training_monitoring_group.add_argument(\"--set_false_in_config\", nargs=\"*\", help=\"Force the listed config options (given as dotted paths) to False\")\n\n    training_monitoring_group.add_argument(\"--update_old_config_file_with_default_values\",\n                                           default=False, action=\"store_true\", help=\"Add missing default values when loading an older config file\")\n\n    training_monitoring_group.add_argument(\"--generate_computation_graph\", help=\"will generate computation graph of the first loss computed\")\n\n    training_monitoring_group.add_argument(\"--disable_cudnn_softmax\", default=False, action=\"store_true\")\n    training_monitoring_group.add_argument(\"--use_chainerx\", default=False, action=\"store_true\", help=\"use chainerx\")\n\nclass CommandLineValuesException(Exception):\n    pass\n\n\ndef get_parse_option_orderer():\n    description_to_config_section = dict((v, k) for (k, v) in six.iteritems(_CONFIG_SECTION_TO_DESCRIPTION))\n    por = argument_parsing_tools.ParseOptionRecorder(group_title_to_section=description_to_config_section,\n                                                     ignore_positional_arguments=set([\"save_prefix\", \"data_prefix\"]))\n    define_parser(por)\n    return por\n\n\ndef convert_cell_string(config_training, no_error=False):\n    import nmt_chainer.models.rnn_cells_config\n\n    try:\n        if \"encoder_cell_type\" in config_training[\"model\"] and config_training[\"model\"][\"encoder_cell_type\"] is not None:\n            config_training[\"model\"][\"encoder_cell_type\"] = nmt_chainer.models.rnn_cells_config.create_cell_config_from_string(\n                config_training[\"model\"][\"encoder_cell_type\"])\n\n        if \"decoder_cell_type\" in config_training[\"model\"] and config_training[\"model\"][\"decoder_cell_type\"] is not None:\n            config_training[\"model\"][\"decoder_cell_type\"] = nmt_chainer.models.rnn_cells_config.create_cell_config_from_string(\n                config_training[\"model\"][\"decoder_cell_type\"])\n    except BaseException:\n        if not no_error:\n            raise\n
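\n\n# Illustrative cell-type strings for --encoder_cell_type / --decoder_cell_type,\n# following the \"type,param1:val1,param2:val2\" format documented above (the\n# parameter names here are hypothetical; the accepted ones are defined by\n# nmt_chainer.models.rnn_cells_config):\n#   --encoder_cell_type lstm\n#   --decoder_cell_type \"lstm,cell_size:1024\"\n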
\n\ndef load_config_train(filename, readonly=True, no_error=False):\n    config = argument_parsing_tools.OrderedNamespace.load_from(filename)\n    if \"metadata\" not in config:  # older config file\n        parse_option_orderer = get_parse_option_orderer()\n        config_training = parse_option_orderer.convert_args_to_ordered_dict(config[\"command_line\"], args_is_namespace=False)\n\n        convert_cell_string(config_training, no_error=no_error)\n\n        assert \"data\" not in config_training\n        config_training[\"data\"] = argument_parsing_tools.OrderedNamespace()\n        config_training[\"data\"][\"data_fn\"] = config[\"data\"]\n        config_training[\"data\"][\"Vi\"] = config[\"Vi\"]\n        config_training[\"data\"][\"Vo\"] = config[\"Vo\"]\n        config_training[\"data\"][\"voc\"] = config[\"voc\"]\n\n        assert \"metadata\" not in config_training\n        config_training[\"metadata\"] = argument_parsing_tools.OrderedNamespace()\n        config_training[\"metadata\"][\"config_version_num\"] = 0.9\n        config_training[\"metadata\"][\"command_line\"] = None\n        config_training[\"metadata\"][\"knmt_version\"] = None\n        config = config_training\n    elif config[\"metadata\"][\"config_version_num\"] != 1.0:\n        raise ValueError(\"The config version of %s is not supported by this version of the program\" % filename)\n\n    # Compatibility with intermediate versions of config file\n    if \"data_prefix\" in config and \"data_prefix\" not in config[\"training_management\"]:\n        config[\"training_management\"][\"data_prefix\"] = config[\"data_prefix\"]\n        del config[\"data_prefix\"]\n\n    if \"train_prefix\" in config and \"train_prefix\" not in config[\"training_management\"]:\n        config[\"training_management\"][\"train_prefix\"] = config[\"train_prefix\"]\n        del config[\"train_prefix\"]\n\n    if readonly:\n        config.set_readonly()\n    return config\n\n\ndef find_which_command_line_arguments_were_given(argument_list):\n    pwndan = argument_parsing_tools.ParserWithNoneDefaultAndNoGroup()\n    define_parser(pwndan)\n    args_given_set = pwndan.get_args_given(argument_list)\n    return args_given_set\n\n\ndef make_config_from_args(args, readonly=True):\n    config_base = None\n    if args.config is not None:\n        log.info(\"loading training config file %s\", args.config)\n        config_base = load_config_train(args.config, readonly=False)\n\n    if args.set_false_in_config is not None:\n        for option_name in args.set_false_in_config:\n            path_option = option_name.split(\".\")\n            last_dict = config_base\n            for level in six.moves.range(len(path_option) - 1):\n                # walk down the dotted path one level at a time\n                last_dict = last_dict[path_option[level]]\n            last_dict[path_option[-1]] = False\n\n    parse_option_orderer = get_parse_option_orderer()\n    config_training = parse_option_orderer.convert_args_to_ordered_dict(args)\n\n    convert_cell_string(config_training)\n\n    if config_base is not None:\n        args_given_set = find_which_command_line_arguments_were_given(\n            args.__original_argument_list)\n        for argname in set(args_given_set):\n            if getattr(args, argname) is None:\n                args_given_set.remove(argname)\n\n        print(\"args_given_set\", args_given_set)\n        config_base.update_recursive(config_training, valid_keys=args_given_set, add_absent_keys=args.update_old_config_file_with_default_values)\n        config_training = config_base\n    else:\n        assert \"data\" not in config_training\n        assert \"metadata\" not in config_training\n\n#     config_data_fn = config_training[\"data_prefix\"] + \".data.config\"\n\n    if config_training[\"training_management\"][\"data_prefix\"] is None or config_training[\"training_management\"][\"save_prefix\"] is None:\n        raise CommandLineValuesException(\"save_prefix and data_prefix need to be set either on the command line or in a config file\")\n\n    
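# Stamp the config with metadata (config_version_num, command line, knmt\n    # version) so that load_config_train recognises the current format on reload.\n    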
config_training.add_metadata_infos(version_num=1, overwrite=args.config is not None)\n\n    if readonly:\n        config_training.set_readonly()\n\n    return config_training\n\n\ndef command_line(arguments=None):\n    import argparse\n    parser = argparse.ArgumentParser(description=\"Train a RNNSearch model\",\n                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    define_parser(parser)\n    args = parser.parse_args(args=arguments)\n\n    do_train(args)\n\n\ndef do_train(args):\n    import nmt_chainer.training_module.train\n    config = make_config_from_args(args, readonly=False)\n    nmt_chainer.training_module.train.do_train(config)\n\n\nif __name__ == '__main__':\n    command_line()\n", "repo_name": "fabiencro/knmt", "sub_path": "nmt_chainer/training_module/train_config.py", "file_name": "train_config.py", "file_ext": "py", "file_size_in_byte": 18496, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 21, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 21, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 24, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 24, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 91, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 91, "usage_type": "name"}, {"api_name": 
"nmt_chainer.utilities.argument_parsing_tools.ArgumentActionNotOverwriteWithNone", "line_number": 94, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 94, "usage_type": "name"}, {"api_name": "six.iteritems", "line_number": 136, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ParseOptionRecorder", "line_number": 137, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 137, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.models.rnn_cells_config.create_cell_config_from_string", "line_number": 148, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.models", "line_number": 148, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities", "line_number": 148, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.models.rnn_cells_config.create_cell_config_from_string", "line_number": 152, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.models", "line_number": 152, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities", "line_number": 152, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace.load_from", "line_number": 160, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace", "line_number": 160, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 160, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace", "line_number": 168, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 168, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.OrderedNamespace", "line_number": 175, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 175, "usage_type": "name"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools.ParserWithNoneDefaultAndNoGroup", "line_number": 198, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.argument_parsing_tools", "line_number": 198, "usage_type": "name"}, {"api_name": "six.moves.range", "line_number": 214, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 214, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 283, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 284, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities.training_module.train.do_train", "line_number": 295, "usage_type": "call"}, {"api_name": "nmt_chainer.utilities.training_module", "line_number": 295, "usage_type": "attribute"}, {"api_name": "nmt_chainer.utilities", "line_number": 295, "usage_type": "name"}]} +{"seq_id": "39872997272", "text": "from django.contrib import admin\nfrom django.contrib.gis.admin import OSMGeoAdmin\n\n\nfrom django.views.decorators.cache import never_cache\nfrom django.contrib.admin import SimpleListFilter\nfrom .models import Resource,End_Point,Publisher,Tag,URL,Status_Log,Owner,Type,Geometry_Type,Format,Place,Named_Place, Category,Category_Keywords,Change_Log,Community_Input, Georeference_Request,URL_Type,URL\n\nfrom django.utils.safestring import mark_safe\n\nimport json\nfrom pygments import highlight\nfrom pygments.lexers import JsonLexer\nfrom pygments.formatters import HtmlFormatter\nfrom django.db import connection\n\nfrom django.contrib import messages\nfrom 
django.utils.translation import ngettext\n\nfrom django.http import HttpResponseRedirect\nimport resources.ingester.Delete_From_Solr as Delete_From_Solr\nimport resources.ingester.DB_ToGBL as db_to_gbl\nimport resources.ingester.Publish_ToGBL as publish_to_gbl\nfrom django.shortcuts import render\n\nimport decimal\nfrom django.contrib.gis.geos import Point, WKTWriter\nfrom django.contrib.gis.geos import GEOSGeometry\n\nimport os\nimport glob\nimport sys\nsys.setrecursionlimit(10000)\n\n# # Register the models\n# class MyModelAdmin(admin.ModelAdmin):\n#     list_display = ('id', 'description')\n\nclass MyAdminSite(admin.AdminSite):\n    # @never_cache\n    site_header = 'Geoportal Administration'\n\nadmin_site = MyAdminSite(name='myadmin')\n\n\n# inline editing of the URLs attached to a Resource\nclass URLInline(admin.StackedInline):\n    model = URL\n    list_display = ('url', 'url_type', 'url_label', 'get_link', )\n\n\n    fieldsets = [\n        (None, {'fields': [('url','get_link')]}),\n        (None, {'fields': [('url_type','url_label')]}),\n        (None, {'fields': [('geo_reference')]}),\n    ]\n    readonly_fields = [\"get_link\",\"geo_reference\"]\n\n    extra = 0\n\n    def get_link(self, obj):\n        if obj.pk:\n            # NOTE: assumed markup; the original anchor tag was stripped from\n            # this copy of the file and only the link text survived\n            html = \"<a href='{}' target='_blank'>Go</a>\".format(obj.url)\n            return mark_safe(html)\n        else:\n            return '-'\n\n    get_link.short_description = (\"Link\")\n    get_link.allow_tags = True\n\n    def geo_reference(self, obj):\n        if obj.pk and str(obj.url_type) == 'image':\n            corners = \"\"\n            if obj.resource.bounding_box:\n                points = []\n                for b in obj.resource.bounding_box:\n                    for p in b:\n                        points.append(str(p[0]) + \" \" + str(p[1]))\n                corners = \"&d=\" + ','.join(points)\n            solr_id = str(obj.resource.resource_id) + \"-\" + str(obj.resource.end_point.id)\n            # NOTE: assumed markup and URL pattern; the original anchor tag\n            # (which used solr_id and corners) was stripped from this copy\n            html = \"<a href='/georeference/?id=\" + solr_id + corners + \"' target='_blank'>Open Georeferencer</a>\"\n            return mark_safe(html)\n        else:\n            return '-'\n\n    geo_reference.short_description = (\"Geo Reference\")\n    geo_reference.allow_tags = True\n\n\nclass Status_LogInline(admin.StackedInline):\n    model = Status_Log\n    extra = 0\n\nclass Change_LogInline(admin.StackedInline):\n    model = Change_Log\n    classes = ['collapse']\n    # readonly_fields = ('field_name', \"date_\", \"change_type\")\n    fieldsets = [\n        (None, {'fields': [('field_name', \"date\", \"change_type\")]}),\n        (None, {'fields': ['new']}),\n        (None, {'fields': ['old']}),\n        (None, {'fields': ['community_input']})\n\n    ]\n    extra = 0\n\nclass ParentInline(admin.StackedInline):\n    model = Resource.parent.through\n    fk_name = \"from_resource\"  # other candidates (\"parent_resource\", \"resource_id\", \"parent_id\", from_resource_id, to_resource_id) did not work\n    classes = ['collapse']\n    verbose_name = \"Parent Resource\"\n    verbose_name_plural = \"Parent Resources\"\n    extra = 0\n    show_change_link = True\n\nclass ChildrenInline(admin.StackedInline):\n    model = Resource.parent.through\n    fk_name = \"to_resource\"\n    classes = ['collapse']\n    verbose_name = \"Child Resource\"\n    verbose_name_plural = \"Child Resources\"\n    extra = 0\n    show_change_link = True\n\nclass ParentFilter(admin.SimpleListFilter):\n    title = 'Root Resource'\n    parameter_name = 'is_parent'\n\n    def lookups(self, request, model_admin):\n        return (\n            ('Yes', 'Yes'),\n            ('No', 'No'),\n        )\n\n    def queryset(self, request, queryset):\n        value = self.value()\n        if value == 'Yes':\n            return queryset.filter(parent=None)\n        elif value == 'No':\n            return queryset.exclude(parent=None)\n        return queryset\n\n# @admin.register(Resource)\nclass ResourceAdmin(OSMGeoAdmin):\n    list_filter = ('end_point',\"type\",\"status_type\",\"owner\",ParentFilter,\"missing\")\n    search_fields = 
('title','alt_title','description','resource_id',)\n    list_display = ('title', 'year','end_point','get_thumb_small','type','get_category','status_type',\"child_count\",\"accessioned\")\n\n    readonly_fields = ('get_thumb',\"_layer_json\",\"_raw_json\",\"get_tags\",\"get_named_places\",\"get_category\",\"child_count\",\"preview\")\n\n    autocomplete_fields = (\"tag\",\"named_place\",\"owner\", \"publisher\")\n    fieldsets = [\n        (None, {'fields': [('resource_id','preview'),'year','temporal_coverage']}),\n        (None, {'fields': [('title', 'alt_title')]}),\n        (None, {'fields': ['status_type','end_point',\"missing\"]}),\n        (None, {'fields': [('resource_type')]}),\n        (None, {'fields': [('type', 'geometry_type', \"format\")]}),\n\n        (None, {'fields': [\"get_thumb\", \"thumbnail\"]}),\n        (None, {'fields': [(\"owner\", \"publisher\")]}),\n        (None, {'fields': [(\"created\",\"modified\",\"accessioned\")]}),\n\n        (None, {'fields': ['description']}),\n        (None, {'fields': ['bounding_box']}),\n\n        (None, {'fields': [\"languages\",\"category\"]}),\n        (None, {'fields': [( \"get_tags\",\"tag\")]}),\n        (None, {'fields': [(\"get_named_places\",\"named_place\")]}),\n\n\n        (None, {'fields': [\"_raw_json\"]}),\n        (None, {'fields': [\"_layer_json\"]}),\n        (None, {'fields': [\"license_info\"]}),\n\n    ]\n\n    def child_count(self, obj=None):\n        with connection.cursor() as cursor:\n            cursor.execute(\"Select count(id) from resources_resource_parent where to_resource_id={};\".format(obj.id))\n\n            return (cursor.fetchone()[0])\n\n\n\n    def get_tags(self, obj=None):\n        return \", \".join([t.name for t in obj.tag.all()])\n\n    def get_named_places(self, obj=None):\n        return \", \".join([p.name for p in obj.named_place.all()])\n\n    def get_category(self, obj):\n        return \",\".join([p.name for p in obj.category.all()])\n\n    def get_thumb(self, obj=None):\n        # NOTE: assumed markup (including the width); the original <img> tag\n        # was stripped from this copy of the file\n        html = '<img src=\"{}\" width=\"400\"/>'.format(obj.thumbnail) if obj.thumbnail else \"\"\n        return mark_safe(html)\n\n    def get_thumb_small(self, obj=None):\n        # assumed markup, as in get_thumb above\n        html = '<img src=\"{}\" width=\"100\"/>'.format(obj.thumbnail) if obj.thumbnail else \"\"\n        return mark_safe(html)\n\n    def _raw_json(self, obj=None):\n        return mark_safe(get_pretty_json(obj.raw_json)) if obj.raw_json else \"\"\n\n    def _layer_json(self, obj=None):\n        return mark_safe(get_pretty_json(obj.layer_json)) if obj.layer_json else \"\"\n\n    inlines = [\n        ParentInline,\n        ChildrenInline,\n        URLInline,\n        Status_LogInline,\n        Change_LogInline\n    ]\n\n    def get_actions(self, request):\n        actions = super().get_actions(request)\n        if 'delete_selected' in actions:\n            del actions['delete_selected']\n        return actions\n\n    actions = [\"add_selected_resources_to_staging\",\"delete_selected_resources\", 'remove_selected_resources_from_index_staging']\n\n    def add_selected_resources_to_staging(self, request, queryset):\n        # first export\n\n        directory = os.path.dirname(os.path.realpath(__file__)) + \"/ingester\"\n        verbosity = 1\n        # clear the directory\n        if os.path.exists(directory + \"/json\"):\n            files = glob.glob(directory + \"/json/*\")\n            if verbosity > 1:\n                print(\"removing existing files from past ingest for a fresh start!\")\n\n            for f in files:\n                os.remove(f)\n\n        # associate the children with each parent resource before export; if a\n        # child is selected, its parent's status is what matters here\n        for r in queryset:\n            # todo - need a better way than just relying upon the parent status\n            r.layers = 
Resource.objects.filter(status_type=r.status_type, parent=r.id)\n            print(\"The layers are:\", r.layers)\n\n        exporter = db_to_gbl.DB_ToGBL({\n            \"resources\": queryset,\n            \"path\": directory + \"/\",\n            \"verbosity\": verbosity\n        })\n        # then ingest\n        publish_to_gbl.Publish_ToGBL({\n            \"path\": directory + \"/json\",\n            \"verbosity\": verbosity\n        })\n        # set status to ingested-to-staging\n        updated = queryset.update(status_type='is')\n        self.message_user(request, ngettext(\n            '%d resource was successfully ingested to Staging.',\n            '%d resources were successfully ingested to Staging.',\n            updated,\n        ) % updated, messages.SUCCESS)\n\n    add_selected_resources_to_staging.short_description = \"Ingest to Staging\"\n\n    def remove_selected_resources_from_index_staging(self, request, queryset):\n        deleter = Delete_From_Solr.Delete_From_Solr({})\n        # set status to removed-from-staging\n        updated = queryset.update(status_type='rs')\n        for obj in queryset:\n            # remove from solr\n            print(\"DELETE---\", obj.resource_id + \"-\" + str(obj.end_point.id))\n            deleter.interface.delete_one_record(\"\\\"\" + obj.resource_id + \"-\" + str(obj.end_point.id) + \"\\\"\")\n\n        self.message_user(request, ngettext(\n            '%d resource was successfully removed from Staging.',\n            '%d resources were successfully removed from Staging.',\n            updated,\n        ) % updated, messages.SUCCESS)\n\n    remove_selected_resources_from_index_staging.short_description = \"Remove from Staging\"\n\n    def delete_selected_resources(self, request, queryset):\n\n        if 'apply' in request.POST:\n            # The user clicked submit on the intermediate form.\n            # Perform our update action:\n            # prevent postgres from hanging - https://stackoverflow.com/questions/62439261/postgres-delete-hangs-on-a-table-with-a-self-referential-foreign-key\n            with connection.cursor() as cursor:\n                cursor.execute(\"ALTER TABLE resources_resource DISABLE TRIGGER ALL;\")\n\n            for obj in queryset:\n                obj.delete()\n\n            with connection.cursor() as cursor:\n                cursor.execute(\"ALTER TABLE resources_resource ENABLE TRIGGER ALL;\")\n            # Redirect to our admin view after our update has\n            # completed with a nice little info message saying\n            # our models have been updated:\n            self.message_user(request,\n                              \" {} Resources Deleted!\".format(queryset.count()))\n            return HttpResponseRedirect(request.get_full_path())\n\n        return render(request,\n                      'admin/delete.html',\n                      context={'resources': queryset})\n\n    def save_model(self, request, obj, form, change):\n\n        try:\n            # attempt to match precision and prevent unexpected change\n            # use first point as determinant\n            # todo make this more robust\n            first_point = str(self.model.objects.get(id=obj.id).bounding_box[0][0][0])\n            precision = len(first_point[first_point.index(\".\") + 1:])\n            wkt_w = WKTWriter()\n            wkt_w.precision = precision\n            obj.bounding_box = GEOSGeometry(wkt_w.write(obj.bounding_box))\n        except Exception:\n            pass\n        # pass request.user to save() to distinguish automated saves from admin edits\n        try:\n            obj.save(request.user)\n        except Exception:\n            pass\n\n    def preview(self, obj):\n        if obj.pk:\n            # NOTE: assumed markup and URL; the original anchor tag was\n            # stripped from this copy of the file\n            html = \"<a href='/?id=\" + str(obj.resource_id) + \"-\" + str(obj.end_point.id) + \"' target='_blank'>Preview</a>\"\n            return mark_safe(html)\n        else:\n            return '-'\n\n    preview.short_description = (\"Preview\")\n    preview.allow_tags = True\n\n\n\ndef get_pretty_json(_json):\n    \"\"\"Function to display pretty version of our data REF: https://www.pydanny.com/pretty-formatting-json-django-admin.html\"\"\"\n    # Convert the data to sorted, indented JSON\n    response = json.dumps(_json, sort_keys=True, indent=2)\n\n    # Get the Pygments formatter\n    formatter = 
HtmlFormatter(style='colorful')\n # Highlight the data\n response = highlight(response, JsonLexer(), formatter)\n\n # Get the stylesheet\n return \"
<style>\" + formatter.get_style_defs() + \"</style>\" + response + \"<br>
\"\n\n\nadmin_site.register(Resource, ResourceAdmin)\n\nclass End_PointAdmin(OSMGeoAdmin):\n pass\n\nadmin_site.register(End_Point, End_PointAdmin)\n\nclass PublisherAdmin(OSMGeoAdmin):\n search_fields = ('name',)\n pass\nadmin_site.register(Publisher, PublisherAdmin)\n\nclass Community_InputAdmin(OSMGeoAdmin):\n list_display = [\"resource\",\"date\",\"name\", \"email\"]\n raw_id_fields = (\"resource\",)\nadmin_site.register(Community_Input, Community_InputAdmin)\n\nclass Georeference_RequestAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Georeference_Request, Georeference_RequestAdmin)\n\n\nclass OwnerAdmin(OSMGeoAdmin):\n # enable a full_name overwrite when available\n list_display=[\"name\",\"full_name\"]\n search_fields = (\"name\",\"full_name\")\n\nadmin_site.register(Owner, OwnerAdmin)\n\nclass TypeAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Type, TypeAdmin)\n\nclass Geometry_TypeAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Geometry_Type, Geometry_TypeAdmin)\n\nclass FormatAdmin(OSMGeoAdmin):\n pass\nadmin_site.register(Format, FormatAdmin)\n\n\nclass Category_KeywordsInline(admin.StackedInline):\n model = Category_Keywords.category.through\n extra = 0\n\nclass Category_KeywordsAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Category_Keywords, Category_KeywordsAdmin)\n\nclass CategoryAdmin(OSMGeoAdmin):\n inlines = [\n Category_KeywordsInline\n ]\nadmin_site.register(Category, CategoryAdmin)\n\n\nclass TagAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Tag, TagAdmin)\n\nclass PlaceAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Place, PlaceAdmin)\n\nclass Named_PlaceAdmin(OSMGeoAdmin):\n search_fields = ('name',)\nadmin_site.register(Named_Place, Named_PlaceAdmin)\n\nclass URL_TypeAdmin(OSMGeoAdmin):\n list_display = ('name', 'ref', 'service', '_class', '_method')\n\nadmin_site.register(URL_Type,URL_TypeAdmin)\n\nclass URLAdmin(OSMGeoAdmin):\n list_filter = (\"url_type\",)\n\nadmin_site.register(URL,URLAdmin)\n\n", "repo_name": "GeospatialCentroid/geoportal-manager", "sub_path": "resources/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 15060, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.admin.AdminSite", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 47, "usage_type": "name"}, {"api_name": "models.URL", "line_number": 48, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 64, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 90, "usage_type": "name"}, {"api_name": "models.Status_Log", "line_number": 91, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 94, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 94, "usage_type": "name"}, {"api_name": "models.Change_Log", "line_number": 95, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", 
"line_number": 107, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 107, "usage_type": "name"}, {"api_name": "models.Resource.parent", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 108, "usage_type": "name"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 116, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 116, "usage_type": "name"}, {"api_name": "models.Resource.parent", "line_number": 117, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 117, "usage_type": "name"}, {"api_name": "django.contrib.admin.SimpleListFilter", "line_number": 125, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 125, "usage_type": "name"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 144, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 178, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 178, "usage_type": "name"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 197, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 202, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 205, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 233, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 238, "usage_type": "call"}, {"api_name": "models.Resource.objects.filter", "line_number": 243, "usage_type": "call"}, {"api_name": "models.Resource.objects", "line_number": 243, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 243, "usage_type": "name"}, {"api_name": "models.Resource.objects.filter", "line_number": 249, "usage_type": "call"}, {"api_name": "models.Resource.objects", "line_number": 249, "usage_type": "attribute"}, {"api_name": "models.Resource", "line_number": 249, "usage_type": "name"}, {"api_name": "resources.ingester.DB_ToGBL.DB_ToGBL", "line_number": 252, "usage_type": "call"}, {"api_name": "resources.ingester.DB_ToGBL", "line_number": 252, "usage_type": "name"}, {"api_name": "resources.ingester.Publish_ToGBL.Publish_ToGBL", "line_number": 258, "usage_type": "call"}, {"api_name": "resources.ingester.Publish_ToGBL", "line_number": 258, "usage_type": "name"}, {"api_name": "django.utils.translation.ngettext", "line_number": 264, "usage_type": "call"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 268, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 268, "usage_type": "name"}, {"api_name": "resources.ingester.Delete_From_Solr.Delete_From_Solr", "line_number": 273, "usage_type": "call"}, {"api_name": "resources.ingester.Delete_From_Solr", "line_number": 273, "usage_type": "name"}, {"api_name": "django.utils.translation.ngettext", "line_number": 281, "usage_type": "call"}, {"api_name": "django.contrib.messages.SUCCESS", "line_number": 285, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", 
"line_number": 285, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 295, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 295, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 302, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 302, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 309, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 311, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.WKTWriter", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.GEOSGeometry", "line_number": 325, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 340, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 352, "usage_type": "call"}, {"api_name": "pygments.formatters.HtmlFormatter", "line_number": 355, "usage_type": "call"}, {"api_name": "pygments.highlight", "line_number": 357, "usage_type": "call"}, {"api_name": "pygments.lexers.JsonLexer", "line_number": 357, "usage_type": "call"}, {"api_name": "models.Resource", "line_number": 363, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 365, "usage_type": "name"}, {"api_name": "models.End_Point", "line_number": 368, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 370, "usage_type": "name"}, {"api_name": "models.Publisher", "line_number": 373, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 375, "usage_type": "name"}, {"api_name": "models.Community_Input", "line_number": 378, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 380, "usage_type": "name"}, {"api_name": "models.Georeference_Request", "line_number": 382, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 385, "usage_type": "name"}, {"api_name": "models.Owner", "line_number": 390, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 392, "usage_type": "name"}, {"api_name": "models.Type", "line_number": 394, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 396, "usage_type": "name"}, {"api_name": "models.Geometry_Type", "line_number": 398, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 400, "usage_type": "name"}, {"api_name": "models.Format", "line_number": 402, "usage_type": "argument"}, {"api_name": "django.contrib.admin.StackedInline", "line_number": 405, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 405, "usage_type": "name"}, {"api_name": "models.Category_Keywords.category", "line_number": 406, "usage_type": "attribute"}, {"api_name": "models.Category_Keywords", "line_number": 406, "usage_type": "name"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 409, "usage_type": "name"}, {"api_name": "models.Category_Keywords", "line_number": 411, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 413, "usage_type": "name"}, {"api_name": "models.Category", "line_number": 417, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 420, "usage_type": "name"}, {"api_name": "models.Tag", "line_number": 422, "usage_type": "argument"}, 
{"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 424, "usage_type": "name"}, {"api_name": "models.Place", "line_number": 426, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 428, "usage_type": "name"}, {"api_name": "models.Named_Place", "line_number": 430, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 432, "usage_type": "name"}, {"api_name": "models.URL_Type", "line_number": 435, "usage_type": "argument"}, {"api_name": "django.contrib.gis.admin.OSMGeoAdmin", "line_number": 437, "usage_type": "name"}, {"api_name": "models.URL", "line_number": 440, "usage_type": "argument"}]} +{"seq_id": "42268205687", "text": "import numpy as np\nimport json\nimport pdb\nimport networkx as nx\nimport time\nimport matplotlib\n\nimport networkx as nx\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nimport grandalf\nfrom grandalf.layouts import SugiyamaLayout\n\nfrom .utils import SOURCE_NODE\nfrom evaluation.cost_model import update_subplan_costs\n\nCROSS_JOIN_CARD = 19329323\n\ndef _find_all_tables(plan):\n '''\n '''\n # find all the scan nodes under the current level, and return those\n table_names = extract_values(plan, \"Relation Name\")\n alias_names = extract_values(plan, \"Alias\")\n table_names.sort()\n alias_names.sort()\n\n return table_names, alias_names\n\ndef explain_to_nx(explain):\n '''\n '''\n base_table_nodes = []\n join_nodes = []\n\n def _get_node_name(tables):\n name = \"\"\n if len(tables) > 1:\n name = str(deterministic_hash(str(tables)))[0:5]\n join_nodes.append(name)\n else:\n name = tables[0]\n if len(name) >= 6:\n # no aliases, shorten it\n name = \"\".join([n[0] for n in name.split(\"_\")])\n if name in base_table_nodes:\n name = name + \"2\"\n base_table_nodes.append(name)\n return name\n\n def _add_node_stats(node, plan):\n # add stats for the join\n G.nodes[node][\"Plan Rows\"] = plan[\"Plan Rows\"]\n if \"Actual Rows\" in plan:\n G.nodes[node][\"Actual Rows\"] = plan[\"Actual Rows\"]\n else:\n G.nodes[node][\"Actual Rows\"] = -1.0\n\n if \"Node Type\" in plan:\n G.nodes[node][\"Node Type\"] = plan[\"Node Type\"]\n total_cost = plan[\"Total Cost\"]\n G.nodes[node][\"Total Cost\"] = total_cost\n aliases = G.nodes[node][\"aliases\"]\n if len(G.nodes[node][\"tables\"]) > 1:\n children_cost = plan[\"Plans\"][0][\"Total Cost\"] \\\n + plan[\"Plans\"][1][\"Total Cost\"]\n\n # +1 to avoid cases which are very close\n # if not total_cost+1 >= children_cost:\n # print(\"aliases: {} children cost: {}, total cost: {}\".format(\\\n # aliases, children_cost, total_cost))\n # pdb.set_trace()\n G.nodes[node][\"cur_cost\"] = total_cost - children_cost\n G.nodes[node][\"node_label\"] = plan[\"Node Type\"][0]\n G.nodes[node][\"scan_type\"] = \"\"\n else:\n # FIXME: debug\n G.nodes[node][\"cur_cost\"] = total_cost\n G.nodes[node][\"node_label\"] = node\n # what type of scan was this?\n node_types = extract_values(plan, \"Node Type\")\n for i, full_n in enumerate(node_types):\n shortn = \"\"\n for n in full_n.split(\" \"):\n shortn += n[0]\n node_types[i] = shortn\n\n scan_type = \"\\n\".join(node_types)\n G.nodes[node][\"scan_type\"] = scan_type\n\n def traverse(obj):\n if isinstance(obj, dict):\n if \"Plans\" in obj:\n if len(obj[\"Plans\"]) == 2:\n # these are all the joins\n left_tables, left_aliases = _find_all_tables(obj[\"Plans\"][0])\n right_tables, right_aliases = 
_find_all_tables(obj[\"Plans\"][1])\n                    if len(left_tables) == 0 or len(right_tables) == 0:\n                        return\n                    all_tables = left_tables + right_tables\n                    all_aliases = left_aliases + right_aliases\n                    all_aliases.sort()\n                    all_tables.sort()\n\n                    if len(left_aliases) > 0:\n                        node0 = _get_node_name(left_aliases)\n                        node1 = _get_node_name(right_aliases)\n                        node_new = _get_node_name(all_aliases)\n                    else:\n                        node0 = _get_node_name(left_tables)\n                        node1 = _get_node_name(right_tables)\n                        node_new = _get_node_name(all_tables)\n\n                    # update graph\n                    G.add_edge(node_new, node0)\n                    G.add_edge(node_new, node1)\n                    G.edges[(node_new, node0)][\"join_direction\"] = \"left\"\n                    G.edges[(node_new, node1)][\"join_direction\"] = \"right\"\n\n                    # add other parameters on the nodes\n                    G.nodes[node0][\"tables\"] = left_tables\n                    G.nodes[node1][\"tables\"] = right_tables\n                    G.nodes[node0][\"aliases\"] = left_aliases\n                    G.nodes[node1][\"aliases\"] = right_aliases\n                    G.nodes[node_new][\"tables\"] = all_tables\n                    G.nodes[node_new][\"aliases\"] = all_aliases\n\n                    # TODO: if either the left, or right were a scan, then add\n                    # scan stats\n                    _add_node_stats(node_new, obj)\n\n                    if len(left_tables) == 1:\n                        _add_node_stats(node0, obj[\"Plans\"][0])\n                    if len(right_tables) == 1:\n                        _add_node_stats(node1, obj[\"Plans\"][1])\n\n            for k, v in obj.items():\n                if isinstance(v, (dict, list)):\n                    traverse(v)\n\n        elif isinstance(obj, list) or isinstance(obj, tuple):\n            for item in obj:\n                traverse(item)\n\n    G = nx.DiGraph()\n    traverse(explain)\n    G.base_table_nodes = base_table_nodes\n    G.join_nodes = join_nodes\n    return G\n\nNODE_COLORS = {}\n# NODE_COLORS[\"Hash Join\"] = 'b'\n# NODE_COLORS[\"Merge Join\"] = 'r'\n# NODE_COLORS[\"Nested Loop\"] = 'c'\n\nNODE_COLORS[\"Index Scan\"] = 'k'\nNODE_COLORS[\"Seq Scan\"] = 'k'\nNODE_COLORS[\"Bitmap Heap Scan\"] = 'k'\n\nNODE_COLORS[\"Hash\"] = 'k'\nNODE_COLORS[\"Materialize\"] = 'k'\nNODE_COLORS[\"Sort\"] = 'k'\n\n# for signifying whether the join was a left join or right join\nEDGE_COLORS = {}\nEDGE_COLORS[\"left\"] = \"k\"\nEDGE_COLORS[\"right\"] = \"k\"\n\ndef _plot_join_order_graph(G, base_table_nodes, join_nodes, pdf, title,\n        fn):\n\n    def format_ints(num):\n        # round to the nearest thousand and append K\n        return str(round(num, -3)).replace(\"000\",\"\") + \"K\"\n\n    def _plot_labels(xdiff, ydiff, key, font_color, font_size):\n        labels = {}\n        label_pos = {}\n        for k, v in pos.items():\n            label_pos[k] = (v[0]+xdiff, v[1]+ydiff)\n            if key in G.nodes[k]:\n                if is_float(G.nodes[k][key]):\n                    labels[k] = format_ints(G.nodes[k][key])\n                else:\n                    labels[k] = G.nodes[k][key]\n            else:\n                # fall back to a placeholder when the stat is missing\n                labels[k] = -1\n\n        nx.draw_networkx_labels(G, label_pos, labels,\n            font_size=font_size, font_color=font_color, ax=ax)\n\n    fig,ax = plt.subplots(figsize=(8,7))\n    NODE_SIZE = 600\n\n    # graphviz's dot would give the nicest layered layout here:\n    # pos = graphviz_layout(G, prog='dot')\n    # pos = graphviz_layout(G, prog='dot',\n    #                       args='-Gnodesep=0.05')\n\n    # graphviz produces better layouts, but it is difficult to install, so the\n    # pure-python grandalf is an acceptable fallback\n    # G = G.reverse(copy=True)\n\n    g = grandalf.utils.convert_nextworkx_graph_to_grandalf(G) # undocumented function\n    class defaultview(object):\n        w, h = 10, 10\n    for v in g.V(): v.view = defaultview()\n    sug = SugiyamaLayout(g.C[0])\n    sug.init_all() # roots=[V[0]])\n    # sug.init_all(roots=[g.V[0]],inverted_edges=[g.V[4].e_to(g.V[0])])\n    # This is a bit of a misnomer, as grandalf doesn't actually come with any\n    # visualization methods. 
This method instead calculates positions\n    sug.draw() # Extracts the positions\n    pos = {v.data: (v.view.xy[0], v.view.xy[1]) for v in g.C[0].sV}\n\n    # ugly hacks; we want to draw the graph upside down relative to what grandalf gives\n    # us (graphviz actually gave the correct layout...)\n    ys = []\n    levels = {}\n    leveltoy = {}\n    newlevels = {}\n    for k,v in pos.items():\n        ys.append(v[1])\n    ys.sort()\n    ys = np.unique(ys)\n    level = 0\n    for y in ys:\n        levels[y] = level\n        leveltoy[level] = y\n        newlevels[y] = len(ys)-1-level\n        level += 1\n    pos2 = {}\n    for k,v in pos.items():\n        lv = newlevels[v[1]]\n        newy = leveltoy[lv]\n        pos2[k] = (v[0], newy)\n\n    pos = pos2\n\n    plt.title(title)\n    color_intensity = [G.nodes[n][\"cur_cost\"] for n in G.nodes()]\n    vmin = min(color_intensity)\n    vmax = max(color_intensity)\n    # cmap = 'viridis_r'\n    cmap = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"green\",\"yellow\",\"red\"])\n\n    nx.draw_networkx_nodes(G, pos,\n               node_size=NODE_SIZE,\n               node_color = color_intensity,\n               cmap = cmap,\n               alpha=0.2,\n               vmin=vmin, vmax=vmax,\n               ax=ax)\n\n    sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin = vmin,\n        vmax=vmax))\n\n    sm._A = []\n    plt.colorbar(sm, alpha=0.2, fraction=0.1, pad=0.0,\n            label=\"PostgreSQL Estimated Cost\")\n\n    _plot_labels(0, -10, \"est_card\", \"b\", 8)\n    _plot_labels(0, +10, \"true_card\", \"darkorange\", 8)\n    _plot_labels(0, 0, \"node_label\", \"k\", 14)\n\n    patch1 = mpatches.Patch(color='b', label='Estimated Cardinality')\n    patch2 = mpatches.Patch(color='darkorange', label='True Cardinality')\n    plt.legend(handles=[patch1,patch2])\n\n    # TODO: shape of node based on scan types\n    # _plot_labels(+25, +5, \"scan_type\", \"b\", 10)\n\n    x_values, y_values = zip(*pos.values())\n    x_max = max(x_values)\n    x_min = min(x_values)\n    x_margin = (x_max - x_min) * 0.10\n    plt.xlim(x_min - x_margin, x_max + x_margin)\n\n    edge_colors = []\n    for edge in G.edges():\n        edge_colors.append(EDGE_COLORS[G.edges[edge][\"join_direction\"]])\n\n    nx.draw_networkx_edges(G, pos, width=1.0,\n            alpha=1.0, arrows=False,\n            edge_color=edge_colors, ax=ax)\n    plt.tight_layout()\n\n    if pdf is not None:\n        pdf.savefig()\n    elif fn is not None:\n        plt.savefig(fn)\n    else:\n        plt.show()\n\n    plt.close()\n\ndef plot_explain_join_order(explain, true_cardinalities,\n        est_cardinalities, pdf, title, fn=None):\n    '''\n    @true_cardinalities: dict for this particular explain\n    '''\n    G = explain_to_nx(explain)\n    for node in G.nodes():\n        aliases = G.nodes[node][\"aliases\"]\n        aliases.sort()\n        card_key = \" \".join(aliases)\n        if true_cardinalities is None:\n            G.nodes[node][\"est_card\"] = G.nodes[node][\"Plan Rows\"]\n            G.nodes[node][\"true_card\"] = G.nodes[node][\"Actual Rows\"]\n        elif card_key in true_cardinalities:\n            G.nodes[node][\"est_card\"] = est_cardinalities[card_key]\n            G.nodes[node][\"true_card\"] = true_cardinalities[card_key]\n        elif tuple(aliases) in true_cardinalities:\n            G.nodes[node][\"est_card\"] = est_cardinalities[tuple(aliases)]\n            G.nodes[node][\"true_card\"] = true_cardinalities[tuple(aliases)]\n        else:\n            # unknown, might be a cross-join?\n            G.nodes[node][\"est_card\"] = CROSS_JOIN_CARD\n            G.nodes[node][\"true_card\"] = CROSS_JOIN_CARD\n            # pdb.set_trace()\n\n    _plot_join_order_graph(G, G.base_table_nodes, G.join_nodes, pdf, title, fn)\n    return G\n\ndef draw_plan_graph(subsetg, y, cost_model, ax=None,\n        source_node=SOURCE_NODE, final_node=None, font_size=40,\n        cbar_fontsize=24, cax=None, fig=None, width=None,\n        edge_color=None,\n        bold_opt_path=True, bold_path=None):\n\n    for n in subsetg.nodes():\n        
joined = \" \\Join \".join(n)\n joined = \"$\" + joined + \"$\"\n subsetg.nodes()[n][\"label\"] = joined\n\n if y is not None and cost_model is not None:\n cost_key = \"tmp_cost\"\n subsetg = subsetg.reverse()\n tcost = update_subplan_costs(subsetg, cost_model,\n cost_key=cost_key, ests=y)\n\n # TODO: need to add the flow-loss computing module\n # flows, edges = get_flows(subsetg, cost_model+cost_key)\n # Flow-Loss specific widths\n # MIN: 2...6\n # MIN_WIDTH = 1.0\n # MAX_WIDTH = 30.0\n # NEW_RANGE = MAX_WIDTH - MIN_WIDTH\n # OLD_RANGE = max(flows) - min(flows)\n\n # edge_widths = {}\n # for i, x in enumerate(flows):\n # normx = (((x - min(flows))*NEW_RANGE) / OLD_RANGE) + MIN_WIDTH\n # edge_widths[edges[i]] = normx\n # widths = []\n # for edge in subsetg.edges():\n # key = tuple([edge[1], edge[0]])\n # widths.append(edge_widths[key])\n\n # reverse back\n subsetg = subsetg.reverse()\n widths = []\n for edge in subsetg.edges():\n key = tuple([edge[1], edge[0]])\n widths.append(1.0)\n\n edge_colors = []\n for edge in subsetg.edges(data=True):\n edge_colors.append(edge[2][cost_model+cost_key])\n\n vmin = min(edge_colors)\n vmax = max(edge_colors)\n\n # assert len(edge_colors) == len(flows)\n opt_labels_list = nx.shortest_path(subsetg, source_node,\n final_node, weight=cost_model+cost_key)\n opt_labels = {}\n for n in subsetg.nodes(data=True):\n if n[0] in opt_labels_list:\n opt_labels[n[0]] = n[1][\"label\"]\n\n cm = matplotlib.colors.LinearSegmentedColormap.from_list(\"\", [\"green\", \"yellow\", \"red\"])\n\n else:\n widths = []\n for edge in subsetg.edges():\n key = tuple([edge[1], edge[0]])\n widths.append(2.0)\n cm = None\n\n pos = nx.nx_pydot.pydot_layout(subsetg, prog=\"dot\")\n\n if ax is None:\n fig, ax = plt.subplots(1,1,figsize=(30,20))\n\n labels = nx.get_node_attributes(subsetg, 'label')\n\n nx.draw_networkx_labels(subsetg, pos=pos,\n labels=labels,\n ax=ax, font_size=font_size,\n bbox=dict(facecolor=\"w\", edgecolor='k', boxstyle='round,pad=0.1'))\n\n if bold_opt_path and cost_model is not None:\n nx.draw_networkx_labels(subsetg, pos=pos,\n labels=opt_labels, ax=ax,\n font_size=font_size,\n bbox=dict(facecolor=\"w\", edgecolor='k',\n lw=font_size/2, boxstyle='round,pad=0.5', fill=True))\n\n if bold_path and cost_model is not None:\n bold_labels = {}\n for n in subsetg.nodes(data=True):\n if n[0] in bold_path:\n bold_labels[n[0]] = n[1][\"label\"]\n nx.draw_networkx_labels(subsetg, pos=pos,\n labels=bold_labels, ax=ax,\n font_size=font_size,\n bbox=dict(facecolor=\"w\", edgecolor='k',\n lw=font_size/2, boxstyle='round,pad=0.5', fill=True))\n\n if edge_color is not None:\n edge_colors = edge_color\n\n edges = nx.draw_networkx_edges(subsetg, pos,\n edge_color=edge_colors,\n width=widths, ax = ax, edge_cmap=cm,\n arrows=True,\n arrowsize=font_size / 2,\n arrowstyle='simple',\n min_target_margin=5.0)\n\n if y is not None and cost_model is not None:\n plt.style.use(\"seaborn-white\")\n sm = plt.cm.ScalarMappable(cmap=cm,\n norm=plt.Normalize(vmin=vmin, vmax=vmax))\n sm.set_array([])\n if fig is None:\n cbar = plt.colorbar(sm, aspect=50,\n orientation=\"horizontal\", pad =\n 0.02)\n else:\n cbar = fig.colorbar(sm, ax=ax,\n pad = 0.02,\n aspect=50,\n orientation=\"horizontal\")\n\n cbar.ax.tick_params(labelsize=font_size)\n cbar.set_label(\"Cost\", fontsize=font_size)\n cbar.ax.xaxis.get_offset_text().set_fontsize(font_size)\n\n plt.tight_layout()\n", "repo_name": "learnedsystems/CEB", "sub_path": "query_representation/viz.py", "file_name": "viz.py", "file_ext": "py", 
"file_size_in_byte": 15652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 51, "dataset": "github-code", "pt": "61", "api": [{"api_name": "networkx.DiGraph", "line_number": 148, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "grandalf.utils.convert_nextworkx_graph_to_grandalf", "line_number": 206, "usage_type": "call"}, {"api_name": "grandalf.utils", "line_number": 206, "usage_type": "attribute"}, {"api_name": "grandalf.layouts.SugiyamaLayout", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.colors.LinearSegmentedColormap.from_list", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 247, "usage_type": "attribute"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.ScalarMappable", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 257, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 261, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 261, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 268, "usage_type": "name"}, {"api_name": "matplotlib.patches.Patch", "line_number": 269, "usage_type": "call"}, {"api_name": "matplotlib.patches", "line_number": 269, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 270, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 270, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "utils.SOURCE_NODE", "line_number": 328, "usage_type": "name"}, {"api_name": "evaluation.cost_model.update_subplan_costs", "line_number": 341, "usage_type": "call"}, {"api_name": "networkx.shortest_path", "line_number": 377, "usage_type": "call"}, {"api_name": "matplotlib.colors.LinearSegmentedColormap.from_list", "line_number": 384, "usage_type": "call"}, {"api_name": 
"matplotlib.colors", "line_number": 384, "usage_type": "attribute"}, {"api_name": "networkx.nx_pydot.pydot_layout", "line_number": 393, "usage_type": "call"}, {"api_name": "networkx.nx_pydot", "line_number": 393, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 396, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 396, "usage_type": "name"}, {"api_name": "networkx.get_node_attributes", "line_number": 398, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 400, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 406, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_labels", "line_number": 417, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 426, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 435, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 435, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 435, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.ScalarMappable", "line_number": 436, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 436, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 436, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 437, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 437, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 440, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 453, "usage_type": "name"}]} +{"seq_id": "36805208294", "text": "from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom challenges.models import CommunityChallenge\n\ndef challenges(request):\n num_challenge_posts = CommunityChallenge.objects.all().count()\n posts = CommunityChallenge.objects.order_by('-published_date')\n\n context = {\n 'num_challenge_posts': num_challenge_posts,\n 'challenges': posts,\n }\n\n\n return render(request, 'challenges.html', context=context)", "repo_name": "katiehrenchir/you-go-girl", "sub_path": "challenges/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "challenges.models.CommunityChallenge.objects.all", "line_number": 6, "usage_type": "call"}, {"api_name": "challenges.models.CommunityChallenge.objects", "line_number": 6, "usage_type": "attribute"}, {"api_name": "challenges.models.CommunityChallenge", "line_number": 6, "usage_type": "name"}, {"api_name": "challenges.models.CommunityChallenge.objects.order_by", "line_number": 7, "usage_type": "call"}, {"api_name": "challenges.models.CommunityChallenge.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "challenges.models.CommunityChallenge", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "34093717952", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"These functions calculate the similarity of two images of the same size.\"\"\"\n\n\nimport cv2\nfrom .utils import img_mat_rgb_2_gray\n\n\ndef 
cal_ccoeff_confidence(im_source, im_search):\n    \"\"\"Compute the confidence of two images, using the TM_CCOEFF_NORMED method.\"\"\"\n    # Expand the region used for the confidence calculation\n    im_search = cv2.copyMakeBorder(im_search, 10,10,10,10,cv2.BORDER_REPLICATE)\n    \n    im_source, im_search = img_mat_rgb_2_gray(im_source), img_mat_rgb_2_gray(im_search)\n    res = cv2.matchTemplate(im_source, im_search, cv2.TM_CCOEFF_NORMED)\n    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n    return max_val\n\n\ndef cal_rgb_confidence(img_src_rgb, img_sch_rgb):\n    \"\"\"Compute the similarity of two color images of the same size.\"\"\"\n    # Expand the region used for the confidence calculation\n    img_sch_rgb = cv2.copyMakeBorder(img_sch_rgb, 10,10,10,10,cv2.BORDER_REPLICATE)\n    # Convert to HSV to strengthen the influence of color\n    img_src_rgb = cv2.cvtColor(img_src_rgb, cv2.COLOR_BGR2HSV)\n    img_sch_rgb = cv2.cvtColor(img_sch_rgb, cv2.COLOR_BGR2HSV)\n    src_bgr, sch_bgr = cv2.split(img_src_rgb), cv2.split(img_sch_rgb)\n\n    # Compute the confidence of the three BGR channels and store it in bgr_confidence:\n    bgr_confidence = [0, 0, 0]\n    # Pin the value range so the algorithm does not over-amplify tiny differences\n    src_bgr[0][0,0] = sch_bgr[0][0,0] = 0\n    src_bgr[0][0,1] = sch_bgr[0][0,1] = 255\n    for i in range(3):\n        res_temp = cv2.matchTemplate(src_bgr[i], sch_bgr[i], cv2.TM_CCOEFF_NORMED)\n        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_temp)\n        bgr_confidence[i] = max_val\n\n    return min(bgr_confidence)\n", "repo_name": "manito-666/air_uitest", "sub_path": "lib/python3.9/site-packages/airtest/aircv/cal_confidence.py", "file_name": "cal_confidence.py", "file_ext": "py", "file_size_in_byte": 1595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "cv2.copyMakeBorder", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "utils.img_mat_rgb_2_gray", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.matchTemplate", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.minMaxLoc", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.copyMakeBorder", "line_number": 26, "usage_type": "call"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.split", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.matchTemplate", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.TM_CCOEFF_NORMED", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.minMaxLoc", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "36448126999", "text": "import asyncio\nimport logging\nimport time\nimport six\nimport json\nimport os\nimport mpyq\nimport async_timeout\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\n\nfrom .client import Client\nfrom .data import CreateGameError, Result\nfrom .game_state import GameState\nfrom .player import Bot, Human\nfrom .portconfig import Portconfig\nfrom .protocol import ConnectionAlreadyClosed, ProtocolError\nfrom .sc2process import SC2Process\n\nlogger = logging.getLogger(__name__)\n\n\nclass SlidingTimeWindow:\n    def __init__(self, size: int):\n        assert size > 0\n\n        self.window_size = size\n        self.window = []\n\n    def push(self, value: float):\n        self.window = (self.window + [value])[-self.window_size :]\n\n    def clear(self):\n        self.window = []\n\n    
@property\n    def sum(self) -> float:\n        return sum(self.window)\n\n    @property\n    def available(self) -> float:\n        return sum(self.window[1:])\n\n    @property\n    def available_fmt(self) -> str:\n        return \",\".join(f\"{w:.2f}\" for w in self.window[1:])\n\n\nasync def _play_game_human(client, player_id, realtime, game_time_limit):\n    while True:\n        state = await client.observation()\n        if client._game_result:\n            return client._game_result[player_id]\n\n        if game_time_limit and (state.observation.observation.game_loop * 0.725 * (1 / 16)) > game_time_limit:\n            print(state.observation.game_loop, state.observation.game_loop * 0.14)\n            return Result.Tie\n\n        if not realtime:\n            await client.step()\n\n\nasync def _play_game_ai(client, player_id, ai, realtime, step_time_limit, game_time_limit):\n    if realtime:\n        assert step_time_limit is None\n\n    # step_time_limit works like this:\n    # * If None, then step time is not limited\n    # * If given an integer or float, the bot will simply resign if any step takes longer than that\n    # * Otherwise step_time_limit must be an object, with following settings:\n    #\n    # Key | Value | Description\n    # ------------|------------|-------------\n    # penalty | None | No penalty, the bot can continue on next step\n    # penalty | N: int | Cooldown penalty, BotAI.on_step will not be called for N steps\n    # penalty | \"resign\" | Bot resigns when going over time limit\n    # time_limit | int/float | Time limit for a single step\n    # window_size | N: int | The time limit will be used for last N steps, instead of 1\n    #\n    # Cooldown is a harsh penalty. The bot loses the ability to act, but even worse,\n    # the observation data from skipped steps is also lost. It's like falling asleep in\n    # the middle of the game.\n    time_penalty_cooldown = 0\n    if step_time_limit is None:\n        time_limit = None\n        time_window = None\n        time_penalty = None\n    elif isinstance(step_time_limit, (int, float)):\n        time_limit = float(step_time_limit)\n        time_window = SlidingTimeWindow(1)\n        time_penalty = \"resign\"\n    else:\n        assert isinstance(step_time_limit, dict)\n        time_penalty = step_time_limit.get(\"penalty\", None)\n        time_window = SlidingTimeWindow(int(step_time_limit.get(\"window_size\", 1)))\n        time_limit = float(step_time_limit.get(\"time_limit\", None))\n\n    ai._initialize_variables()\n\n    game_data = await client.get_game_data()\n    game_info = await client.get_game_info()\n\n    # This game_data will become self._game_data in botAI\n    ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)\n    state = await client.observation()\n    # check game result every time we get the observation\n    if client._game_result:\n        await ai.on_end(client._game_result[player_id])\n        return client._game_result[player_id]\n    gs = GameState(state.observation)\n    proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n    ai._prepare_step(gs, proto_game_info)\n    await ai.on_before_start()\n    ai._prepare_first_step()\n    try:\n        await ai.on_start()\n    except Exception as e:\n        logger.exception(f\"AI on_start threw an error\")\n        logger.error(f\"resigning due to previous error\")\n        await ai.on_end(Result.Defeat)\n        return Result.Defeat\n\n    iteration = 0\n    while True:\n        if iteration != 0:\n            if realtime:\n                # On realtime=True, might get an error here: sc2.protocol.ProtocolError: ['Not in a game']\n                try:\n                    requested_step = gs.game_loop + client.game_step\n                    state = await client.observation(requested_step)\n                    # If the bot took too long in the previous observation, request another observation one frame after\n                    if 
state.observation.observation.game_loop > requested_step:\n                        # TODO Remove these 2 comments\n                        # t = state.observation.observation.game_loop\n                        state = await client.observation(state.observation.observation.game_loop + 1)\n                        # print(f\"Requested step: {requested_step}, received: {t}, new: {state.observation.observation.game_loop}\")\n                except ProtocolError:\n                    pass\n            else:\n                state = await client.observation()\n        # check game result every time we get the observation\n        if client._game_result:\n            try:\n                await ai.on_end(client._game_result[player_id])\n            except TypeError as error:\n                # print(f\"caught type error {error}\")\n                # print(f\"return {client._game_result[player_id]}\")\n                return client._game_result[player_id]\n            return client._game_result[player_id]\n        gs = GameState(state.observation)\n        logger.debug(f\"Score: {gs.score.score}\")\n\n        if game_time_limit and (gs.game_loop * 0.725 * (1 / 16)) > game_time_limit:\n            await ai.on_end(Result.Tie)\n            return Result.Tie\n        proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n        ai._prepare_step(gs, proto_game_info)\n\n        logger.debug(f\"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s\")\n\n        try:\n            if realtime:\n                # Issue event like unit created or unit destroyed\n                await ai.issue_events()\n                await ai.on_step(iteration)\n                await ai._after_step()\n            else:\n                if time_penalty_cooldown > 0:\n                    time_penalty_cooldown -= 1\n                    logger.warning(f\"Running AI step: penalty cooldown: {time_penalty_cooldown}\")\n                    iteration -= 1 # Do not increment the iteration on this round\n                elif time_limit is None:\n                    # Issue event like unit created or unit destroyed\n                    await ai.issue_events()\n                    await ai.on_step(iteration)\n                    await ai._after_step()\n                else:\n                    out_of_budget = False\n                    budget = time_limit - time_window.available\n\n                    # Tell the bot how much time it has left via this attribute\n                    ai.time_budget_available = budget\n\n                    if budget < 0:\n                        logger.warning(f\"Running AI step: out of budget before step\")\n                        step_time = 0.0\n                        out_of_budget = True\n                    else:\n                        step_start = time.monotonic()\n                        try:\n                            async with async_timeout.timeout(budget):\n                                await ai.issue_events()\n                                await ai.on_step(iteration)\n                        except asyncio.TimeoutError:\n                            step_time = time.monotonic() - step_start\n                            logger.warning(\n                                f\"Running AI step: out of budget; \"\n                                + f\"budget={budget:.2f}, steptime={step_time:.2f}, \"\n                                + f\"window={time_window.available_fmt}\"\n                            )\n                            out_of_budget = True\n                        step_time = time.monotonic() - step_start\n\n                    time_window.push(step_time)\n\n                    if out_of_budget and time_penalty is not None:\n                        if time_penalty == \"resign\":\n                            raise RuntimeError(\"Out of time\")\n                        else:\n                            time_penalty_cooldown = int(time_penalty)\n                            time_window.clear()\n\n                    await ai._after_step()\n        except Exception as e:\n            if isinstance(e, ProtocolError) and e.is_game_over_error:\n                if realtime:\n                    return None\n                result = client._game_result[player_id]\n                if result is None:\n                    logger.error(\"Game over, but no results gathered\")\n                    raise\n                await ai.on_end(result)\n                return result\n            # NOTE: this message is caught by pytest suite\n            logger.exception(f\"AI step threw an error\") # DO NOT EDIT!\n            logger.error(f\"Error: {e}\")\n            logger.error(f\"Resigning due to previous error\")\n            try:\n                await ai.on_end(Result.Defeat)\n            except TypeError as error:\n                # print(f\"caught type error {error}\")\n                # print(f\"return {Result.Defeat}\")\n                return Result.Defeat\n            return Result.Defeat\n\n        logger.debug(f\"Running AI step: done\")\n\n        if not realtime:\n            if not client.in_game: # Client left (resigned) the game\n                await 
ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n\n await client.step()\n\n iteration += 1\n\n\nasync def _play_game(\n player, client, realtime, portconfig, step_time_limit=None, game_time_limit=None, rgb_render_config=None\n):\n assert isinstance(realtime, bool), repr(realtime)\n\n player_id = await client.join_game(\n player.name, player.race, portconfig=portconfig, rgb_render_config=rgb_render_config\n )\n logging.info(f\"Player {player_id} - {player.name if player.name else str(player)}\")\n\n if isinstance(player, Human):\n result = await _play_game_human(client, player_id, realtime, game_time_limit)\n else:\n result = await _play_game_ai(client, player_id, player.ai, realtime, step_time_limit, game_time_limit)\n\n logging.info(f\"Result for player {player_id} - {player.name if player.name else str(player)}: {result._name_}\")\n\n return result\n\n\nasync def _play_replay(client, ai, realtime=False, player_id=0):\n ai._initialize_variables()\n\n game_data = await client.get_game_data()\n game_info = await client.get_game_info()\n client.game_step = 1\n # This game_data will become self._game_data in botAI\n ai._prepare_start(client, player_id, game_info, game_data, realtime=realtime)\n state = await client.observation()\n # Check game result every time we get the observation\n if client._game_result:\n await ai.on_end(client._game_result[player_id])\n return client._game_result[player_id]\n gs = GameState(state.observation)\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n ai._prepare_first_step()\n try:\n await ai.on_start()\n except Exception as e:\n logger.exception(f\"AI on_start threw an error\")\n logger.error(f\"resigning due to previous error\")\n await ai.on_end(Result.Defeat)\n return Result.Defeat\n\n iteration = 0\n while True:\n if iteration != 0:\n if realtime:\n # TODO: check what happens if a bot takes too long to respond, so that the requested\n # game_loop might already be in the past\n state = await client.observation(gs.game_loop + client.game_step)\n else:\n state = await client.observation()\n # check game result every time we get the observation\n if client._game_result:\n try:\n await ai.on_end(client._game_result[player_id])\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {client._game_result[player_id]}\")\n return client._game_result[player_id]\n return client._game_result[player_id]\n gs = GameState(state.observation)\n logger.debug(f\"Score: {gs.score.score}\")\n\n proto_game_info = await client._execute(game_info=sc_pb.RequestGameInfo())\n ai._prepare_step(gs, proto_game_info)\n\n logger.debug(f\"Running AI step, it={iteration} {gs.game_loop * 0.725 * (1 / 16):.2f}s\")\n\n try:\n if realtime:\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n else:\n\n # Issue event like unit created or unit destroyed\n await ai.issue_events()\n await ai.on_step(iteration)\n await ai._after_step()\n\n except Exception as e:\n if isinstance(e, ProtocolError) and e.is_game_over_error:\n if realtime:\n return None\n # result = client._game_result[player_id]\n # if result is None:\n # logger.error(\"Game over, but no results gathered\")\n # raise\n await ai.on_end(Result.Victory)\n return None\n # NOTE: this message is caught by pytest suite\n logger.exception(f\"AI step threw an error\") # DO NOT EDIT!\n logger.error(f\"Error: {e}\")\n 
logger.error(f\"Resigning due to previous error\")\n try:\n await ai.on_end(Result.Defeat)\n except TypeError as error:\n # print(f\"caught type error {error}\")\n # print(f\"return {Result.Defeat}\")\n return Result.Defeat\n return Result.Defeat\n\n logger.debug(f\"Running AI step: done\")\n\n if not realtime:\n if not client.in_game: # Client left (resigned) the game\n await ai.on_end(Result.Victory)\n return Result.Victory\n\n await client.step() # unindent one line to work in realtime\n\n iteration += 1\n\n\nasync def _setup_host_game(server, map_settings, players, realtime, random_seed=None, disable_fog=None):\n r = await server.create_game(map_settings, players, realtime, random_seed, disable_fog)\n if r.create_game.HasField(\"error\"):\n err = f\"Could not create game: {CreateGameError(r.create_game.error)}\"\n if r.create_game.HasField(\"error_details\"):\n err += f\": {r.create_game.error_details}\"\n logger.critical(err)\n raise RuntimeError(err)\n\n return Client(server._ws)\n\n\nasync def _host_game(\n map_settings,\n players,\n realtime,\n portconfig=None,\n save_replay_as=None,\n step_time_limit=None,\n game_time_limit=None,\n rgb_render_config=None,\n random_seed=None,\n sc2_version=None,\n disable_fog=None,\n):\n\n assert players, \"Can't create a game without players\"\n\n assert any(isinstance(p, (Human, Bot)) for p in players)\n\n async with SC2Process(\n fullscreen=players[0].fullscreen, render=rgb_render_config is not None, sc2_version=sc2_version\n ) as server:\n await server.ping()\n\n client = await _setup_host_game(server, map_settings, players, realtime, random_seed, disable_fog)\n # Bot can decide if it wants to launch with 'raw_affects_selection=True'\n if not isinstance(players[0], Human) and getattr(players[0].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[0].ai.raw_affects_selection\n\n try:\n result = await _play_game(\n players[0], client, realtime, portconfig, step_time_limit, game_time_limit, rgb_render_config\n )\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n await client.quit()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return None\n\n return result\n\n\nasync def _host_game_aiter(\n map_settings, players, realtime, portconfig=None, save_replay_as=None, step_time_limit=None, game_time_limit=None,\n):\n assert players, \"Can't create a game without players\"\n\n assert any(isinstance(p, (Human, Bot)) for p in players)\n\n async with SC2Process() as server:\n while True:\n await server.ping()\n\n client = await _setup_host_game(server, map_settings, players, realtime)\n if not isinstance(players[0], Human) and getattr(players[0].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[0].ai.raw_affects_selection\n\n try:\n result = await _play_game(players[0], client, realtime, portconfig, step_time_limit, game_time_limit)\n\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return\n\n new_players = yield result\n if new_players is not None:\n players = new_players\n\n\ndef _host_game_iter(*args, **kwargs):\n game = _host_game_aiter(*args, **kwargs)\n new_playerconfig = None\n while True:\n new_playerconfig = yield asyncio.get_event_loop().run_until_complete(game.asend(new_playerconfig))\n\n\nasync def _join_game(\n 
players, realtime, portconfig, save_replay_as=None, step_time_limit=None, game_time_limit=None,\n):\n async with SC2Process(fullscreen=players[1].fullscreen) as server:\n await server.ping()\n\n client = Client(server._ws)\n # Bot can decide if it wants to launch with 'raw_affects_selection=True'\n if not isinstance(players[1], Human) and getattr(players[1].ai, \"raw_affects_selection\", None) is not None:\n client.raw_affects_selection = players[1].ai.raw_affects_selection\n\n try:\n result = await _play_game(players[1], client, realtime, portconfig, step_time_limit, game_time_limit)\n if save_replay_as is not None:\n await client.save_replay(save_replay_as)\n await client.leave()\n await client.quit()\n except ConnectionAlreadyClosed:\n logging.error(f\"Connection was closed before the game ended\")\n return None\n\n return result\n\n\nasync def _setup_replay(server, replay_path, realtime, observed_id):\n await server.start_replay(replay_path, realtime, observed_id)\n return Client(server._ws)\n\n\nasync def _host_replay(replay_path, ai, realtime, portconfig, base_build, data_version, observed_id):\n async with SC2Process(fullscreen=False, base_build=base_build, data_hash=data_version) as server:\n response = await server.ping()\n\n client = await _setup_replay(server, replay_path, realtime, observed_id)\n result = await _play_replay(client, ai, realtime)\n return result\n\n\ndef get_replay_version(replay_path):\n with open(replay_path, \"rb\") as f:\n replay_data = f.read()\n replay_io = six.BytesIO()\n replay_io.write(replay_data)\n replay_io.seek(0)\n archive = mpyq.MPQArchive(replay_io).extract()\n metadata = json.loads(archive[b\"replay.gamemetadata.json\"].decode(\"utf-8\"))\n return metadata[\"BaseBuild\"], metadata[\"DataVersion\"]\n\n\ndef run_game(map_settings, players, **kwargs):\n if sum(isinstance(p, (Human, Bot)) for p in players) > 1:\n host_only_args = [\"save_replay_as\", \"rgb_render_config\", \"random_seed\", \"sc2_version\", \"disable_fog\"]\n join_kwargs = {k: v for k, v in kwargs.items() if k not in host_only_args}\n\n portconfig = Portconfig()\n result = asyncio.get_event_loop().run_until_complete(\n asyncio.gather(\n _host_game(map_settings, players, **kwargs, portconfig=portconfig),\n _join_game(players, **join_kwargs, portconfig=portconfig),\n )\n )\n else:\n result = asyncio.get_event_loop().run_until_complete(_host_game(map_settings, players, **kwargs))\n return result\n\n\ndef run_replay(ai, replay_path, realtime=False, observed_id=0):\n portconfig = Portconfig()\n assert os.path.isfile(replay_path), f\"Replay does not exist at the given path: {replay_path}\"\n assert os.path.isabs(\n replay_path\n ), f'Replay path has to be an absolute path, e.g. 
\"C:/replays/my_replay.SC2Replay\" but given path was \"{replay_path}\"'\n base_build, data_version = get_replay_version(replay_path)\n result = asyncio.get_event_loop().run_until_complete(\n _host_replay(replay_path, ai, realtime, portconfig, base_build, data_version, observed_id)\n )\n return result\n", "repo_name": "mitchkoko/firstbot", "sub_path": "python-sc2/sc2/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 21385, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "client.observation", "line_number": 50, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 51, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 52, "usage_type": "attribute"}, {"api_name": "data.Result.Tie", "line_number": 56, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 56, "usage_type": "name"}, {"api_name": "client.step", "line_number": 59, "usage_type": "call"}, {"api_name": "client.get_game_data", "line_number": 99, "usage_type": "call"}, {"api_name": "client.get_game_info", "line_number": 100, "usage_type": "call"}, {"api_name": "client.observation", "line_number": 104, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 106, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 107, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 108, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 109, "usage_type": "call"}, {"api_name": "client._execute", "line_number": 110, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 110, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2", "line_number": 110, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 119, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 119, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 120, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 120, "usage_type": "name"}, {"api_name": "client.game_step", "line_number": 128, "usage_type": "attribute"}, {"api_name": "client.observation", "line_number": 129, "usage_type": "call"}, {"api_name": "client.observation", "line_number": 134, "usage_type": "call"}, {"api_name": "protocol.ProtocolError", "line_number": 136, "usage_type": "name"}, {"api_name": "client.observation", "line_number": 139, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 141, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 143, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 147, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 148, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 149, "usage_type": "call"}, {"api_name": "data.Result.Tie", "line_number": 153, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 153, "usage_type": "name"}, {"api_name": "data.Result.Tie", "line_number": 154, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 154, "usage_type": "name"}, {"api_name": "client._execute", "line_number": 155, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 155, "usage_type": "call"}, {"api_name": 
"s2clientprotocol.sc2api_pb2", "line_number": 155, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 188, "usage_type": "call"}, {"api_name": "async_timeout.timeout", "line_number": 190, "usage_type": "call"}, {"api_name": "asyncio.TimeoutError", "line_number": 193, "usage_type": "attribute"}, {"api_name": "time.monotonic", "line_number": 194, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 201, "usage_type": "call"}, {"api_name": "protocol.ProtocolError", "line_number": 214, "usage_type": "argument"}, {"api_name": "client._game_result", "line_number": 217, "usage_type": "attribute"}, {"api_name": "data.Result.Defeat", "line_number": 228, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 228, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 232, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 232, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 233, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 233, "usage_type": "name"}, {"api_name": "client.in_game", "line_number": 238, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 239, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 240, "usage_type": "attribute"}, {"api_name": "client.step", "line_number": 242, "usage_type": "call"}, {"api_name": "client.join_game", "line_number": 252, "usage_type": "call"}, {"api_name": "player.name", "line_number": 253, "usage_type": "attribute"}, {"api_name": "player.race", "line_number": 253, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 255, "usage_type": "call"}, {"api_name": "player.name", "line_number": 255, "usage_type": "attribute"}, {"api_name": "player.Human", "line_number": 257, "usage_type": "argument"}, {"api_name": "player.ai", "line_number": 260, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 262, "usage_type": "call"}, {"api_name": "player.name", "line_number": 262, "usage_type": "attribute"}, {"api_name": "client.get_game_data", "line_number": 270, "usage_type": "call"}, {"api_name": "client.get_game_info", "line_number": 271, "usage_type": "call"}, {"api_name": "client.game_step", "line_number": 272, "usage_type": "attribute"}, {"api_name": "client.observation", "line_number": 275, "usage_type": "call"}, {"api_name": "client._game_result", "line_number": 277, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 278, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 279, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 280, "usage_type": "call"}, {"api_name": "client._execute", "line_number": 281, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 281, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2", "line_number": 281, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 289, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 289, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 290, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 290, "usage_type": "name"}, {"api_name": "client.observation", "line_number": 298, "usage_type": "call"}, {"api_name": "client.game_step", "line_number": 298, "usage_type": "attribute"}, {"api_name": "client.observation", "line_number": 300, "usage_type": "call"}, 
{"api_name": "client._game_result", "line_number": 302, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 304, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 308, "usage_type": "attribute"}, {"api_name": "client._game_result", "line_number": 309, "usage_type": "attribute"}, {"api_name": "game_state.GameState", "line_number": 310, "usage_type": "call"}, {"api_name": "client._execute", "line_number": 313, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2.RequestGameInfo", "line_number": 313, "usage_type": "call"}, {"api_name": "s2clientprotocol.sc2api_pb2", "line_number": 313, "usage_type": "name"}, {"api_name": "protocol.ProtocolError", "line_number": 332, "usage_type": "argument"}, {"api_name": "data.Result.Victory", "line_number": 339, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 339, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 346, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 346, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 350, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 350, "usage_type": "name"}, {"api_name": "data.Result.Defeat", "line_number": 351, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 351, "usage_type": "name"}, {"api_name": "client.in_game", "line_number": 356, "usage_type": "attribute"}, {"api_name": "data.Result.Victory", "line_number": 357, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 357, "usage_type": "name"}, {"api_name": "data.Result.Victory", "line_number": 358, "usage_type": "attribute"}, {"api_name": "data.Result", "line_number": 358, "usage_type": "name"}, {"api_name": "client.step", "line_number": 360, "usage_type": "call"}, {"api_name": "data.CreateGameError", "line_number": 368, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 374, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 393, "usage_type": "name"}, {"api_name": "player.Bot", "line_number": 393, "usage_type": "name"}, {"api_name": "sc2process.SC2Process", "line_number": 395, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 402, "usage_type": "argument"}, {"api_name": "client.raw_affects_selection", "line_number": 403, "usage_type": "attribute"}, {"api_name": "client.save_replay", "line_number": 410, "usage_type": "call"}, {"api_name": "client.leave", "line_number": 411, "usage_type": "call"}, {"api_name": "client.quit", "line_number": 412, "usage_type": "call"}, {"api_name": "protocol.ConnectionAlreadyClosed", "line_number": 413, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 414, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 425, "usage_type": "name"}, {"api_name": "player.Bot", "line_number": 425, "usage_type": "name"}, {"api_name": "sc2process.SC2Process", "line_number": 427, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 432, "usage_type": "argument"}, {"api_name": "client.raw_affects_selection", "line_number": 433, "usage_type": "attribute"}, {"api_name": "client.save_replay", "line_number": 439, "usage_type": "call"}, {"api_name": "client.leave", "line_number": 440, "usage_type": "call"}, {"api_name": "protocol.ConnectionAlreadyClosed", "line_number": 441, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 442, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 454, "usage_type": 
"call"}, {"api_name": "sc2process.SC2Process", "line_number": 460, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 463, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 465, "usage_type": "argument"}, {"api_name": "client.raw_affects_selection", "line_number": 466, "usage_type": "attribute"}, {"api_name": "client.save_replay", "line_number": 471, "usage_type": "call"}, {"api_name": "client.leave", "line_number": 472, "usage_type": "call"}, {"api_name": "client.quit", "line_number": 473, "usage_type": "call"}, {"api_name": "protocol.ConnectionAlreadyClosed", "line_number": 474, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 475, "usage_type": "call"}, {"api_name": "client.Client", "line_number": 483, "usage_type": "call"}, {"api_name": "sc2process.SC2Process", "line_number": 487, "usage_type": "call"}, {"api_name": "six.BytesIO", "line_number": 498, "usage_type": "call"}, {"api_name": "mpyq.MPQArchive", "line_number": 501, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 502, "usage_type": "call"}, {"api_name": "player.Human", "line_number": 507, "usage_type": "name"}, {"api_name": "player.Bot", "line_number": 507, "usage_type": "name"}, {"api_name": "portconfig.Portconfig", "line_number": 511, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 512, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 513, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 519, "usage_type": "call"}, {"api_name": "portconfig.Portconfig", "line_number": 524, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 525, "usage_type": "call"}, {"api_name": "os.path", "line_number": 525, "usage_type": "attribute"}, {"api_name": "os.path.isabs", "line_number": 526, "usage_type": "call"}, {"api_name": "os.path", "line_number": 526, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 530, "usage_type": "call"}]} +{"seq_id": "14211099431", "text": "import cv2\r\nimport numpy as np\r\nfrom csv_managment import comparate_with_database\r\nimport socket\r\n\r\nadress = '0.0.0.0'\r\nport = 8081\r\n\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsock.bind((adress, port))\r\nsock.listen(1)\r\n\r\nconnections = []\r\nwithAndroid = False\r\n\r\nfinger_position_list = [[], []]\r\n\r\ndef string(vec):\r\n result = \"\"\r\n for i in vec:\r\n result += str(i) + \"!\"\r\n\r\n return result\r\n\r\ndef send(message):\r\n for connection in connections:\r\n connection.send(bytes(message + \"\\n\", 'utf-8'))\r\n\r\nif (withAndroid):\r\n print(\"Waiting for connections\")\r\n while True:\r\n client, a = sock.accept()\r\n connections.append(client)\r\n break\r\n\r\n print(\"Connected\")\r\n print(connections)\r\n\r\ncap = cv2.VideoCapture(0)\r\n_, img3 = cap.read()\r\n\r\nx1, y1, x2, y2 = 0,0,0,0\r\n\r\ncounter = 0\r\nsalidaFinal = \"\"\r\nisSend = False\r\nmensaje = \"\"\r\nwhile (cap.isOpened()):\r\n _, frame = cap.read()\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n \r\n # define range of white color in HSV\r\n # change it according to your need !\r\n #lower_white = np.array([0, 0, 215])\r\n #upper_white = np.array([180, 15, 255])\r\n #lower_white = np.array([0, 0, 230])\r\n #upper_white = np.array([180, 25, 255])\r\n \r\n # Threshold the HSV image to get only white colors\r\n #mask = cv2.inRange(hsv, lower_white, upper_white)\r\n # Bitwise-AND mask and original image\r\n #res = cv2.bitwise_and(frame,frame, mask= mask)\r\n 
#umbral = cv2.threshold(mask, 25, 255, cv2.THRESH_BINARY)[1]\r\n    #umbral = cv2.dilate(umbral, None, iterations=2)\r\n    \r\n    #contornosimg = umbral.copy()\r\n    # Find contours in the image\r\n    #im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    \r\n    \r\n    \r\n    \"\"\"\r\n    for c in contornos:\r\n        # Discard the smallest contours\r\n        if (cv2.contourArea(c) < 4000):\r\n            continue\r\n        # Get the contour's bounding box, the rectangle that encloses the contour\r\n        elif(cv2.contourArea(c) > 40000):\r\n            continue\r\n        else:\r\n            \r\n            (xa, ya, wa, ha) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            if(xa>40 and ya>40 and wa+80(x1+30) or xa<(x1-30)):\r\n                    if(x1(y1+30) or ya<(y1-30)):\r\n                    if(y1 2000):\r\n            continue\r\n        else:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n            red_objects.append([x+w/2 , y+h/2])\r\n            cv2.putText(frame,\"RED color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255))\r\n            xr = x\r\n            yr = y\r\n    \r\n    contornosimg = blue.copy()\r\n    im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    xb = 0\r\n    yb = 0\r\n    for c in contornos:\r\n        # Discard the smallest contours\r\n        if (cv2.contourArea(c) < 200):\r\n            continue\r\n        # Get the contour's bounding box, the rectangle that encloses the contour\r\n        elif(cv2.contourArea(c) > 2000):\r\n            continue\r\n        else:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            cv2.rectangle(frame, (x, y), (x +w, y+h), (255, 0, 0), 2)\r\n            blue_objects.append([(x + w)/2 , (y+h)/2])\r\n            xb = x\r\n            yb = y\r\n            cv2.putText(frame,\"BLUE color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.7,(255,0,0))\r\n\r\n    \r\n\r\n    #Tracking the YELLOW Color\r\n    \r\n    contornosimg = yellow.copy()\r\n    im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    for c in contornos:\r\n        # Discard the smallest contours\r\n        if (cv2.contourArea(c) < 200):\r\n            continue\r\n        # Get the contour's bounding box, the rectangle that encloses the contour\r\n        elif(cv2.contourArea(c) > 2000):\r\n            continue\r\n        else:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n            yellow_objects.append([x+w/2 , y+h/2])\r\n            cv2.putText(frame,\"YELLOW color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n    \r\n\r\n\r\n    #Tracking the purple Color\r\n    contornosimg = purple.copy()\r\n    im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    for c in contornos:\r\n        # Discard the smallest contours\r\n        if (cv2.contourArea(c) < 200):\r\n            continue\r\n        # Get the contour's bounding box, the rectangle that encloses the contour\r\n        elif(cv2.contourArea(c) > 2000):\r\n            continue\r\n        else:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n            purple_objects.append([x+w/2 , y+h/2])\r\n            cv2.putText(frame,\"purple color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n    \r\n    \r\n\r\n    #Tracking the green Color\r\n    \r\n    contornosimg = green.copy()\r\n    im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    for c in contornos:\r\n        # Discard the smallest contours\r\n        if (cv2.contourArea(c) < 200):\r\n            continue\r\n        # Get the contour's bounding box, the rectangle that encloses the contour\r\n        elif(cv2.contourArea(c) > 2000):\r\n            continue\r\n        else:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n            green_objects.append([x+w/2 , y+h/2])\r\n            cv2.putText(frame,\"green color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n    \r\n    \r\n\r\n    #Tracking the black Color\r\n    \r\n    contornosimg = black.copy()\r\n    im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    for c in contornos:\r\n        # Discard the smallest contours\r\n        if (cv2.contourArea(c) < 200):\r\n            continue\r\n        # Get the contour's bounding box, the rectangle that encloses the contour\r\n        elif(cv2.contourArea(c) > 2000):\r\n            continue\r\n        else:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n            black_objects.append([x+w/2 , y+h/2])\r\n            cv2.putText(frame,\"black color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n    \r\n    \r\n\r\n    #Tracking the orange Color\r\n    \r\n    \r\n    \r\n    \r\n    \r\n    contornosimg = orange.copy()\r\n    im, contornos, hierarchy = cv2.findContours(contornosimg,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n    for c in contornos:\r\n        # Discard the smallest contours\r\n        if (cv2.contourArea(c) < 200):\r\n            continue\r\n        # Get the contour's bounding box, the rectangle that encloses the contour\r\n        elif(cv2.contourArea(c) > 2000):\r\n            continue\r\n        else:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            # Draw the bounding rectangle\r\n            cv2.rectangle(frame, (x, y), (x + w, y + h), (255,0,255),2)\r\n            orange_objects.append([x+w/2 , y+h/2])\r\n            cv2.putText(frame,\"orange color\",(x,y),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0))\r\n\r\n    \r\n    \r\n    \r\n    if(xb==0):\r\n        if(xr>(x1+30) or xr<(x1-30)):\r\n            if(x1(y1+30) or yr<(y1-30)):\r\n                if(y1(x2+30) or xb<(x2-30)):\r\n            if(x1(y2+30) or yb<(y2-30)):\r\n                if(y2 Actuator:\n    location: str = location_status.name\n    status: str = location_status.value\n\n    action: str = ''\n    if status == 'Dirty':\n        action = 'Suck'\n    elif location == 'A':\n        action = 'Right'\n    elif location == 'B':\n        action = 'Left'\n\n    return Actuator('action', action)\n", "repo_name": "GrahamStrickland/aima", "sub_path": "ch02/agents/reflex_vacuum_agent.py", "file_name": "reflex_vacuum_agent.py", "file_ext": "py", "file_size_in_byte": 452, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "modules.sensor.Sensor", "line_number": 6, "usage_type": "name"}, {"api_name": "modules.actuator.Actuator", "line_number": 18, "usage_type": "call"}, {"api_name": "modules.actuator.Actuator", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "12958861331", "text": "#! 
python3\r\n# Reads text files, puts them into lists, and then inputs them into an Excel file\r\nimport openpyxl, os\r\ndef TextToExcel(folder):\r\n    wb = openpyxl.Workbook()\r\n    sheet = wb.active\r\n    num_column = 0\r\n    # Going through the folder\r\n    for foldername, subfolders, filenames in os.walk(folder):\r\n        for fl_int in range(len(filenames)):\r\n            filename = list(filenames)\r\n            file_ = open(foldername + '\\\\' + filename[fl_int],'r')\r\n            # Acquiring the text from the .txt\r\n            text_ = file_.readlines()\r\n            text_ = text_[0].split(' ')\r\n            for num_row in range(len(text_)):\r\n                sheet.cell(row = num_row + 1, column = num_column + 1).value = text_[num_row]\r\n                print(text_[num_row])\r\n            num_column += 1\r\n    wb.save('TextToExcel.xlsx')\r\n\r\nTextToExcel(r'C:\\Users\\Dr. Wan Asna\\Desktop\\Python Projects\\Automate the Boring Stuff\\Ch.13 - Working with Excel Spreadsheets\\num')", "repo_name": "QaisZainon/Learning-Coding", "sub_path": "Automate the Boring Stuff/Ch.13 - Working with Excel Spreadsheets/TextFilestoSpreadsheet.py", "file_name": "TextFilestoSpreadsheet.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "openpyxl.Workbook", "line_number": 5, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "73550887553", "text": "from pyzabbix import ZabbixAPI\nfrom zabbix import Zabbix\nimport time\nimport json\nfrom datetime import datetime\nimport os.path\nfrom os import path\n\n\nclass TrafficAnalyzer:\n\n    def __init__(self, in_traffic_tag, in_incoming_traffic_id, in_outgoing_traffic_id):\n        self.traffic_tag = in_traffic_tag\n        self.incoming_traffic_id = in_incoming_traffic_id\n        self.outgoing_traffic_id = in_outgoing_traffic_id\n\n        self.final_result = list()\n\n        # Zabbix API Credentials\n        self.zabbix = Zabbix()\n        self.ZABBIX_SERVER = self.zabbix.ZABBIX_SERVER\n        self.ZABBIX_USER = self.zabbix.ZABBIX_USER\n        self.ZABBIX_PSSW = self.zabbix.ZABBIX_PSSW\n        self.zapi = ZabbixAPI(self.ZABBIX_SERVER)\n        self.zapi.login(self.ZABBIX_USER, self.ZABBIX_PSSW)\n\n        # Time frame to be considered\n        self.time_till = time.mktime(datetime.now().timetuple())\n        self.time_from = self.time_till - 60 * 60 * 1  # last 1 hour\n\n        if path.exists('data_file.json'):\n            os.remove('data_file.json')\n    \n    def check_traffic(self, source):\n        \"\"\"\n        Method to retrieve the historical values from some item (port) given its ID\n        API connection to get item's history \n        The returned values contain the item id, clock, value, and ns\n        \"\"\"\n        result = list()\n        \n        # Query item's history (integer) data\n        history = self.zapi.history.get(itemids=[source],\n                                        time_from=self.time_from,\n                                        time_till=self.time_till,\n                                        output='extend',\n                                        limit='5000')\n\n        # If nothing was found, try getting it from history (float) data\n        if not len(history):\n            history = self.zapi.history.get(itemids=[source],\n                                            time_from=self.time_from,\n                                            time_till=self.time_till,\n                                            output='extend',\n                                            limit='5000',\n                                            history=0)\n\n        # Create the list with entries using each data point information\n        for point in history:\n            result.append((int(point['clock']), int(point['value'])))\n\n        return result\n\n    def get_traffic(self, traffic_type='in'):\n        \"\"\"\n        Method to check Node/Port traffic\n        \"\"\"\n        if traffic_type == 'in':\n            return self.check_traffic(self.incoming_traffic_id)\n        else:\n            return self.check_traffic(self.outgoing_traffic_id)\n\n    def merge_traffic(self, source1, source2):\n        \"\"\"\n        Method to merge both traffic values into a single data structure\n        \"\"\"\n        i = 0\n        while i < len(source1):\n            tmp = (source1[i][0], source1[i][1], source2[i][1])\n            self.final_result.append(tmp)\n            i += 1\n\n    def build_json(self, tag, points):\n        \"\"\"\n        Method to create a json object given the tag name and the points list\n        \"\"\"\n        dict_obj = {tag: {\"name\": \"\", \"utc\": True, \"columns\": [\"time\", \"in\", \"out\"],\n                          \"points\": points}}\n        r = json.dumps(dict_obj)\n\n        if path.exists('data_file.json'):\n            data = dict()\n            with open(\"data_file.json\", 'r+') as write_file:\n                old_data = json.load(write_file)\n                data = dict(old_data)\n                data.update(dict_obj)\n\n            with open(\"data_file.json\", 'r+') as write_file:\n                json.dump(data, write_file)\n        else:\n            with open(\"data_file.json\", 'w+') as write_file:\n                json.dump(dict_obj, write_file)\n\n        write_file.close()\n        print(r)\n\n    def traffic_on_json(self):\n        \"\"\"\n        Compute the incoming and outgoing traffic, merge both into a single data structure,\n        and send that result to be structured as JSON\n        \"\"\"\n        # Incoming Traffic\n        results1 = self.get_traffic('in')\n\n        # Outgoing Traffic\n        results2 = self.get_traffic('out')\n\n        self.merge_traffic(results1, results2)\n\n        print(\"Merged Traffic\")\n        self.build_json(self.traffic_tag, self.final_result)\n\n        return self.final_result\n\n    def total_traffic(self, source1, source2):\n        \"\"\"\n        Method to compute the total traffic between two points\n        \"\"\"\n        i = 0\n        out_total_traffic = list()\n        while i < len(source1):\n            tmp = (source2[i][0], source1[i][1] + source2[i][1], source1[i][2] + source2[i][2])\n            out_total_traffic.append(tmp)\n            i += 1\n\n        print(\"Total Traffic\")\n        self.build_json(\"Total\", out_total_traffic)\n", "repo_name": "amlight/weathermap", "sub_path": "venv/include/trafficanalyzer.py", "file_name": "trafficanalyzer.py", "file_ext": "py", "file_size_in_byte": 4792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "zabbix.Zabbix", "line_number": 20, "usage_type": "call"}, {"api_name": "pyzabbix.ZabbixAPI", "line_number": 24, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "name"}, {"api_name": "json.load", "line_number": 94, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 99, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "12816261862", "text": "from django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nfrom . 
import views\n\napp_name = \"website\"\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^login$', views.login_user, name='login'),\n url(r'^logout$', views.user_logout, name='logout'),\n url(r'^register$', views.register, name='register'),\n url(r'^addmember$', views.addmember, name='addmember'), \n url(r'^adddetainee$', views.add_detainee, name='detainee'),\n url(r'^addsession$', views.session, name='addsession'),\n url(r'^session/(?P\\d+)/$', views.updatesessionrole.as_view(), name='updatesession'),\n url(r'^detainee/(?P\\d+)/$', views.detainee, name='detaineedetail'),\n url(r'^createreport$', views.report, name='report'),\n url(r'^report/(?P\\d+)/$', views.singlereport, name='singlereport'),\n url(r'^editreport/(?P\\d+)/$', views.editreport.as_view(), name='editreport'),\n url(r'^editprofile$', views.editprofile, name='editprofile'),\n url(r'^team$', views.team, name='team'),\n url(r'^pdfreport/(?P\\d+)/$', views.pdfreport, name='pdf')\n # url(r'^deleteuser$', views.userdelete, name='deleteuser'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)", "repo_name": "jcarter0149/reports-cap_stone_back", "sub_path": "website/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 27, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "29366537851", "text": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n################################################################################\n# Documentation\n################################################################################\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': [\"preview\"],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: gcp_compute_url_map\ndescription:\n - UrlMaps are used to route requests to a backend service based on rules that you\n define for the host and path of an incoming 
URL.\nshort_description: Creates a GCP UrlMap\nversion_added: 2.6\nauthor: Google Inc. (@googlecloudplatform)\nrequirements:\n - python >= 2.6\n - requests >= 2.18.4\n - google-auth >= 1.3.0\noptions:\n state:\n description:\n - Whether the given object should exist in GCP\n choices: ['present', 'absent']\n default: 'present'\n default_service:\n description:\n - A reference to BackendService resource if none of the hostRules match.\n required: true\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n required: false\n host_rules:\n description:\n - The list of HostRules to use against the URL.\n required: false\n suboptions:\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n required: false\n hosts:\n description:\n - The list of host patterns to match. They must be valid hostnames, except * will\n match any string of ([a-z0-9-.]*). In that case, * must be the first character and\n must be followed in the pattern by either - or .\n required: false\n path_matcher:\n description:\n - The name of the PathMatcher to use to match the path portion of the URL if the hostRule\n matches the URL's host portion.\n required: false\n name:\n description:\n - Name of the resource. Provided by the client when the resource is created. The name\n must be 1-63 characters long, and comply with RFC1035. Specifically, the name must\n be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`\n which means the first character must be a lowercase letter, and all following characters\n must be a dash, lowercase letter, or digit, except the last character, which cannot\n be a dash.\n required: false\n path_matchers:\n description:\n - The list of named PathMatchers to use against the URL.\n required: false\n suboptions:\n default_service:\n description:\n - A reference to a BackendService resource. This will be used if none of the pathRules\n defined by this PathMatcher is matched by the URL's path portion.\n required: false\n description:\n description:\n - An optional description of this resource.\n required: false\n name:\n description:\n - The name to which this PathMatcher is referred by the HostRule.\n required: false\n path_rules:\n description:\n - The list of path rules.\n required: false\n suboptions:\n paths:\n description:\n - 'The list of path patterns to match. Each must start with / and the only place a\n * is allowed is at the end following a /. The string fed to the path matcher does\n not include any text after the first ? or #, and those chars are not allowed here.'\n required: false\n service:\n description:\n - A reference to the BackendService resource if this rule is matched.\n required: false\n tests:\n description:\n - The list of expected URL mappings. 
Request to update this UrlMap will succeed only\n if all of the test cases pass.\n required: false\n suboptions:\n description:\n description:\n - Description of this test case.\n required: false\n host:\n description:\n - Host portion of the URL.\n required: false\n path:\n description:\n - Path portion of the URL.\n required: false\n service:\n description:\n - A reference to expected BackendService resource the given URL should be mapped to.\n required: false\nextends_documentation_fragment: gcp\n'''\n\nEXAMPLES = '''\n- name: create a instance group\n gcp_compute_instance_group:\n name: \"instancegroup-urlmap\"\n zone: us-central1-a\n project: \"{{ gcp_project }}\"\n auth_kind: \"{{ gcp_cred_kind }}\"\n service_account_file: \"{{ gcp_cred_file }}\"\n state: present\n register: instancegroup\n\n- name: create a http health check\n gcp_compute_http_health_check:\n name: \"httphealthcheck-urlmap\"\n healthy_threshold: 10\n port: 8080\n timeout_sec: 2\n unhealthy_threshold: 5\n project: \"{{ gcp_project }}\"\n auth_kind: \"{{ gcp_cred_kind }}\"\n service_account_file: \"{{ gcp_cred_file }}\"\n state: present\n register: healthcheck\n\n- name: create a backend service\n gcp_compute_backend_service:\n name: \"backendservice-urlmap\"\n backends:\n - group: \"{{ instancegroup }}\"\n health_checks:\n - \"{{ healthcheck.selfLink }}\"\n enable_cdn: true\n project: \"{{ gcp_project }}\"\n auth_kind: \"{{ gcp_cred_kind }}\"\n service_account_file: \"{{ gcp_cred_file }}\"\n state: present\n register: backendservice\n\n- name: create a url map\n gcp_compute_url_map:\n name: \"test_object\"\n default_service: \"{{ backendservice }}\"\n project: \"test_project\"\n auth_kind: \"service_account\"\n service_account_file: \"/tmp/auth.pem\"\n state: present\n'''\n\nRETURN = '''\n creation_timestamp:\n description:\n - Creation timestamp in RFC3339 text format.\n returned: success\n type: str\n default_service:\n description:\n - A reference to BackendService resource if none of the hostRules match.\n returned: success\n type: dict\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n returned: success\n type: str\n host_rules:\n description:\n - The list of HostRules to use against the URL.\n returned: success\n type: complex\n contains:\n description:\n description:\n - An optional description of this resource. Provide this property when you create\n the resource.\n returned: success\n type: str\n hosts:\n description:\n - The list of host patterns to match. They must be valid hostnames, except * will\n match any string of ([a-z0-9-.]*). In that case, * must be the first character and\n must be followed in the pattern by either - or .\n returned: success\n type: list\n path_matcher:\n description:\n - The name of the PathMatcher to use to match the path portion of the URL if the hostRule\n matches the URL's host portion.\n returned: success\n type: str\n id:\n description:\n - The unique identifier for the resource.\n returned: success\n type: int\n name:\n description:\n - Name of the resource. Provided by the client when the resource is created. The name\n must be 1-63 characters long, and comply with RFC1035. 
Specifically, the name must\n be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`\n which means the first character must be a lowercase letter, and all following characters\n must be a dash, lowercase letter, or digit, except the last character, which cannot\n be a dash.\n returned: success\n type: str\n path_matchers:\n description:\n - The list of named PathMatchers to use against the URL.\n returned: success\n type: complex\n contains:\n default_service:\n description:\n - A reference to a BackendService resource. This will be used if none of the pathRules\n defined by this PathMatcher is matched by the URL's path portion.\n returned: success\n type: dict\n description:\n description:\n - An optional description of this resource.\n returned: success\n type: str\n name:\n description:\n - The name to which this PathMatcher is referred by the HostRule.\n returned: success\n type: str\n path_rules:\n description:\n - The list of path rules.\n returned: success\n type: complex\n contains:\n paths:\n description:\n - 'The list of path patterns to match. Each must start with / and the only place a\n * is allowed is at the end following a /. The string fed to the path matcher does\n not include any text after the first ? or #, and those chars are not allowed here.'\n returned: success\n type: list\n service:\n description:\n - A reference to the BackendService resource if this rule is matched.\n returned: success\n type: dict\n tests:\n description:\n - The list of expected URL mappings. Request to update this UrlMap will succeed only\n if all of the test cases pass.\n returned: success\n type: complex\n contains:\n description:\n description:\n - Description of this test case.\n returned: success\n type: str\n host:\n description:\n - Host portion of the URL.\n returned: success\n type: str\n path:\n description:\n - Path portion of the URL.\n returned: success\n type: str\n service:\n description:\n - A reference to expected BackendService resource the given URL should be mapped to.\n returned: success\n type: dict\n'''\n\n################################################################################\n# Imports\n################################################################################\n\nfrom ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict\nimport json\nimport time\n\n################################################################################\n# Main\n################################################################################\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n module = GcpModule(\n argument_spec=dict(\n state=dict(default='present', choices=['present', 'absent'], type='str'),\n default_service=dict(required=True, type='dict'),\n description=dict(type='str'),\n host_rules=dict(type='list', elements='dict', options=dict(\n description=dict(type='str'),\n hosts=dict(type='list', elements='str'),\n path_matcher=dict(type='str')\n )),\n name=dict(type='str'),\n path_matchers=dict(type='list', elements='dict', options=dict(\n default_service=dict(type='dict'),\n description=dict(type='str'),\n name=dict(type='str'),\n path_rules=dict(type='list', elements='dict', options=dict(\n paths=dict(type='list', elements='str'),\n service=dict(type='dict')\n ))\n )),\n tests=dict(type='list', elements='dict', options=dict(\n description=dict(type='str'),\n host=dict(type='str'),\n path=dict(type='str'),\n service=dict(type='dict')\n ))\n )\n )\n\n if not 
module.params['scopes']:\n module.params['scopes'] = ['https://www.googleapis.com/auth/compute']\n\n state = module.params['state']\n kind = 'compute#urlMap'\n\n fetch = fetch_resource(module, self_link(module), kind)\n changed = False\n\n if fetch:\n if state == 'present':\n if is_different(module, fetch):\n fetch = update(module, self_link(module), kind)\n changed = True\n else:\n delete(module, self_link(module), kind)\n fetch = {}\n changed = True\n else:\n if state == 'present':\n fetch = create(module, collection(module), kind)\n changed = True\n else:\n fetch = {}\n\n fetch.update({'changed': changed})\n\n module.exit_json(**fetch)\n\n\ndef create(module, link, kind):\n auth = GcpSession(module, 'compute')\n return wait_for_operation(module, auth.post(link, resource_to_request(module)))\n\n\ndef update(module, link, kind):\n auth = GcpSession(module, 'compute')\n return wait_for_operation(module, auth.put(link, resource_to_request(module)))\n\n\ndef delete(module, link, kind):\n auth = GcpSession(module, 'compute')\n return wait_for_operation(module, auth.delete(link))\n\n\ndef resource_to_request(module):\n request = {\n u'kind': 'compute#urlMap',\n u'defaultService': replace_resource_dict(module.params.get(u'default_service', {}), 'selfLink'),\n u'description': module.params.get('description'),\n u'hostRules': UrlMapHostRulesArray(module.params.get('host_rules', []), module).to_request(),\n u'name': module.params.get('name'),\n u'pathMatchers': UrlMapPathMatchersArray(module.params.get('path_matchers', []), module).to_request(),\n u'tests': UrlMapTestsArray(module.params.get('tests', []), module).to_request()\n }\n return_vals = {}\n for k, v in request.items():\n if v:\n return_vals[k] = v\n\n return return_vals\n\n\ndef fetch_resource(module, link, kind):\n auth = GcpSession(module, 'compute')\n return return_if_object(module, auth.get(link), kind)\n\n\ndef self_link(module):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/global/urlMaps/{name}\".format(**module.params)\n\n\ndef collection(module):\n return \"https://www.googleapis.com/compute/v1/projects/{project}/global/urlMaps\".format(**module.params)\n\n\ndef return_if_object(module, response, kind):\n # If not found, return nothing.\n if response.status_code == 404:\n return None\n\n # If no content, return nothing.\n if response.status_code == 204:\n return None\n\n try:\n module.raise_for_status(response)\n result = response.json()\n except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:\n module.fail_json(msg=\"Invalid JSON response with error: %s\" % inst)\n\n if navigate_hash(result, ['error', 'errors']):\n module.fail_json(msg=navigate_hash(result, ['error', 'errors']))\n if result['kind'] != kind:\n module.fail_json(msg=\"Incorrect result: {kind}\".format(**result))\n\n return result\n\n\ndef is_different(module, response):\n request = resource_to_request(module)\n response = response_to_hash(module, response)\n\n # Remove all output-only from response.\n response_vals = {}\n for k, v in response.items():\n if k in request:\n response_vals[k] = v\n\n request_vals = {}\n for k, v in request.items():\n if k in response:\n request_vals[k] = v\n\n return GcpRequest(request_vals) != GcpRequest(response_vals)\n\n\n# Remove unnecessary properties from the response.\n# This is for doing comparisons with Ansible's current parameters.\ndef response_to_hash(module, response):\n return {\n u'creationTimestamp': response.get(u'creationTimestamp'),\n u'defaultService': 
response.get(u'defaultService'),\n u'description': response.get(u'description'),\n u'hostRules': UrlMapHostRulesArray(response.get(u'hostRules', []), module).from_response(),\n u'id': response.get(u'id'),\n u'name': response.get(u'name'),\n u'pathMatchers': UrlMapPathMatchersArray(response.get(u'pathMatchers', []), module).from_response(),\n u'tests': UrlMapTestsArray(response.get(u'tests', []), module).from_response()\n }\n\n\ndef async_op_url(module, extra_data=None):\n if extra_data is None:\n extra_data = {}\n url = \"https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}\"\n combined = extra_data.copy()\n combined.update(module.params)\n return url.format(**combined)\n\n\ndef wait_for_operation(module, response):\n op_result = return_if_object(module, response, 'compute#operation')\n if op_result is None:\n return {}\n status = navigate_hash(op_result, ['status'])\n wait_done = wait_for_completion(status, op_result, module)\n return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#urlMap')\n\n\ndef wait_for_completion(status, op_result, module):\n op_id = navigate_hash(op_result, ['name'])\n op_uri = async_op_url(module, {'op_id': op_id})\n while status != 'DONE':\n raise_if_errors(op_result, ['error', 'errors'], 'message')\n time.sleep(1.0)\n if status not in ['PENDING', 'RUNNING', 'DONE']:\n module.fail_json(msg=\"Invalid result %s\" % status)\n op_result = fetch_resource(module, op_uri, 'compute#operation')\n status = navigate_hash(op_result, ['status'])\n return op_result\n\n\ndef raise_if_errors(response, err_path, module):\n errors = navigate_hash(response, err_path)\n if errors is not None:\n module.fail_json(msg=errors)\n\n\nclass UrlMapHostRulesArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({\n u'description': item.get('description'),\n u'hosts': item.get('hosts'),\n u'pathMatcher': item.get('path_matcher')\n })\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({\n u'description': item.get(u'description'),\n u'hosts': item.get(u'hosts'),\n u'pathMatcher': item.get(u'pathMatcher')\n })\n\n\nclass UrlMapPathMatchersArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({\n u'defaultService': replace_resource_dict(item.get(u'default_service', {}), 'selfLink'),\n u'description': item.get('description'),\n u'name': item.get('name'),\n u'pathRules': UrlMapPathRulesArray(item.get('path_rules', []), self.module).to_request()\n })\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({\n u'defaultService': item.get(u'defaultService'),\n u'description': item.get(u'description'),\n u'name': item.get(u'name'),\n u'pathRules': 
UrlMapPathRulesArray(item.get(u'pathRules', []), self.module).from_response()\n })\n\n\nclass UrlMapPathRulesArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({\n u'paths': item.get('paths'),\n u'service': replace_resource_dict(item.get(u'service', {}), 'selfLink')\n })\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({\n u'paths': item.get(u'paths'),\n u'service': item.get(u'service')\n })\n\n\nclass UrlMapTestsArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({\n u'description': item.get('description'),\n u'host': item.get('host'),\n u'path': item.get('path'),\n u'service': replace_resource_dict(item.get(u'service', {}), 'selfLink')\n })\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({\n u'description': item.get(u'description'),\n u'host': item.get(u'host'),\n u'path': item.get(u'path'),\n u'service': item.get(u'service')\n })\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "amitvashist7/ansible-development-CTS", "sub_path": "molecule/my_env/lib/python2.7/site-packages/ansible/modules/cloud/google/gcp_compute_url_map.py", "file_name": "gcp_compute_url_map.py", "file_ext": "py", "file_size_in_byte": 23371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "ansible.module_utils.gcp_utils.GcpModule", "line_number": 318, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 378, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 383, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 388, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 395, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpSession", "line_number": 411, "usage_type": "call"}, {"api_name": "json.decoder", "line_number": 435, "usage_type": "attribute"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 438, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 439, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.GcpRequest", "line_number": 461, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 492, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 494, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 498, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 502, "usage_type": "call"}, {"api_name": 
"ansible.module_utils.gcp_utils.navigate_hash", "line_number": 506, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.navigate_hash", "line_number": 511, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 537, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 544, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 572, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 573, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 580, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 609, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 611, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 615, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 642, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.replace_resource_dict", "line_number": 646, "usage_type": "call"}, {"api_name": "ansible.module_utils.gcp_utils.remove_nones_from_dict", "line_number": 650, "usage_type": "call"}]} +{"seq_id": "73649840513", "text": "from database import Base\nfrom sqlalchemy import Column, Integer, String, Boolean, ForeignKey, DateTime, Float\nfrom sqlalchemy.types import DateTime\n\n\n\nfrom flask import Flask, request, jsonify, make_response\n\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nclass Medallions(Base):\n\t__tablename__ = 'medallions'\n\t\n\tid = Column(Integer, primary_key=True)\n\tmedallion = Column(String(50))\n\thack_license = Column(String(20))\n\tvendor_id = Column(String(20))\n\trate_code = Column(String(20))\n\tstore_and_fwd_flag = Column(String(20)) \n\tpickup_datetime = Column(DateTime)\n\tdropoff_datetime = Column(DateTime)\n\tpassenger_count = Column(Integer)\n\ttrip_time_in_secs = Column(Integer)\n\ttrip_distance = Column(Float)\n\n", "repo_name": "12DReflections/cab_trips", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 795, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 14, "usage_type": "call"}, {"api_name": "database.Base", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 19, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": 
"call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.types.DateTime", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.types.DateTime", "line_number": 26, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 27, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 29, "usage_type": "argument"}]} +{"seq_id": "70423693956", "text": "\"\"\"\nNaiveBayes is a generative classifier based on the Naive assumption that features are independent from each other\nP(w1, w2, ..., wn|y) = P(w1|y) P(w2|y) ... P(wn|y)\nThus argmax_{y} (P(y|w1,w2, ... wn)) can be modeled as argmax_{y} P(w1|y) P(w2|y) ... P(wn|y) P(y) using Bayes Rule\nand P(w1, w2, ... ,wn) is constant with respect to argmax_{y} \nPlease refer to lecture notes Chapter 4 for more details\n\"\"\"\n\nfrom collections import Counter, defaultdict\nfrom math import log\nimport operator\n\nimport numpy as np\nfrom Features import Features, tokenize\nfrom Model import *\n\n\nclass NBFeatures(Features):\n @classmethod \n def get_features(cls, tokenized, model):\n features = []\n token_to_embed = model['token_to_embed']\n for token in tokenized:\n embed = token_to_embed.get(token)\n if embed is not None:\n features.append(embed)\n else:\n features.append(token_to_embed['__OOV__'])\n return features\n\nclass NaiveBayes(Model):\n \n def __init__(self, model_file, vocab_size=None):\n super().__init__(model_file)\n self.vocab_size = vocab_size\n \n \n def train(self, input_file):\n \"\"\"\n This method is used to train your models and generated for a given input_file a trained model\n :param input_file: path to training file with a text and a label per each line\n :return: model: trained model \n \"\"\"\n \n wprobdenom = '__ALL__'\n \n nbFeatures = NBFeatures(input_file, vocab_size=self.vocab_size)\n \n model = {\n 'type': NaiveBayes.__class__,\n 'categories_probs': {},\n 'words_probs': {},\n 'options': nbFeatures.labelset,\n 'token_to_embed': nbFeatures.token_to_embed,\n 'embed_to_token': nbFeatures.embed_to_token,\n 'vocab_size': self.vocab_size,\n\n # 'label_to_embed': nbFeatures.label_to_embed,\n # 'embed_to_label': nbFeatures.embed_to_label,\n }\n \n wscores = defaultdict(lambda: Counter())\n cscores = Counter()\n \n features_list = list(map(lambda x: NBFeatures.get_features(x, model), nbFeatures.tokenized_text))\n # Y_true = list(map(lambda x: model['label_to_embed'][x], nbFeatures.labels))\n \n cutoff = int(len(features_list)*0.9)\n X_train, X_valid = features_list[:cutoff], features_list[cutoff:]\n Y_train, Y_valid = nbFeatures.labels[:cutoff], nbFeatures.labels[cutoff:]\n \n for features, label in zip(X_train, Y_train):\n cscores[label] += 1\n for f in features:\n wscores[label][f] += 1\n wscores[label][wprobdenom] += 1\n \n # Laplace Smoothing (+1)\n for label in model['options']:\n wprob = {}\n for token in nbFeatures.token_to_embed:\n embed = model['token_to_embed'][token]\n wprob[embed] = 1 / 
(wscores[label][wprobdenom] + 1)\n model['words_probs'][label] = wprob\n \n for label in model['options']:\n model['categories_probs'][label] =\\\n cscores[label] / len(features)\n for feature, score in wscores[label].items():\n # Laplace Smoothing (+1)\n # Overriding vocab values if applicable\n model['words_probs'][label][feature] = (score + 1) / (wscores[label][wprobdenom] + 1)\n \n \n # Validate\n train_err =\\\n np.sum(np.array(self._classify(X_train, model)) != np.array(Y_train))/len(Y_train)\n\n valid_err =\\\n np.sum(np.array(self._classify(X_valid, model)) != np.array(Y_valid))/len(Y_valid)\n \n print(f'TrainErr = {train_err}, ValidErr = {valid_err}', end='\\n')\n \n ## Save the model\n self.save_model(model)\n print('Saved model.')\n return model\n\n\n def _classify(self, features_list, model):\n def evaluate(features, option, model):\n score = log(model['categories_probs'][option])\n for f in features:\n score += log(model['words_probs'][option][f])\n return score \n \n preds = []\n for features in features_list:\n scores = {}\n for option in model['options']:\n scores[option] = evaluate(features, option, model)\n preds.append(\n max(scores.items(), key=operator.itemgetter(1))[0]\n )\n return preds\n \n def classify(self, input_file, model):\n \"\"\"\n This method will be called by us for the validation stage and or you can call it for evaluating your code \n on your own splits on top of the training sets seen to you\n :param input_file: path to input file with a text per line without labels\n :param model: the pretrained model\n :return: predictions list\n \"\"\" \n with open(input_file) as file:\n tokenized_sentences =\\\n map(tokenize, file.read().splitlines())\n\n features_list = list(map(lambda x: NBFeatures.get_features(x, model), tokenized_sentences))\n preds = self._classify(features_list, model) \n return preds\n\n\n", "repo_name": "BasRizk/NaiveBayesVsPerceptronNLP", "sub_path": "naivebayes.py", "file_name": "naivebayes.py", "file_ext": "py", "file_size_in_byte": 5304, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "Features.Features", "line_number": 18, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 62, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 62, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "math.log", "line_number": 112, "usage_type": "call"}, {"api_name": "math.log", "line_number": 114, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 123, "usage_type": "call"}, {"api_name": "Features.tokenize", "line_number": 137, "usage_type": "argument"}]} +{"seq_id": "5134181261", "text": "# SPDX-License-Identifier: MIT\n# © 2020-2022 ETH Zurich and other contributors, see AUTHORS.txt for details\n\nfrom gdl_apps.EmotionRecognition.utils.io import load_model\nfrom gdl.datasets.ImageTestDataset import TestData\nimport gdl\nimport numpy as np\nimport os\nimport torch\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom torch.functional import F\nfrom gdl.datasets.AffectNetDataModule import AffectNetExpressions\nfrom gdl.utils.other import 
get_path_to_assets\nfrom tqdm import tqdm\n\ndef load_dir(lmspath, framepath, start, end):\n lmss = []\n imgs_paths = []\n for i in range(start, end):\n if os.path.isfile(os.path.join(lmspath, str(i) + '.lms')):\n lms = np.loadtxt(os.path.join(\n lmspath, str(i) + '.lms'), dtype=np.float32)\n lmss.append(lms)\n imgs_paths.append(os.path.join(framepath, str(i) + '.jpg'))\n lmss = np.stack(lmss)\n lmss = torch.as_tensor(lmss).cuda()\n return imgs_paths\n\nclass EMOCA_tracker:\n def __init__(self):\n \n model_name = 'ResNet50'\n path_to_models = get_path_to_assets() /\"EmotionRecognition\"\n\n path_to_models = path_to_models / \"image_based_networks\"\n\n self.model = load_model(Path(path_to_models) / model_name)\n print(self.model)\n self.model.cuda()\n self.model.eval()\n\n def __call__(self, images, tform=None):\n\n codedict = self.model(images)\n\n return codedict\n \n def save_images(self, batch, predictions, output_folder):\n # Save the images\n\n softmax = F.softmax(predictions[\"expr_classification\"])\n top_expr = torch.argmax(softmax, dim=1)\n for i in range(len(batch[\"image\"])):\n img = batch[\"image\"][i].cpu().detach().numpy()\n img = img.transpose(1, 2, 0)\n img = img * 255\n img = img.astype(np.uint8)\n\n plt.figure()\n # plot the image with matplotlib \n plt.imshow(img)\n # write valence and arousal to the image\n expr = AffectNetExpressions(int(top_expr[i].item()))\n text = \"Predicted emotion:\\n\"\n text += f'Arousal: {predictions[\"arousal\"][i].item():.2f} \\nValence: {predictions[\"valence\"][i].item():.2f}'\n text += f\"\\nExpression: {expr.name}, {softmax[i][expr.value].item()*100:.2f}%\"\n plt.title(text)\n out_fname = Path(output_folder) / f\"{batch['image_name'][i]}.png\"\n # save the image to the output folder\n \n # axis off \n plt.axis('off')\n plt.savefig(out_fname)\n plt.close()\n\n\ndef emotion_detection(dataset_base, emotion_dir):\n '''\n Face tracker using FLAME model.\n Used to have geometry prior for nerf sampling.\n '''\n\n id_dir = dataset_base\n debug_emotions = os.path.join(id_dir, 'debug', 'emotions_imgs')\n Path(debug_emotions).mkdir(parents=True, exist_ok=True)\n\n emoca_tracker = EMOCA_tracker()\n\n # Run deca on all frames\n testdata = TestData(os.path.join(id_dir, 'frames'), face_detector=\"fan\", max_detection=20)\n \n for i, data in enumerate(tqdm(testdata)):\n batch = testdata[i]\n batch[\"image\"] = batch[\"image\"].cuda()\n predictions = emoca_tracker(batch)\n npy_pred = {k: v.cpu().detach().numpy() for k,v in predictions.items()}\n np.save(os.path.join(emotion_dir, '%5d.npy' % i), npy_pred)\n\n emoca_tracker.save_images(batch, predictions, debug_emotions)\n\nif __name__ == '__main__':\n\n dataset_base = '/media/apennino/EmotionDetection/Test/Greta/'\n emotion_dir = '/media/apennino/EmotionDetection/Test/Greta/emotions/'\n emotion_detection(dataset_base, emotion_dir)\n", "repo_name": "mediatechnologycenter/AvatarForge", "sub_path": "motion-gan-pipeline/preprocessing/emoca_tracker.py", "file_name": "emoca_tracker.py", "file_ext": "py", "file_size_in_byte": 3711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.isfile", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 27, "usage_type": "call"}, {"api_name": "gdl.utils.other.get_path_to_assets", "line_number": 34, "usage_type": "call"}, {"api_name": "gdl_apps.EmotionRecognition.utils.io.load_model", "line_number": 38, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.functional.F.softmax", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.functional.F", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.argmax", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "gdl.datasets.AffectNetDataModule.AffectNetExpressions", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 86, "usage_type": "call"}, {"api_name": "gdl.datasets.ImageTestDataset.TestData", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "39616804491", "text": "# pylint: disable=unused-variable\nimport pytest\nfrom starkware.starknet.public.abi import (\n get_selector_from_name,\n get_storage_var_address,\n)\n\nfrom starknet_py.net.client_models import Call\nfrom starknet_py.net.full_node_client import FullNodeClient\nfrom starknet_py.net.networks import TESTNET\n\n\ndef test_init():\n # docs-start: init\n full_node_client = FullNodeClient(node_url=\"https://your.node.url\", net=TESTNET)\n # docs-end: init\n\n\n@pytest.mark.asyncio\nasync def test_get_block(full_node_client):\n # docs-start: get_block\n block = await full_node_client.get_block(block_number=\"latest\")\n block = await 
full_node_client.get_block(block_number=0)\n # or\n block = await full_node_client.get_block(block_hash=\"0x0\")\n # docs-end: get_block\n\n\n@pytest.mark.asyncio\nasync def test_get_state_update(full_node_client):\n # docs-start: get_state_update\n state_update = await full_node_client.get_state_update(block_number=\"latest\")\n state_update = await full_node_client.get_state_update(block_number=0)\n # or\n state_update = await full_node_client.get_state_update(block_hash=\"0x0\")\n # docs-end: get_state_update\n\n\n@pytest.mark.asyncio\nasync def test_get_storage_at(full_node_client, map_contract):\n address = map_contract.address\n # docs-start: get_storage_at\n storage_value = await full_node_client.get_storage_at(\n contract_address=address,\n key=get_storage_var_address(\"storage_var name\"),\n block_number=\"latest\",\n )\n # docs-end: get_storage_at\n\n\n@pytest.mark.asyncio\nasync def test_get_transaction(full_node_client, declare_transaction_hash):\n # docs-start: get_transaction\n transaction_hash = 0x1 or 1 or \"0x1\"\n # docs-end: get_transaction\n transaction_hash = declare_transaction_hash\n # docs-start: get_transaction\n transaction = await full_node_client.get_transaction(tx_hash=transaction_hash)\n # docs-end: get_transaction\n\n\n@pytest.mark.asyncio\nasync def test_get_transaction_receipt(full_node_client, declare_transaction_hash):\n transaction_hash = declare_transaction_hash\n # docs-start: get_transaction_receipt\n transaction_receipt = await full_node_client.get_transaction_receipt(\n tx_hash=transaction_hash\n )\n # docs-end: get_transaction_receipt\n\n\n@pytest.mark.asyncio\nasync def test_estimate_fee(full_node_account, deploy_account_transaction):\n full_node_client = full_node_account.client\n transaction = deploy_account_transaction\n # docs-start: estimate_fee\n estimated_fee = await full_node_client.estimate_fee(tx=transaction)\n # docs-end: estimate_fee\n\n\n@pytest.mark.asyncio\nasync def test_call_contract(full_node_client, contract_address):\n # docs-start: call_contract\n response = await full_node_client.call_contract(\n call=Call(\n to_addr=contract_address,\n selector=get_selector_from_name(\"increase_balance\"),\n calldata=[123],\n ),\n block_number=\"latest\",\n )\n # docs-end: call_contract\n\n\n@pytest.mark.asyncio\nasync def test_get_class_hash_at(full_node_client, contract_address):\n # docs-start: get_class_hash_at\n address = 0x1 or 1 or \"0x1\"\n # docs-end: get_class_hash_at\n address = contract_address\n # docs-start: get_class_hash_at\n class_hash = await full_node_client.get_class_hash_at(\n contract_address=address, block_number=\"latest\"\n )\n # docs-end: get_class_hash_at\n\n\n@pytest.mark.asyncio\nasync def test_get_class_by_hash(full_node_client, class_hash):\n # docs-start: get_class_by_hash\n hash_ = 0x1 or 1 or \"0x1\"\n # docs-end: get_class_by_hash\n hash_ = class_hash\n # docs-start: get_class_by_hash\n contract_class = await full_node_client.get_class_by_hash(class_hash=hash_)\n # docs-end: get_class_by_hash\n\n\n@pytest.mark.asyncio\nasync def test_get_transaction_by_block_id(full_node_client):\n # docs-start: get_transaction_by_block_id\n transaction = await full_node_client.get_transaction_by_block_id(\n index=0, block_number=\"latest\"\n )\n # docs-end: get_transaction_by_block_id\n\n\n@pytest.mark.asyncio\nasync def test_get_block_transaction_count(full_node_client):\n # docs-start: get_block_transaction_count\n num_of_transactions = await full_node_client.get_block_transaction_count(\n block_number=\"latest\"\n )\n # 
docs-end: get_block_transaction_count\n\n\n@pytest.mark.asyncio\nasync def test_get_class_at(full_node_client, contract_address):\n # docs-start: get_class_at\n address = 0x1 or 1 or \"0x1\"\n # docs-end: get_class_at\n address = contract_address\n # docs-start: get_class_at\n contract_class = await full_node_client.get_class_at(\n contract_address=address, block_number=\"latest\"\n )\n # docs-end: get_class_at\n\n\n@pytest.mark.asyncio\nasync def test_get_contract_nonce(full_node_client, contract_address):\n # docs-start: get_contract_nonce\n address = 0x1 or 1 or \"0x1\"\n # docs-end: get_contract_nonce\n address = contract_address\n # docs-start: get_contract_nonce\n nonce = await full_node_client.get_contract_nonce(\n contract_address=address, block_number=\"latest\"\n )\n # docs-end: get_contract_nonce\n", "repo_name": "chain-cpu/starknet-sdk", "sub_path": "starknet_py/tests/e2e/docs/code_examples/test_full_node_client.py", "file_name": "test_full_node_client.py", "file_ext": "py", "file_size_in_byte": 5175, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "starknet_py.net.full_node_client.FullNodeClient", "line_number": 15, "usage_type": "call"}, {"api_name": "starknet_py.net.networks.TESTNET", "line_number": 15, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}, {"api_name": "starkware.starknet.public.abi.get_storage_var_address", "line_number": 45, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 72, "usage_type": "attribute"}, {"api_name": "starknet_py.net.client_models.Call", "line_number": 85, "usage_type": "call"}, {"api_name": "starkware.starknet.public.abi.get_selector_from_name", "line_number": 87, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 150, "usage_type": "attribute"}]} +{"seq_id": "33322591725", "text": "# coding=utf-8\n__author__ = 'Feely'\n\nimport time\nimport multiprocessing\nimport sys\n\nimport DrawNO\nimport conn\nimport GDSFC\n\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.excepthook = lambda *args: None\nSTDERR = sys.stderr\n\n\n#重庆时时彩\ndef ssc_drawnumber(ssc_type,db_ssc_type):\n returndate=''\n while True:\n #调用爬虫,获取开奖信息\n assert isinstance(ssc_type, str)\n draw_date,draw_code, draw_time_str= DrawNO.drawnumber(ssc_type)\n if draw_code == '0' or draw_date <= returndate:\n pass\n else:\n returndate=conn.kjdata(t2=draw_code,cid=db_ssc_type,t1=draw_date,t3=draw_time_str)\n time.sleep(180)\n # draw_time = datetime.strptime(draw_time_str, \"%Y-%m-%d %H:%M\")\n # ms.IsInfoExists(SPname='ibc.dbo.IsInfoExists',lottery_type=db_ssc_type,lottery_num=draw_date,kjCodes=draw_code,kjtime=draw_time,addtime=datetime.now())\n # time.sleep(1)\n # 
ms.SYSPaiJiang(SPname='ibc.dbo.SYSPaiJiang',kjExpect=draw_date,kjTime=draw_time_str,kjCode=draw_code,ltType=db_ssc_type)\n time.sleep(30)\n\ndef main():\n \"\"\"\n\n :rtype : Null\n \"\"\"\n #重庆时时彩\n ssc_type='cqssc'\n db_ssc_type='1'\n jobs=[]\n for i in range(2):\n p_cq=multiprocessing.Process(name='CQSSC',target=ssc_drawnumber,args=(ssc_type,db_ssc_type,))\n jobs.append(p_cq)\n p_cq.start()\n p_cq.join(timeout=10)\nif __name__ == \"__main__\":\n main()", "repo_name": "FeelySong/SJLottery", "sub_path": "kj/SSC/sjNumber.py", "file_name": "sjNumber.py", "file_ext": "py", "file_size_in_byte": 1460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.excepthook", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "DrawNO.drawnumber", "line_number": 25, "usage_type": "call"}, {"api_name": "conn.kjdata", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "27922822921", "text": "import os\nimport numpy as np\nimport json\nimport pickle as pkl\nimport matplotlib.pyplot as plt\n\nfrom PIL import Image\nfrom glob import glob\nfrom tqdm import tqdm\n\nfrom c2d_models import *\n\ndef load_json(file):\n if \".json\" not in file: file += \".json\"\n with open(file, \"r\") as f:\n contents = json.load(f)\n return contents\n\ndef dump_json(contents, file):\n if \".json\" not in file: file += \".json\"\n with open(file, \"w\") as f:\n json.dump(contents, f)\n return True\n\ndef load_pickle(file):\n if \".pkl\" not in file: file += \".pkl\"\n with open(file, \"rb\") as f:\n contents = pkl.load(f)\n return contents\n \ndef dump_pickle(contents, file):\n if \".pkl\" not in file: file += \".pkl\"\n with open(file, \"wb\") as f:\n pkl.dump(contents, f)\n return True\n\ndef read_image(image_path, resize_to = None):\n img = Image.open(image_path)\n if resize_to != None:\n img = img.resize(resize_to)\n return np.array(img)\n\ndef save_image(image_array, file_path):\n try:\n image_array = im_to_255(image_array)\n Image.fromarray(image_array).save(file_path)\n return True\n except Exception as e:\n print(e)\n return False\n\ndef join_paths(paths):\n path = \"\"\n for tag in paths:\n path = os.path.join(path, tag)\n return path\n\ndef read_directory_contents(directory):\n if \"*\" not in directory: directory = join_paths([directory, \"*\"])\n return sorted(glob(directory))\n\ndef create_directory(path):\n if not os.path.exists(path): os.mkdir(path)\n \ndef INFO(*list_of_strings):\n list_of_strings = list(list_of_strings)\n print(\"-\"*40)\n print(\"\\n\".join(list_of_strings))\n print(\"-\"*40)\n \ndef normalize(x):\n return (x - x.min())/(x.max() - x.min())\n\ndef im_to_255(x):\n if x.max() <= 1: return (x*255).astype(np.uint8)\n return x\n\ndef get_model(model_path, rec = True, max_value=1000):\n if rec: model = C2D_AE_128_3x3(isTrain = True)\n else: model = C2D_AE_128_3x3(isTrain = False, max_value = max_value)\n model.model.load_weights(model_path)\n return model.model\n\ndef im_3(x, channel_axis = -1):\n if len(x.shape) < 3:\n x = np.expand_dims(x, axis = channel_axis)\n if x.shape[channel_axis] < 3:\n x = x.repeat((1 + 3 - x.shape[channel_axis]), 
axis = channel_axis)\n return x\n ", "repo_name": "ambareeshravi/AD_AE_XAI", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 22, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 28, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 38, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 46, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "35394957031", "text": "import sys\nfrom base import make_app\n\n\nif __name__ == '__main__':\n\toptions = {}\n\tif len(sys.argv) > 1:\n\t\tif sys.argv[1] == 'upgradedb':\n\t\t\tfrom alembic.config import main\n\t\t\tmain('upgrade head'.split(' '), 'alembic')\n\t\n\t\t\texit(0)\n\t\tfor arg in sys.argv[1:]:\n\t\t\tk, v = arg.strip('--').split('=', 1)\n\t\t\toptions[k] = v\n\tapp = make_app(options)\n\tapp.run()\n", "repo_name": "yyotsuba/session_book", "sub_path": "run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 351, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "alembic.config.main", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "base.make_app", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "19233700652", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport shutil\n\nTRASH = os.path.expanduser(\"~\") + \"/.trash/\"\n\ndef write_log(trash_path, orig_path):\n with open(TRASH + \"TRASH_LOG.log\", \"a\") as f:\n f.write(TRASH + trash_path + \"\\n\" + orig_path + \"\\n\")\n\n\ndef authenticate_path(file_to_rm):\n file_full = os.path.abspath(file_to_rm)\n file_name = os.path.split(file_full)[-1]\n dest_file_full = file_full\n dest_file_name = file_name\n # same name exists in trash\n if os.path.exists(TRASH + file_name):\n count = 0\n # find which number makes this a unique value\n while (os.path.exists(TRASH + file_name + \".\" + str(count))):\n count += 1\n dest_file_name += \".\" + str(count)\n dest_file_full += \".\" + str(count)\n\n return file_full, dest_file_full, file_name, dest_file_name\n\n\ndef recur(base, perm):\n if perm:\n shutil.rmtree(base)\n else:\n dir_full, dest_dir_full, dir_name, dest_dir_name = authenticate_path(base)\n shutil.move(dir_full, 
TRASH + dest_dir_name)\n write_log(dest_dir_name, dir_full)\n \n\ndef single(file_to_rm, perm):\n if not os.path.isfile(file_to_rm):\n print(\"Not a regular file!\")\n return\n if perm:\n os.remove(file_to_rm)\n else:\n file_full, dest_file_full, file_name, dest_file_name = authenticate_path(file_to_rm)\n shutil.move(file_full, TRASH + dest_file_name)\n write_log(dest_file_name, file_full)\n \n\n\n\n\ndef clean():\n recur(TRASH, True)\n os.mkdir(TRASH)\n f = open(TRASH + \"TRASH_LOG.log\", \"w+\")\n f.close()\n\n\ndef undo_delete():\n pairs = []\n with open(TRASH + \"TRASH_LOG.log\", \"r\") as log:\n for line in log:\n pairs.append(line.strip())\n\n # make sure there is value to restore\n if len(pairs) < 1:\n print(\"Your trash is empty.\")\n return\n\n # to_restore = [trash, dest]\n to_restore = []\n to_restore.append(pairs[-2])\n to_restore.append(pairs[-1])\n if os.path.exists(to_restore[1]):\n print(\"There will be a name error; resolve the conflict in the destination.\")\n return\n else:\n shutil.move(to_restore[0], to_restore[1])\n \n pairs = pairs[0:-2]\n with open(TRASH + \"TRASH_LOG.log\", \"w\") as log:\n # all but the last\n for p in pairs:\n log.write(p + \"\\n\")\n\n\nparser = argparse.ArgumentParser()\nremove_vs_clean = parser.add_mutually_exclusive_group()\nremove_vs_clean.add_argument(\"file\", help=\"Remove FILE to ~/.trash.\", \n metavar=\"FILE\", nargs=\"*\", action=\"append\", default=[])\nremove_vs_clean.add_argument(\"-r\", \"--recursive\", help=\"Remove directory to ~/.trash.\", \n metavar=\"DIR\", nargs=\"*\", action=\"append\", default=[])\nremove_vs_clean.add_argument(\"-e\", \"--empty-trash\", help=\"Empty ~/.trash.\", action=\"store_true\")\nremove_vs_clean.add_argument(\"-u\", \"--undo\", help=\"Undo last delete.\", action=\"store_true\")\nparser.add_argument(\"--permanent\", help=\"Permanently delete file or directory.\", action=\"store_true\")\n\nargs = parser.parse_args()\n\n\n# verify that the trash exists\nif not os.path.exists(TRASH):\n os.mkdir(TRASH)\nif not os.path.exists(TRASH + \"TRASH_LOG.log\"):\n f = open(TRASH + \"TRASH_LOG.log\", \"w\")\n f.close()\n\n# parse the arguments\nif args.recursive:\n for arg in args.recursive[0]: # not sure why, but args.recursive is 2d\n recur(arg, args.permanent)\n\nif args.file:\n for arg in args.file[0]: # again; not sure why, but args.file is 2d\n single(arg, args.permanent)\n\nif args.empty_trash:\n clean()\n\nif args.undo:\n undo_delete()\n\n", "repo_name": "ClaytonMcCray/fakeRM", "sub_path": "tr.py", "file_name": "tr.py", "file_ext": "py", "file_size_in_byte": 3659, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.expanduser", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 33, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 36, 
"usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 45, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 48, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 81, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}]} +{"seq_id": "1832679764", "text": "import timm\nimport torch\nimport numpy as np\nfrom torchsummary import summary\n\nfrom nni.compression.pytorch.pruning import L1NormPruner\nfrom nni.compression.pytorch.speedup import ModelSpeedup\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef test_exclude():\n batch_size = 16\n inference_input = torch.randn(batch_size, 3, 360, 640).to(device)\n\n sparsity = 0.8\n model = timm.create_model('efficientnet_lite0', pretrained=True)\n model.to(device)\n print(\"Model Structure...\")\n print(model)\n\n print(\"\\nStarting Pruning Process...\")\n config_list = None\n # create pruned model\n config_list = [{\n 'sparsity_per_layer': sparsity,\n 'op_types': ['Linear', 'Conv2d']\n }, {\n 'exclude': True,\n 'op_names': ['conv_stem']\n }]\n\n print(\"\\nConfig List:\", config_list)\n\n dummy_input = torch.rand(1, 3, 360, 640).to(device)\n pruner = L1NormPruner(model, config_list)\n\n # compress the model and generate the masks\n _, masks = pruner.compress()\n\n # need to unwrap the model, if the model is wrapped before speedup\n pruner._unwrap_model()\n\n # speedup the model, for more information about speedup, please refer :doc:`pruning_speedup`.\n ModelSpeedup(model, dummy_input, masks).speedup_model()\n\n print(\"\\n\\n----------- Model Summary: Pruned at {}% with NNI -----------\\n\".format(sparsity * 100))\n if torch.cuda.is_available():\n model.cuda()\n summary(model, (3, 360, 640))\n\n\ntest_exclude()\n", "repo_name": "pmmitche/Masters-Thesis", "sub_path": "minimal_pruning_error_example.py", "file_name": "minimal_pruning_error_example.py", "file_ext": "py", "file_size_in_byte": 1503, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.device", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.randn", "line_number": 15, "usage_type": "call"}, {"api_name": "timm.create_model", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 36, "usage_type": "call"}, {"api_name": "nni.compression.pytorch.pruning.L1NormPruner", "line_number": 37, "usage_type": "call"}, {"api_name": "nni.compression.pytorch.speedup.ModelSpeedup", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 49, "usage_type": 
"call"}, {"api_name": "torch.cuda", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torchsummary.summary", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "12976308799", "text": "from django.shortcuts import render, redirect\nfrom rest_framework import viewsets\n\nfrom .models import University\nfrom .serializers import UniversitySerializer\nfrom .forms import UniversityForm\n\n\ndef university(request):\n university = University.objects.using(\"university_db\").all()\n \n if request.method == \"POST\":\n form = UniversityForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"/university\")\n else:\n form = UniversityForm()\n\n context = {\n 'university': university,\n 'form': form,\n }\n return render(request, \"university/university.html\", context)\n\n\nclass UniversityViewSet(viewsets.ModelViewSet):\n queryset = University.objects.using(\"university_db\").all()\n serializer_class = UniversitySerializer\n\n ", "repo_name": "surajkarki66/django-multiple-dbs-and-analytics", "sub_path": "university/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 809, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "models.University.objects.using", "line_number": 10, "usage_type": "call"}, {"api_name": "models.University.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.University", "line_number": 10, "usage_type": "name"}, {"api_name": "forms.UniversityForm", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.UniversityForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 27, "usage_type": "name"}, {"api_name": "models.University.objects.using", "line_number": 28, "usage_type": "call"}, {"api_name": "models.University.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "models.University", "line_number": 28, "usage_type": "name"}, {"api_name": "serializers.UniversitySerializer", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "17344435702", "text": "import math\nimport numpy as np\n\nfrom ..resource.workspace import Workspace\nfrom PIL import Image\nimport io\n\n\nclass Texture:\n \"\"\"\n Holds a texture with width, height and its texture data in rgba.\n \"\"\"\n def __init__(self):\n self.w = 0\n self.h = 0\n self.data = None # i = y * (w * 4) + x * 4 = (r, g, b, a)\n\n @classmethod\n def load_from_file(cls, workspace, location, enforce_square=True):\n \"\"\"\n Load a texture from a file\n\n :param enforce_square: if true, crop image to square (eliminates problems in things like animated textures)\n :param workspace: workspace to load from\n :type workspace: Workspace\n\n :param location: location of file\n :return:\n \"\"\"\n with workspace.get_file(location, 'rb') as f:\n im: Image.Image = Image.open(io.BytesIO(f.read()))\n im.load()\n self = cls()\n\n if im.width != im.height and enforce_square:\n # for now we just crop out animation frames\n im.crop((0, 0, im.width-1, im.width-1))\n\n self.w = im.width\n self.h = im.height\n if len(im.getbands()) == 3:\n im.putalpha(255)\n self.data = im.tobytes()\n return self\n\n\nclass ModelAtlas:\n TEX_SIZE = 128\n\n \"\"\"\n A ModelAtlas 
holds a bunch of textures on a grid, so the shader only needs one texture per block.\n\n Representation is a grid of 16x16 textures. (animated textures only use their first frame)\n Size is calculated once, and drawn at construction. Otherwise similar api to a :py:class:`Texture`.\n \"\"\"\n\n def __init__(self, textures):\n \"\"\"\n Create a new ModelAtlas\n\n :param textures: dictionary of names to :py:class:`Texture` instances\n \"\"\"\n self.textures = textures\n self.data = None\n self.size = [-1, -1]\n self._positions = {}\n\n self._layout()\n\n def _subgrid_layout(self, smaller, new_size, small_size, extra=()):\n \"\"\"\n Layout a subgrid\n\n :param smaller: list of smaller tiles\n :param new_size: size to pack to\n :param small_size: incoming size\n :param extra: things that are already new_size\n :return: list of locations\n \"\"\"\n\n size_factor = new_size / small_size\n grids = [\n []\n ]\n\n c_pos = [0, 0]\n\n for i in smaller:\n grids[-1].append((i, c_pos.copy()))\n c_pos[0] += small_size\n if c_pos[0] == size_factor * small_size:\n c_pos[1] += small_size\n c_pos[0] = 0\n if c_pos[1] == size_factor * small_size:\n grids.append([])\n c_pos = [0, 0]\n\n if not grids[-1]:\n grids = grids[:-1]\n\n for i in extra:\n grids.append([(i, [0, 0])])\n\n return grids\n\n def _blit(self, texture, to):\n \"\"\"\n Blit a texture to the atlas. Also updates the entry in the _positions table\n\n .. danger::\n Only works while laying out, i.e. when the array is 3d\n\n :param texture: blit me\n :param to: here\n \"\"\"\n self._positions[texture] = to\n self.data[to[1]:to[1] + self.textures[texture].h, to[0]:to[0] + self.textures[texture].w] = \\\n np.frombuffer(self.textures[texture].data, dtype=np.uint8).reshape((self.textures[texture].h,\n self.textures[texture].w, 4))\n # that crazy thing does a blit with numpy magic (maybe) (hopefully)\n\n def _draw_grid(self, c_pos, grid):\n \"\"\"\n Recursively draw this grid, starting at c_pos\n\n :param c_pos: start at\n :param grid: draw this\n \"\"\"\n for element in grid:\n to_draw, at = element\n a_pos = c_pos[0] + at[0], c_pos[1] + at[1]\n if type(to_draw) is str:\n self._blit(to_draw, a_pos)\n else:\n self._draw_grid(a_pos, to_draw) # recurse into the nested sub-grid; passing grid again would recurse forever\n\n def _layout(self):\n \"\"\"\n Layout the modelatlas\n \"\"\"\n\n size_filtered = {}\n sizes = []\n for i in self.textures:\n if self.textures[i].w in size_filtered:\n size_filtered[self.textures[i].w].append(i)\n else:\n size_filtered[self.textures[i].w] = [i]\n sizes.append(self.textures[i].w)\n sizes.sort()\n grids = []\n previous_size = sizes[0]\n for i in sizes:\n grids = self._subgrid_layout(grids, i, previous_size, size_filtered[i])\n previous_size = i\n\n h_size = sizes[-1]\n row_count = min(len(grids), ModelAtlas.TEX_SIZE // h_size)\n if row_count < 1:\n row_count = 1\n h_size = row_count * sizes[-1]\n columns = math.ceil(len(grids)/row_count)\n\n self.data = np.zeros((columns*sizes[-1], h_size, 4))\n\n c_pos = [0, 0]\n for i in grids:\n self._draw_grid(c_pos, i)\n c_pos[0] += sizes[-1]\n if c_pos[0] == h_size:\n c_pos[0] = 0\n c_pos[1] += sizes[-1]\n\n self.data = self.data.reshape(4*h_size*columns*sizes[-1])\n self.size = [h_size, columns*sizes[-1]]\n\n def uv_for(self, tex, u, v):\n \"\"\"\n Get the UV for a texture in this atlas\n\n :param tex: texture name\n :param u: u, in pixels\n :param v: v, in pixels\n :return: U, V (floats)\n \"\"\"\n c_pos = self._positions[tex]\n a_pos = c_pos[0] + u, c_pos[1] + v\n return a_pos[0] / self.size[0], a_pos[1] / self.size[1]\n", "repo_name": "mincrmatt12/MCJsonTool",
"sub_path": "mcjsontool/render/texture.py", "file_name": "texture.py", "file_ext": "py", "file_size_in_byte": 5626, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "PIL.Image.Image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 31, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 118, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "23888546657", "text": "import pandas as pd\n\n#importa os dados e suas saidas\nprevisores = pd.read_csv('../breast_cancer_dataset/entradas_breast.csv')\nclasse = pd.read_csv('../breast_cancer_dataset/saidas_breast.csv')\n\nfrom sklearn.model_selection import train_test_split\nprevisores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split(previsores,classe, test_size =0.25)\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nclassificador = Sequential()\n# formula de partida para o units\n# entradas + saidas /2\n# (30+1)/2 = 15,5 => 16 \n# primeira camada oculta\nclassificador.add(Dense(units = 16, activation='relu', \n kernel_initializer='random_uniform', input_dim = 30))\n# camada de saida\nclassificador.add(Dense(units = 1, activation='sigmoid'))\n\n# cria a rede e define alguns parametros de treinamento\nclassificador.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['binary_accuracy'])\n\n# batch_size = numero de registros até atualizar pesos\n# epochs = numero de ciclos completos de interação com os dados\nclassificador.fit(previsores_treinamento,classe_treinamento, batch_size=10, epochs=100)\n\nprevisoes = classificador.predict(previsores_teste)\nprevisoes = (previsoes >0.5)\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\nprecisao = accuracy_score(classe_teste,previsoes)\n\nmatrix = confusion_matrix(classe_teste,previsoes)\n\nresultado = classificador.evaluate(previsores_teste,classe_teste)\n", "repo_name": "Allanfd12/Curso-Deep-Learning", "sub_path": "breast_cancer/breast_cancer_simples/breast_cancer_simples.py", "file_name": "breast_cancer_simples.py", "file_ext": "py", "file_size_in_byte": 1506, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "13579597660", "text": "#!/usr/bin/env python3\n\nfrom urllib.request import urlopen # for getplace\nimport json # for getplace\nimport urllib.request, urllib.parse, urllib.error, codecs # for 
geonames\n\ndef getplace(lon, lat):\n \"\"\"\n Convert lon, lat to country\n DS - adapted from http://stackoverflow.com/questions/20169467/how-to-convert-from-longitude-and-latitude-to-country-or-city\n \"\"\"\n url = \"http://maps.googleapis.com/maps/api/geocode/json?\"\n url += \"latlng=%s,%s&sensor=false\" % (lat, lon)\n v = urlopen(url).read()\n j = json.loads(v)\n try:\n components = j['results'][0]['address_components']\n except: \n return '-'\n\n #components = { 'types': {'country':'-', 'town':'-'}} \n country = town = None\n for c in components:\n #print('CTYPES ', c)\n #print('CTYPES ', c['types'])\n if \"country\" in c['types']:\n country = c['long_name']\n if \"postal_town\" in c['types']:\n town = c['long_name']\n return country #, town, continent\n\n\n#------------------------------------------------------------\n\"\"\"\nRetrieve a list of information about countries, pulled from GeoNames.\nDS adapted from: from https://www.djangosnippets.org/snippets/1049/\n\nExample entry:\n\n {u'Area(in sq km)': u'33843',\n u'Capital': u'Chi\\\\u015fin\\\\u0103u',\n u'Continent': u'EU',\n u'Country': u'Moldova',\n u'CurrencyCode': u'MDL',\n u'CurrencyName': u'Leu',\n u'EquivalentFipsCode': u'',\n u'ISO': u'MD',\n u'ISO-Numeric': u'498',\n u'ISO3': u'MDA',\n u'Languages': u'mo,ro,ru,gag,tr',\n u'Phone': u'373',\n u'Population': u'4324000',\n u'Postal Code Format': u'MD-####',\n u'Postal Code Regex': u'^(?:MD)*(\\\\d{4})$',\n u'fips': u'MD',\n u'geonameid': u'617790',\n u'neighbours': u'RO,UA',\n u'tld': u'.md'}\n\"\"\"\n\n\nCOUNTRY_INFO_URL = \"http://download.geonames.org/export/dump/countryInfo.txt\"\n\ndef get_geonames_country_data():\n \"Returns a list of dictionaries, each representing a country\"\n udata = urllib.request.urlopen(COUNTRY_INFO_URL).read().decode('utf8')\n # Strip the BOM\n if udata[0] == codecs.BOM_UTF8.decode('utf8'):\n udata = udata[1:]\n # Ignore blank lines\n lines = [l for l in udata.split('\\n') if l]\n # Find the line with the headers (starts #ISO)\n header_line = [l for l in lines if l.startswith('#ISO')][0]\n headers = header_line[1:].split('\\t')\n # Now get all the countries\n country_lines = [l for l in lines if not l.startswith('#')]\n countries = []\n for line in country_lines:\n countries.append(dict(list(zip(headers, line.split('\\t')))))\n lastDS = countries[-1]\n wanted = 'Country ISO ISO3 Continent tld'.split()\n #if 'Germany' in line:\n # print('DS',lastDS['Country'],lastDS['ISO'], lastDS['ISO3'])\n #if 'United' in line:\n # print('DS',lastDS['Country'],lastDS['ISO'], lastDS['ISO3'])\n #for k in wanted:\n # print('DSK', k, lastDS[k] )\n #for kk in lastDS.keys():\n ## print('DSKK', kk, lastDS[kk] )\n #DS\n #nDS = 0\n #for h in headers:\n # print('h', h)\n # if h == 'Germany': print ('DE', nDS)\n # nDS += 1\n return countries\n\ndef getCountryInfo(country):\n\n countries=get_geonames_country_data()\n iso2, iso3, continent = '-' * 3\n for c in countries:\n #print 'Checking ', c['Country'], country\n if c['Country'] == country:\n iso2 = c['ISO']\n iso3 = c['ISO3']\n continent = c['Continent']\n return iso2, iso3, continent\n\ndef lonlat2ccodes(lon,lat):\n country = getplace(lon, lat)\n iso2, iso3, continent = getCountryInfo(country)\n return iso2, iso3, country, continent\n\nif __name__ == '__main__':\n\n import sys\n\n if len(sys.argv) > 1:\n if sys.argv[1] == '--xy':\n try:\n x, y = list(map(float, sys.argv[2].split()))\n country = getplace(x, y)\n iso2, iso3, continent = getCountryInfo(country)\n print('step-by-step', x, y, ' => ', iso2, iso3, country, 
continent )\n iso2, iso3, country, continent = lonlat2ccodes(x,y)\n print('lonlat2codes', x, y, ' => ', iso2, iso3, country, continent )\n except:\n #print(help(CountryStuff))\n sys.exit('Usage: CountryStuff --xy \"lon lat\"')\n\n else: \n \n # test google suggestion\n #print(getplace(0.1,51.1))\n #print(getplace(0.1,51.2))\n #print(getplace(0.1,51.3))\n print('Mace Heed: ', getplace( -9.00,53.3175))\n print('Tudor Hill: ', getplace(-64.87,32.27))\n x = -(11.0+53/60.0)\n y=78.0+54/60.0\n print('Zeppelin: ', getplace(x, y))\n \n # test geoname suggestion\n #g=get_geonames_country_data()\n testers = 'Germany Turkey Canada Greenland China India'.split()\n testers.append('New Zealand')\n for ccs in testers:\n iso2, iso3, continent = getCountryInfo(ccs)\n print(ccs, 'ISO2:', iso2, 'ISO3:', iso3, 'Cont:', continent)\n \n", "repo_name": "mifads/pyscripts", "sub_path": "emxgeo/geocountries.py", "file_name": "geocountries.py", "file_ext": "py", "file_size_in_byte": 4891, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "urllib.request.urlopen", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 66, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 66, "usage_type": "name"}, {"api_name": "codecs.BOM_UTF8.decode", "line_number": 68, "usage_type": "call"}, {"api_name": "codecs.BOM_UTF8", "line_number": 68, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 120, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 122, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "24739390901", "text": "import pandas as pd\nimport os\nfrom tqdm import tqdm\ndata = pd.read_csv(\"../MSR_data_cleaned.csv\")\ndata_length = data.shape[0]\nprint(data_length)\nif not os.path.exists(\"../data/raw_code_Fan\"):\n os.mkdir(\"../data/raw_code_Fan\")\nvul_num = 0\nfor i in tqdm(range(data_length)):\n func_after = data.at[i, \"func_after\"]\n func_before = data.at[i,\"func_before\"]\n vul = data.at[i,\"vul\"]\n if vul ==1:\n vul_num = vul_num+1\n data_name = str(i)+\"_\"+str(vul)+\".c\"\n if func_after != func_before and vul != 1:\n print(data_name)\n filename = data_name\n # some file names collide, so check before overwriting\n if os.path.exists(\"../data/raw_code_Fan\" + \"/\" + filename):\n with open(\"../data/raw_code_Fan\" + \"/\" + filename, 'r') as f:\n func = f.read()\n if func == func_after:\n print(filename)\n continue\n else:\n with open(\"../data/raw_code_Fan\" + \"/\" +filename, 'w') as f:\n f.write(func_before)\n with open(\"../data/raw_code_Fan\" + \"/\" + filename, 'w') as f:\n f.write(func_before)\nprint(vul_num)\n", "repo_name": "202221632987/Leev", "sub_path": "pre_code/make_code_Fan.py", "file_name": "make_code_Fan.py", "file_ext": "py", "file_size_in_byte": 1120, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 8, "usage_type": "call"}, {"api_name":
"tqdm.tqdm", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "38547795679", "text": "\"\"\"\n @Time : 203/22/19 10:40\n @Author : TaylorMei\n @Email : mhy845879017@gmail.com\n \n @Project : iccv\n @File : mask_detection.py\n @Function:\n \n\"\"\"\n\"\"\"\n @Time : 203/12/19 19:00\n @Author : TaylorMei\n @Email : mhy845879017@gmail.com\n\n @Project : iccv\n @File : mask_mirror.py\n @Function:\n\n\"\"\"\nimport os\nimport numpy as np\nimport skimage.io\n\ndetection_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/mask_rcnn_white_c_crop_resize/'\nimage_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/color_mirror/'\nmask_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/taylor5_384/'\noutput_path = '/media/iccd/TAYLORMEI/mirror/camera-ready/green_detection/'\n\nif not os.path.exists(output_path):\n os.mkdir(output_path)\n\nimglist = os.listdir(detection_path)\nfor i, imgname in enumerate(imglist):\n print(i, imgname)\n detection = skimage.io.imread(detection_path + imgname)\n image = skimage.io.imread(image_path + imgname[:-4] + '.jpg')\n mask = skimage.io.imread(mask_path + imgname)\n print(detection.shape)\n print(mask.shape)\n\n output = np.zeros_like(detection)\n\n for j in range(detection.shape[2]):\n if j != 3:\n output[:, :, j] = np.where(mask >= 127.5, image[:, :, j], detection[:, :, j])\n else:\n output[:, :, j] = detection[:, :, j]\n\n skimage.io.imsave(output_path + imgname, output)", "repo_name": "Mhaiyang/iccv", "sub_path": "utils/mask_detection2.py", "file_name": "mask_detection2.py", "file_ext": "py", "file_size_in_byte": 1353, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "61", "api": [{"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "skimage.io.io.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 36, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 36, "usage_type": "name"}, {"api_name": "skimage.io.io.imread", "line_number": 37, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 37, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 37, "usage_type": "name"}, {"api_name": "skimage.io.io.imread", "line_number": 38, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 38, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 46, "usage_type": "call"}, {"api_name": "skimage.io.io.imsave", "line_number": 50, "usage_type": "call"}, {"api_name": "skimage.io.io", "line_number": 50, "usage_type": "attribute"}, {"api_name": "skimage.io", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "12422548025", "text": "from sklearn import model_selection\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom torch.utils.data import Dataset\nimport os\nimport torch\nimport pandas as pd\nimport numpy as np\n\nfrom discrepancy_datasetup import balance_dataset\nfrom discrepancy_datasetup import synonymsReplacement, 
shuffledTextAugmentation\nclass TextDataset(Dataset):\n\n def __init__(self, dataframe, tokenizer, dir_base, wordDict = None):\n self.tokenizer = tokenizer\n self.data = dataframe\n self.text1 = dataframe.impression1\n self.text2 = dataframe.impression2\n self.targets = self.data.label\n self.row_ids = self.data.index\n self.max_len = 512\n self.wordDict = wordDict\n\n #self.df_data = dataframe.values\n self.data_path = os.path.join(dir_base, \"public_datasets/candid_ptx/dataset1/dataset/\")\n self.dir_base = dir_base\n\n def __len__(self):\n return len(self.text1)\n\n\n def __getitem__(self, index):\n # text extraction\n #global img, image\n text1 = str(self.text1[index])\n text2 = str(self.text2[index])\n #if self.wordDict != None:\n # text1 = synonymsReplacement(self.wordDict, text1)\n # text1 = shuffledTextAugmentation(text1)\n # text2 = synonymsReplacement(self.wordDict, text2)\n # text2 = shuffledTextAugmentation(text2)\n text1 += text2\n text1 = \" \".join(text1.split())\n text2 = str(self.text2[index])\n text2 = \" \".join(text2.split())\n\n\n\n #print(text)\n #text = \"\"\n\n #text = text.replace(\"[ALPHANUMERICID]\", \"\")\n #text = text.replace(\"[date]\", \"\")\n #text = text.replace(\"[DATE]\", \"\")\n #text = text.replace(\"[AGE]\", \"\")\n\n #text = text.replace(\"[ADDRESS]\", \"\")\n #text = text.replace(\"[PERSONALNAME]\", \"\")\n #text = text.replace(\"\\n\", \"\")\n\n inputs1 = self.tokenizer.encode_plus(\n text1,\n None,\n add_special_tokens=True,\n max_length=self.max_len,\n #pad_to_max_length=True,\n padding= 'max_length', #True, # #TOD self.max_len,\n # padding='longest',\n truncation='longest_first',\n return_token_type_ids=True\n )\n ids1 = inputs1['input_ids']\n mask1 = inputs1['attention_mask']\n token_type_ids1 = inputs1[\"token_type_ids\"]\n\n inputs2 = self.tokenizer.encode_plus(\n text2,\n None,\n add_special_tokens=True,\n max_length=self.max_len,\n #pad_to_max_length=True,\n padding= 'max_length', #True, # #TOD self.max_len,\n # padding='longest',\n truncation='longest_first',\n return_token_type_ids=True\n )\n ids2 = inputs2['input_ids']\n mask2 = inputs2['attention_mask']\n token_type_ids2 = inputs2[\"token_type_ids\"]\n\n return {\n 'text1' : text1,\n 'ids1': torch.tensor(ids1, dtype=torch.long),\n 'mask1': torch.tensor(mask1, dtype=torch.long),\n 'token_type_ids1': torch.tensor(token_type_ids1, dtype=torch.long),\n\n 'text2' : text2,\n 'ids2': torch.tensor(ids2, dtype=torch.long),\n 'mask2': torch.tensor(mask2, dtype=torch.long),\n 'token_type_ids2': torch.tensor(token_type_ids2, dtype=torch.long),\n\n 'targets': torch.tensor(self.targets[index], dtype=torch.float),\n 'row_ids': self.row_ids[index],\n }\n\n\ndef setup_dataloader(df, config, tokenizer, wordDict=None):\n\n seed = config[\"seed\"]\n dir_base = config[\"dir_base\"]\n BATCH_SIZE = config[\"batch_size\"]\n # Splits the data into 80% train and 20% valid and test sets\n train_df, test_valid_df = model_selection.train_test_split(\n df, train_size=config[\"train_samples\"], random_state=seed, shuffle=True, stratify=df.label.values\n )\n # Splits the test and valid sets in half so they are both 10% of total data\n test_df, valid_df = model_selection.train_test_split(\n test_valid_df, test_size=config[\"valid_samples\"], random_state=seed, shuffle=True,\n stratify=test_valid_df.label.values\n )\n\n train_df = pd.concat([train_df, test_df])\n\n #train_df = balance_dataset(df, config)\n #train_df = balance_dataset(train_df, config, aug_factor=1)\n train_df.set_index(\"id\", inplace=True)\n 
valid_df.set_index(\"id\", inplace=True)\n test_df.set_index(\"id\", inplace=True)\n\n #print(fail)\n load_df_from_preset_location = False\n if load_df_from_preset_location:\n #train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_final_train/seed' +str(config[\"seed\"]) + '/train_df_seed' +str(config[\"seed\"]) + '.xlsx')\n #train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated/second_and_third_labeled_df'+ '.xlsx')\n #training set\n #train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train/seed' + str(config[\"seed\"]) + '/train_df_seed' +str(config[\"seed\"]) + '.xlsx')\n train_loc = os.path.join(dir_base, 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train_first_second_labeled/seed' + str(config[\"seed\"]) + '/train_df_seed' +str(config[\"seed\"]) + '.xlsx')\n train_df = pd.read_excel(train_loc, engine='openpyxl')\n\n #valid_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_finetuning/seed' +str(config[\"seed\"]) + '/valid_df_seed' +str(config[\"seed\"]) + '.xlsx')\n #valid_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train/seed' +str(config[\"seed\"]) + '/valid_df_seed' +str(config[\"seed\"]) + '.xlsx')\n valid_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_first_train_first_second_labeled/seed' +str(config[\"seed\"]) + '/valid_df_seed' +str(config[\"seed\"]) + '.xlsx')\n valid_df = pd.read_excel(valid_loc, engine='openpyxl')\n\n test_loc = os.path.join(dir_base,'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_v1/seed' +str(config[\"seed\"]) + '/test_df_seed' +str(config[\"seed\"]) + '.xlsx')\n test_df = pd.read_excel(test_loc, engine='openpyxl')\n\n fine_tuning = True\n if fine_tuning:\n\n train_loc = os.path.join(dir_base,\n 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_finetune/seed' + str(\n config[\"seed\"]) + '/train_df_seed' + str(config[\"seed\"]) + '.xlsx')\n train_df = pd.read_excel(train_loc, engine='openpyxl')\n\n valid_loc = os.path.join(dir_base,\n 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_finetune/seed' + str(\n config[\"seed\"]) + '/valid_df_seed' + str(config[\"seed\"]) + '.xlsx')\n valid_df = pd.read_excel(valid_loc, engine='openpyxl')\n\n test_loc = os.path.join(dir_base,\n 'Zach_Analysis/result_logs/discrepancy_detection/third_labeling_batch/data_folder_updated_v1/seed' + str(\n config[\"seed\"]) + '/test_df_seed' + str(config[\"seed\"]) + '.xlsx')\n test_df = pd.read_excel(test_loc, engine='openpyxl')\n\n save_df = True\n if save_df:\n save_location = config[\"save_location\"]\n train_dataframe_location = os.path.join(save_location, 'train_df_seed' + str(config[\"seed\"]) + '.xlsx')\n print(train_dataframe_location)\n train_df.to_excel(train_dataframe_location, index=True)\n\n valid_dataframe_location = os.path.join(save_location, 'valid_df_seed' + str(config[\"seed\"]) + '.xlsx')\n print(valid_dataframe_location)\n valid_df.to_excel(valid_dataframe_location, index=True)\n\n #test_dataframe_location = os.path.join(save_location, 'test_df_seed' + 
str(config[\"seed\"]) + '.xlsx')\n #print(test_dataframe_location)\n #test_df.to_excel(test_dataframe_location, index=True)\n\n training_set = TextDataset(train_df, tokenizer, dir_base=dir_base, wordDict= wordDict)\n valid_set = TextDataset(valid_df, tokenizer, dir_base=dir_base)\n test_set = TextDataset(test_df, tokenizer, dir_base=dir_base)\n\n train_params = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 4\n }\n\n test_params = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 4\n }\n\n ## added to trying sampling from training data\n #y_train_indices = training_set.indices\n #y_train_indices = range(0,len(train_df)) #gets a list of the index 0 to lenth of df\n #y_train = [training_set.targets[i] for i in y_train_indices] #get a list of all of the training labels\n #print(f\"y train: {y_train}\")\n #print(f\"y train len: {len(y_train)}\")\n #class_sample_count = np.array(\n # [len(np.where(y_train == t)[0]) for t in np.unique(y_train)]) # counts the number of each training value\n #print(type(class_sample_count))\n #print(f\"class sample count: {class_sample_count}\")\n\n #class_sample_count = np.array([1134, 94]) #sets the counts to the values in the orginal set\n #class_sample_count = np.array([1228, 1228])\n #class_sample_count = np.array([94, 1134])\n #class_sample_count = np.array([94, 1134])\n\n #print(f\"class sample count: {class_sample_count}\")\n #print(type(class_sample_count))\n\n #class_sample_count = [1134, 94]\n #weight = 1. / class_sample_count # calculates the weight for each sample\n #weight = np.array([1134/1758, 94/1758])\n #weight = np.array([1271/1762, 105/1762])\n #weight = np.array([100, 105/1762])\n\n\n #print(f\"weight values: {weight}\")\n #samples_weight = np.array([weight[t] for t in y_train]) # makes an array where each index is the weight to select it\n #print(f\"len of sample weights: {len(samples_weight)}\")\n #samples_weight = torch.from_numpy(samples_weight)\n #print(f\"samples weight: {samples_weight}\")\n #sampler = WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), 1368, replacement=False) # was 1228\n\n #y = torch.from_numpy(np.array([0, 0, 1, 1, 0, 0, 1, 1]))\n #y = torch.from_numpy(np.array(y_train))\n #sampler = StratifiedSampler(class_vector=y, batch_size=16)\n\n #training_loader = DataLoader(training_set, sampler=sampler, batch_size=BATCH_SIZE, num_workers=4)\n ##\n training_loader = DataLoader(training_set, **train_params)\n\n valid_loader = DataLoader(valid_set, **test_params)\n test_loader = DataLoader(test_set, **test_params)\n\n return training_loader, valid_loader, test_loader\n\n\ndef setup_random_training_loader(df_negative, df_positive, base_pos, base_neg, new_pos, new_neg, config, tokenizer, wordDict=None):\n # base dataest is 1134 negatives for 94 postives\n\n seed = config[\"seed\"]\n dir_base = config[\"dir_base\"]\n BATCH_SIZE = config[\"batch_size\"]\n\n #train_df_positive = df_positive.sample(n=21)\n #train_df = pd.concat([train_df_positive, df_negative])\n #train_df = pd.concat([ train_df, base_pos])\n #df_negative = df_negative.sample(n=1134)\n #df_positive = df_positive.sample(n=94)\n #train_df = pd.concat([df_negative, base_pos])\n\n #added_pos = new_pos.sample(11) #get n samples from positves cases\n #postive_df = pd.concat([base_pos, added_pos]) #add the n samples to the already postive cases\n #negative_df = pd.concat([base_neg, new_neg]) #add the new negative samples to the negative cases\n #train_df = pd.concat([postive_df, negative_df]) #create final training set\n\n train_df 
= pd.concat([base_pos, base_neg])\n\n training_set = TextDataset(train_df, tokenizer, dir_base=dir_base, wordDict= wordDict)\n\n\n train_params = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 4\n }\n\n training_loader = DataLoader(training_set, **train_params)\n\n return training_loader", "repo_name": "zhuemann/discrepancy_detection", "sub_path": "dataloader.py", "file_name": "dataloader.py", "file_ext": "py", "file_size_in_byte": 12524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "61", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 95, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 98, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 102, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 113, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 113, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 117, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", 
"line_number": 164, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 237, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 261, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "24748458957", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 23 06:14:31 2022\n\n@author: docker\n\"\"\"\nimport copy\nimport random\nimport itertools\nimport collections\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom gldadec import utils\n\nclass SetData():\n def __init__(self,verbose=True):\n self.verbose = verbose\n self.raw_df = None\n self.marker_dic = None\n self.final_int = None\n self.input_mat = None\n \n def set_expression(self,df):\n \"\"\"\n Set gene expression data.\n It is better to keep as many genes as possible.\n ----------\n df : DataFrame\n Genes in rows and samples in columns.\n \"\"\"\n df.index = [t.upper() for t in df.index.tolist()] # re-index\n self.raw_df = df\n if self.verbose:\n a,b = self.raw_df.shape\n print(a,'genes')\n print(b,'samples')\n \n def set_marker(self,marker_dic:dict):\n \"\"\"\n Set marker list for each cell\n ----------\n marker_dic : dict\n \n \"\"\"\n # convert uppercase\n new_v = []\n new_k = []\n for i,k in enumerate(marker_dic):\n if len(marker_dic.get(k)) > 0:\n tmp_v = sorted([t.upper() for t in marker_dic.get(k)])\n new_v.append(tmp_v)\n new_k.append(k)\n else:\n pass\n marker_dic2 = dict(zip(new_k,new_v))\n self.marker_dic = marker_dic2\n if self.verbose:\n print(len(self.marker_dic),'cells')\n print(len(marker_dic)-len(self.marker_dic),'cells were removed (markers were not registered)')\n \n def marker_info_processing(self,do_plot=True):\n # reflect expression data\n marker_dic = self.marker_dic\n genes = self.raw_df.index.tolist()\n new_v = []\n new_k = []\n for i,k in enumerate(marker_dic):\n marker = marker_dic.get(k)\n tmp_common = sorted(list(set(marker) & set(genes)))\n if len(tmp_common) > 0:\n tmp_v = [t.upper() for t in tmp_common]\n new_v.append(tmp_v)\n new_k.append(k)\n else:\n pass\n marker_dic3 = dict(zip(new_k,new_v))\n self.marker_dic = marker_dic3\n marker_genes = set(list(itertools.chain.from_iterable(list(self.marker_dic.values()))))\n if self.verbose:\n print('--- reflect genes in expression ---')\n print(len(self.marker_dic),'cells')\n print(len(marker_dic)-len(self.marker_dic),'cells were removed (markers were not registered)')\n print(len(marker_genes),'genes were registered')\n \n # plot the original registered marker size\n if do_plot:\n y = [len(t) for t in self.marker_dic.values()]\n x = [i for i in range(len(y))]\n plt.bar(x,y)\n plt.xticks(x,self.marker_dic.keys(),rotation=75)\n plt.title('Original Marker Size')\n plt.show()\n \n # detect cell specific markers\n count_dic = dict(collections.Counter(list(itertools.chain.from_iterable(list(self.marker_dic.values())))))\n sort_count = sorted(count_dic.items(),key=lambda x : x[1])\n unique_marker = [] # no overlap\n for t in sort_count:\n 
if t[1] == 1:\n unique_marker.append(t[0])\n else:\n pass\n new_v = []\n new_k = []\n for i,k in enumerate(self.marker_dic):\n tmp_v = sorted(list(set(self.marker_dic.get(k)) & set(unique_marker)))\n if len(tmp_v) > 0:\n new_v.append(tmp_v)\n new_k.append(k)\n else:\n pass\n self.spe_marker_dic = dict(zip(new_k,new_v))\n spe_marker_genes = set(list(itertools.chain.from_iterable(list(self.spe_marker_dic.values()))))\n if self.verbose:\n print('--- extract cell specific marker ---')\n print(len(self.spe_marker_dic),'cells')\n print(set(self.marker_dic.keys())-set(self.spe_marker_dic.keys()),'cells were removed (no marker after removing overlap)')\n print(len(spe_marker_genes),'genes were registered')\n \n # plot the cell specific marker size\n if do_plot:\n y = [len(t) for t in self.spe_marker_dic.values()]\n x = [i for i in range(len(y))]\n plt.bar(x,y)\n plt.xticks(x,self.spe_marker_dic.keys(),rotation=75)\n plt.title('Specific Marker Size')\n plt.show()\n \n def set_random(self,random_sets:list):\n \"\"\"\n Random states list\n ----------\n random_sets : list\n e.g. [1448, 1632, 5913, 7927, 8614,...]\n \"\"\"\n self.random_sets = random_sets\n \n def expression_processing(self,random_genes=None,random_n=0,specific=True,random_s=None,prior_norm=True,norm_scale=1000):\n \"\"\"\n 1. Determine if the markers are cell specific.\n 2. Add non-marker gene at random.\n 3. Process expression data into a format for analysis\n ----------\n random_n : int\n DESCRIPTION. The default is 0.\n specific : bool\n DESCRIPTION. The default is True.\n \"\"\"\n if specific:\n if self.verbose:\n print('use specific markers')\n self.marker_final_dic = self.spe_marker_dic\n else:\n if self.verbose:\n print('use overlap markers')\n self.marker_final_dic = self.marker_dic\n \n genes = list(itertools.chain.from_iterable(list(self.marker_final_dic.values()))) # marker genes\n \n raw_df = copy.deepcopy(self.raw_df)\n if random_s is None:\n random_s = self.random_sets[0]\n random.seed(random_s)\n random_candidates = sorted(list(set(raw_df.index.tolist()) - set(genes))) # total genes - marker genes\n if random_genes is None:\n random_genes = random.sample(random_candidates,random_n) # choose genes from non-marker genes\n if self.verbose:\n print(len(random_genes),'genes were added at random')\n else:\n pass\n \n union = sorted(list(set(random_genes) | set(genes)))\n common = sorted(list(set(raw_df.index.tolist()) & set(union))) # fix the output gene order\n target_df = raw_df.loc[common]\n\n # prior information normalization\n if prior_norm:\n linear_norm = utils.freq_norm(target_df,self.marker_final_dic)\n linear_norm = linear_norm.loc[sorted(linear_norm.index.tolist())]\n final_df = linear_norm/norm_scale\n else:\n final_df = target_df/norm_scale\n self.final_int = final_df.astype(int) # convert int\n self.input_mat = np.array(self.final_int.T,dtype='int64')\n\n # seed-topic preparation\n gene_names = [t.upper() for t in self.final_int.index.tolist()]\n self.gene2id = dict((v, idx) for idx, v in enumerate(gene_names))\n self.random_genes = random_genes\n \n def expression_processing2(self,specific=True):\n \"\"\"\n 1. Determine if the markers are cell specific.\n 2. Add non-marker gene at random to each topic.\n 3. Process expression data into a format for analysis\n ----------\n specific : bool\n DESCRIPTION. 
The default is True.\n \"\"\"\n if specific:\n if self.verbose:\n print('use specific markers')\n self.marker_final_dic = self.spe_marker_dic\n else:\n if self.verbose:\n print('use overlap markers')\n self.marker_final_dic = self.marker_dic\n \n marker_final_dic = copy.deepcopy(self.marker_final_dic)\n genes = list(itertools.chain.from_iterable(list(marker_final_dic.values()))) # marker genes\n raw_df = copy.deepcopy(self.raw_df)\n\n random_list = []\n new_list = []\n for i,k in enumerate(marker_final_dic):\n m = marker_final_dic.get(k)\n random_candidates = sorted(list(set(raw_df.index.tolist()) - set(genes))) # total genes - marker genes\n random.seed(i)\n random_gene = random.sample(random_candidates,len(m))\n m.extend(random_gene)\n new_list.append(sorted(m))\n random_list.append(random_gene)\n genes.extend(random_gene)\n new_dic = dict(zip(list(marker_final_dic.keys()), new_list))\n # FIXME: overwrite\n self.marker_final_dic = new_dic\n \n common = list(itertools.chain.from_iterable(list(new_dic.values()))) # marker genes\n final_df = raw_df.loc[common]\n self.final_int = final_df.astype(int) # convert int\n self.input_mat = np.array(self.final_int.T,dtype='int64')\n\n # seed-topic preparation\n gene_names = [t.upper() for t in self.final_int.index.tolist()]\n self.gene2id = dict((v, idx) for idx, v in enumerate(gene_names))\n #self.random_genes = random_genes\n \n def seed_processing(self):\n \"\"\"\n Prepare seed information for use as a guide.\n \n input_mat : np.array\n samples are in rows and genes (markers) are in columns.\n array([[7, 4, 5, ..., 4, 9, 4],\n [7, 4, 5, ..., 5, 8, 4],\n [6, 4, 4, ..., 4, 9, 5],\n ...,\n [7, 4, 4, ..., 4, 8, 4],\n [7, 4, 5, ..., 4, 9, 4],\n [8, 4, 4, ..., 4, 9, 4]])\n seed_topics : dict\n seed_topics: dict\n e.g.{0: [4,3],\n 1: [4],\n 2: [1],\n 3: [1,3,5],\n 4: [1],\n 5: [7]}\n seed_k : list\n [1,3,5,7,9,11,13]\n marker_final_dic : dcit\n {'B cells memory': ['AIM2', 'CR2', 'JCHAIN'],\n 'B cells naive': ['BCL7A', 'CD24', 'FCER2', 'IL4R', 'PAX5', 'TCL1A'],\n 'Monocytes': ['ALOX5AP','C5AR1','CCR2','CD14','CD163','CD274',...]}\n\n \"\"\"\n if self.marker_final_dic is None:\n raise ValueError('!! Final Marker Candidates were not defined !! 
--> run expression_processing()')\n # seed_topic preparation\n genes = list(itertools.chain.from_iterable(list(self.marker_final_dic.values())))\n target = list(self.marker_final_dic.keys())\n seed_topic_list = [self.marker_final_dic.get(t) for t in target]\n seed_topics = {}\n finish_genes = []\n for t_id, st in enumerate(seed_topic_list):\n for gene in st:\n try:\n if gene in finish_genes:\n tmp = seed_topics[self.gene2id[gene]]\n seed_topics[self.gene2id[gene]] = tmp + [t_id]\n else:\n seed_topics[self.gene2id[gene]] = [t_id]\n finish_genes.append(gene)\n except:\n # not included in target expression table\n print(gene)\n pass\n \n # reliable gene\n genes = list(itertools.chain.from_iterable(list(self.marker_final_dic.values())))\n seed_k = []\n for g in genes:\n if self.gene2id.get(g) is None:\n #print(g)\n pass\n else:\n seed_k.append(self.gene2id.get(g))\n\n self.seed_topics = seed_topics\n seed_k = sorted(list(set(seed_k)))\n self.seed_k = seed_k\n \n if self.verbose:\n print(\"final genes:\",len(self.final_int))\n print('seed number:',len(self.seed_topics))\n print(\"seed_k:\",len(self.seed_k))\n\ndef main():\n raw_df = pd.read_csv('/mnt/AzumaDeconv/github/GLDADec/data/GSE65133/GSE65133_expression.csv',index_col=0)\n marker_dic = pd.read_pickle('/mnt/AzumaDeconv/github/GLDADec/data/domain_info/human_PBMC_CellMarker_8cell_raw_dic_v1.pkl')\n random_sets = pd.read_pickle('/mnt/AzumaDeconv/github/GLDADec/data/random_info/100_random_sets.pkl')\n\n SD = SetData()\n SD.set_expression(df=raw_df) \n SD.set_marker(marker_dic=marker_dic)\n SD.marker_info_processing(do_plot=True)\n SD.set_random(random_sets=random_sets)\n SD.expression_processing(random_n=0,specific=True)\n SD.seed_processing()\n \n # Collect data to be used in later analyses\n input_mat = SD.input_mat\n final_int = SD.final_int\n seed_topics = SD.seed_topics\n marker_final_dic = SD.marker_final_dic\n \n # save\n out_path = '/mnt/AzumaDeconv/github/GLDADec/Dev/test_data/'\n pd.to_pickle(final_int,out_path+'final_int.pkl')\n pd.to_pickle(seed_topics,out_path+'seed_topics.pkl')\n pd.to_pickle(marker_final_dic,out_path+'marker_final_dic.pkl')\n\nif __name__ == '__main__':\n main()", "repo_name": "mizuno-group/GLDADec", "sub_path": "run/dev1_set_data.py", "file_name": "dev1_set_data.py", "file_ext": "py", "file_size_in_byte": 12913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "itertools.chain.from_iterable", "line_number": 81, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 81, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 98, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 98, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 98, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 
116, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 116, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "itertools.chain.from_iterable", "line_number": 161, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 161, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 163, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 166, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 169, "usage_type": "call"}, {"api_name": "gldadec.utils.freq_norm", "line_number": 181, "usage_type": "call"}, {"api_name": "gldadec.utils", "line_number": 181, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 212, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 213, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 213, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 214, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 221, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 222, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 231, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 231, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 234, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 273, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 273, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 293, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 293, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 312, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 313, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 314, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 332, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 333, "usage_type": "call"}, {"api_name": "pandas.to_pickle", "line_number": 334, "usage_type": "call"}]} +{"seq_id": "71010364996", "text": "#%%\r\nfrom py_vollib.black_scholes import black_scholes as bs\r\nfrom py_vollib.black_scholes.greeks.analytical import vega\r\nfrom py_vollib.black_scholes.implied_volatility import implied_volatility\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn as sns\r\n# %%\r\ndef implied_vol(S0, K, T, r, market_price, flag='c', tol=0.00001):\r\n \"\"\"Calculating the implied volatility of an European option\r\n S0: stock price\r\n K: strike price\r\n T: time to maturity\r\n r: risk-free rate\r\n market_price: option price in market\r\n \"\"\"\r\n max_iter = 200 #max no. 
\r\n    vol_old = 0.3  # initial guess\r\n\r\n    for k in range(max_iter):\r\n        bs_price = bs(flag, S0, K, T, r, vol_old)\r\n        Cprime = vega(flag, S0, K, T, r, vol_old)*100  # py_vollib vega is per 1% vol point; scale to per unit\r\n        C = bs_price - market_price\r\n\r\n        vol_new = vol_old - C/Cprime\r\n        new_bs_price = bs(flag, S0, K, T, r, vol_new)\r\n        if (abs(vol_old-vol_new) < tol or abs(new_bs_price-market_price) < tol):\r\n            break\r\n\r\n        vol_old = vol_new\r\n\r\n    implied_vol = vol_new\r\n    return implied_vol, k + 1  # volatility and number of Newton steps used\r\n#%%\r\nS0, K, t, r = 83.11, 80, 1/250, 0.025\r\nmarket_price = 3.23\r\niv, n_iter = implied_vol(S0, K, t, r, market_price)\r\nprint(\"The implied volatility of {0:5.2f}% was found in {1:.0f} iterations\".format(iv*100, n_iter))\r\n\r\n# %%\r\nbs_over_iv = [bs('c', S0, K, t, r, v/100) for v in range(0, 100, 1)]\r\nplt.figure()\r\nplt.plot(bs_over_iv)\r\nplt.title('BS premium')\r\nplt.xlabel('Implied Volatility (%)')\r\nplt.ylabel('Call Price ($)')\r\nplt.show()\r\n\r\n#%%\r\ndef implied_vol2(S0, K, t, r, market_price, flag='c', exa=0.00001, vol_old=0.3, max_iter=200):\r\n    \"\"\"Calculating the implied volatility of a European option\r\n    S0: stock price\r\n    K: strike price\r\n    T: time to maturity\r\n    r: risk-free rate\r\n    market_price: option price in market\r\n    flag: c or p\r\n    exa: accuracy / error tolerance\r\n    vol_old: initial guess\r\n    max_iter: max no. of iterations\r\n    \"\"\"\r\n    err_vol = float('inf')\r\n    err_prc = float('inf')\r\n    n_iter = 0\r\n    bs_price = bs(flag, S0, K, t, r, vol_old)\r\n    while (err_vol > exa or err_prc > exa) and n_iter < max_iter:\r\n        Cprime = vega(flag, S0, K, t, r, vol_old)*100\r\n        C = bs_price - market_price\r\n        vol_new = vol_old - C/Cprime\r\n        new_bs_price = bs(flag, S0, K, t, r, vol_new)\r\n        err_vol = abs(vol_old - vol_new)\r\n        err_prc = abs(new_bs_price - market_price)\r\n        vol_old = vol_new\r\n        bs_price = new_bs_price\r\n        n_iter += 1\r\n\r\n    implied_vol = vol_new\r\n    return implied_vol\r\n\r\nS0, K, t, r = 83.11, 80, 1/250, 0.025\r\nmarket_price = 5\r\n#iv, n_iter = implied_vol(S0, K, t, r, market_price)\r\n#print(\"The implied volatility of {0:5.2f}% was found in {1:.0f} iterations\".format(iv*100, n_iter))\r\n\r\n#%%\r\n\r\ndata = pd.read_csv('C:/Users/nuno/OneDrive - ITESO/Ciencia de Datos'\r\n                   '/idi_ii/tsla_options_last.csv')\r\ndata.head()\r\ntest = data[10:11].copy()\r\n# %%\r\nS = 1132\r\nr = 0.0025\r\n#%%\r\n# [0]: implied_vol returns (vol, iterations)\r\ntest['iv'] = test.apply(lambda row: implied_vol(S,\r\n                                                row['Strike'],\r\n                                                row['tau'],\r\n                                                r,\r\n                                                row['Last Sale'])[0], axis=1)\r\n# %%\r\n\r\ntest['iv'] = test.apply(lambda row: implied_volatility(row['Last Sale'],\r\n                                                       S,\r\n                                                       row['Strike'],\r\n                                                       row['tau'],\r\n                                                       r,\r\n                                                       row['type']), axis=1)\r\n\r\n#%%\r\niv_vctr = [implied_volatility(row['Last Sale'],\r\n                              S,\r\n                              row['Strike'],\r\n                              row['tau'],\r\n                              r,\r\n                              row['type']) for _, row in test.iterrows()]\r\n\r\n#%%\r\niv_vctr = []\r\nfor index, row in test.iterrows():\r\n    print(bs(row['type'], S, row['Strike'], row['tau'], r, 0))\r\n    print(row['Last Sale'])\r\n    if bs(row['type'], S, row['Strike'], row['tau'], r, 0) < row['Last Sale']:\r\n        iv_vctr.append(implied_volatility(row['Last Sale'], S, row['Strike'], row['tau'], r, row['type']))\r\n    else:\r\n        iv_vctr.append(0)\r\n#%%\r\n\r\niv_vctr_be_rational = []\r\nfor index, row in data.iterrows():\r\n    if bs(row['type'], S, row['Strike'], row['tau'], r, 0) < row['Last Sale']:\r\n        try:\r\n            iv_vctr_be_rational.append(implied_volatility(row['Last Sale'], S, row['Strike'], row['tau'], r, row['type']))\r\n        except Exception:\r\n            # price admits no implied volatility (e.g. below intrinsic value)\r\n            iv_vctr_be_rational.append(0)\r\n    else:\r\n        iv_vctr_be_rational.append(0)\r\n\r\n
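#%%\r\n# Added sketch: cross-check the hand-rolled Newton solver against py_vollib's\r\n# implied_volatility on the earlier well-posed example (S0=83.11, K=80,\r\n# t=1/250, r=0.025, price=3.23). The 1e-3 agreement tolerance is an assumption,\r\n# not part of the original script.\r\nchk_iv, chk_steps = implied_vol(83.11, 80, 1/250, 0.025, 3.23)\r\nchk_iv_ref = implied_volatility(3.23, 83.11, 80, 1/250, 0.025, 'c')\r\nprint('newton: {0:.4f} ({1} steps) | py_vollib: {2:.4f}'.format(chk_iv, chk_steps, chk_iv_ref))\r\nassert abs(chk_iv - chk_iv_ref) < 1e-3\r\n\r\n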
#%%\r\niv_vctr_newton = []\r\nfor index, row in data.iterrows():\r\n    print(index)\r\n    try:\r\n        if bs(row['type'], S, row['Strike'], row['tau'], r, 0) < row['Last Sale']:\r\n            try:\r\n                iv_vctr_newton.append(implied_vol2(S, row['Strike'], row['tau'], r, row['Last Sale'], flag=row['type']))\r\n            except Exception:\r\n                iv_vctr_newton.append(0)\r\n        else:\r\n            iv_vctr_newton.append(0)\r\n    except Exception:\r\n        iv_vctr_newton.append(0)\r\n\r\n#%%\r\ndata['iv'] = iv_vctr_be_rational\r\n#%%\r\nsns.relplot(\r\n    data=data[data.iv != 0], x='Strike', y='iv', hue='type', col='Expiration Date', kind='scatter', col_wrap=3\r\n).set(ylim=(0, 5))\r\n\r\n#%%\r\nimplied_volatility(587.5, S, 550, 0.0083, r, 'c')\r\nimplied_vol(S, 550, 0.0083, r, 587.5)\r\n# %%\r\nbs('c', S, 550, 0.0083, r, 4)\r\n# %%\r\n", "repo_name": "daniel-nuno/IDI-II", "sub_path": "code/proyecto_iv_newton.py", "file_name": "proyecto_iv_newton.py", "file_ext": "py", "file_size_in_byte": 5545, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "61", "api": [{"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 21, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.greeks.analytical.vega", "line_number": 22, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 26, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 65, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.greeks.analytical.vega", "line_number": 67, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 87, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 102, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 110, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 120, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 122, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 123, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 130, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 132, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 143, "usage_type": "call"}, {"api_name":
"seaborn.relplot", "line_number": 156, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.implied_volatility.implied_volatility", "line_number": 161, "usage_type": "call"}, {"api_name": "py_vollib.black_scholes.black_scholes", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "32305263443", "text": "\"\"\"\nSync Lyrics module for the console\n\"\"\"\n\nimport asyncio\nimport logging\nfrom pathlib import Path\nfrom typing import List\n\nfrom spotdl.download.downloader import Downloader\nfrom spotdl.types.song import Song\nfrom spotdl.utils.ffmpeg import FFMPEG_FORMATS\nfrom spotdl.utils.lrc import generate_lrc\nfrom spotdl.utils.metadata import embed_metadata, get_file_metadata\nfrom spotdl.utils.search import QueryError, get_search_results, reinit_song\n\n__all__ = [\"meta\"]\n\nlogger = logging.getLogger(__name__)\n\n\ndef meta(query: List[str], downloader: Downloader) -> None:\n \"\"\"\n This function applies metadata to the selected songs\n based on the file name.\n If song already has metadata, missing metadata is added\n\n ### Arguments\n - query: list of strings to search for.\n - downloader: Already initialized downloader instance.\n\n ### Notes\n - This function is multi-threaded.\n \"\"\"\n\n # Create a list of all songs from all paths in query\n paths: List[Path] = []\n for path in query:\n test_path = Path(path)\n if not test_path.exists():\n logger.error(\"Path does not exist: %s\", path)\n continue\n\n if test_path.is_dir():\n for out_format in FFMPEG_FORMATS:\n paths.extend(test_path.glob(f\"*.{out_format}\"))\n elif test_path.is_file():\n if test_path.suffix.split(\".\")[-1] not in FFMPEG_FORMATS:\n logger.error(\"File is not a supported audio format: %s\", path)\n continue\n\n paths.append(test_path)\n\n def process_file(file: Path):\n song_meta = get_file_metadata(file, downloader.settings[\"id3_separator\"])\n\n # Check if song has metadata\n # and if it has all the required fields\n # if it has all of these fields, we can assume that the metadata is correct\n if song_meta and not downloader.settings[\"force_update_metadata\"]:\n if (\n song_meta.get(\"artist\")\n and song_meta.get(\"artists\")\n and song_meta.get(\"name\")\n and song_meta.get(\"lyrics\")\n and song_meta.get(\"album_art\")\n ):\n logger.info(\"Song already has metadata: %s\", file.name)\n if downloader.settings[\"generate_lrc\"]:\n lrc_file = file.with_suffix(\".lrc\")\n if lrc_file.exists():\n logger.info(\"Lrc file already exists for %s\", file.name)\n return None\n\n song = Song.from_missing_data(\n name=song_meta[\"name\"],\n artists=song_meta[\"artists\"],\n artist=song_meta[\"artist\"],\n )\n\n generate_lrc(song, file)\n if lrc_file.exists():\n logger.info(\"Saved lrc file for %s\", song.display_name)\n else:\n logger.info(\"Could not find lrc file for %s\", song.display_name)\n\n return None\n\n # Same as above\n if (\n not song_meta\n or None\n in [\n song_meta.get(\"name\"),\n song_meta.get(\"album_art\"),\n song_meta.get(\"artist\"),\n song_meta.get(\"artists\"),\n song_meta.get(\"track_number\"),\n ]\n or downloader.settings[\"force_update_metadata\"]\n ):\n # Song does not have metadata, or it is missing some fields\n # or we are forcing update of metadata\n # so we search for it\n logger.debug(\"Searching metadata for %s\", file.name)\n search_results = get_search_results(file.stem)\n if not search_results:\n logger.error(\"Could not find metadata for %s\", file.name)\n return None\n\n song = search_results[0]\n else:\n # Song has metadata, so we use it to reinitialize the 
song object\n # and fill in the missing metadata\n try:\n song = reinit_song(Song.from_missing_data(**song_meta))\n except QueryError:\n logger.error(\"Could not find metadata for %s\", file.name)\n return None\n\n # Check if the song has lyric\n # if not use downloader to find lyrics\n if song_meta is None or song_meta.get(\"lyrics\") is None:\n logger.debug(\"Fetching lyrics for %s\", song.display_name)\n song.lyrics = downloader.search_lyrics(song)\n if song.lyrics:\n logger.info(\"Found lyrics for song: %s\", song.display_name)\n else:\n song.lyrics = song_meta.get(\"lyrics\")\n\n # Apply metadata to the song\n embed_metadata(file, song)\n\n logger.info(\"Applied metadata to %s\", file.name)\n\n if downloader.settings[\"generate_lrc\"]:\n lrc_file = file.with_suffix(\".lrc\")\n if lrc_file.exists():\n logger.info(\"Lrc file already exists for %s\", file.name)\n return None\n\n generate_lrc(song, file)\n if lrc_file.exists():\n logger.info(\"Saved lrc file for %s\", song.display_name)\n else:\n logger.info(\"Could not find lrc file for %s\", song.display_name)\n\n return None\n\n async def pool_worker(file_path: Path) -> None:\n async with downloader.semaphore:\n # The following function calls blocking code, which would block whole event loop.\n # Therefore it has to be called in a separate thread via ThreadPoolExecutor. This\n # is not a problem, since GIL is released for the I/O operations, so it shouldn't\n # hurt performance.\n await downloader.loop.run_in_executor(None, process_file, file_path)\n\n tasks = [pool_worker(path) for path in paths]\n\n # call all task asynchronously, and wait until all are finished\n downloader.loop.run_until_complete(asyncio.gather(*tasks))\n", "repo_name": "spotDL/spotify-downloader", "sub_path": "spotdl/console/meta.py", "file_name": "meta.py", "file_ext": "py", "file_size_in_byte": 5995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 13430, "dataset": "github-code", "pt": "61", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "spotdl.download.downloader.Downloader", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 37, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 39, "usage_type": "call"}, {"api_name": "spotdl.utils.ffmpeg.FFMPEG_FORMATS", "line_number": 45, "usage_type": "name"}, {"api_name": "spotdl.utils.ffmpeg.FFMPEG_FORMATS", "line_number": 48, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "name"}, {"api_name": "spotdl.utils.metadata.get_file_metadata", "line_number": 55, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song.from_missing_data", "line_number": 75, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song", "line_number": 75, "usage_type": "name"}, {"api_name": "spotdl.utils.lrc.generate_lrc", "line_number": 81, "usage_type": "call"}, {"api_name": "spotdl.utils.search.get_search_results", "line_number": 106, "usage_type": "call"}, {"api_name": "spotdl.utils.search.reinit_song", "line_number": 116, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song.from_missing_data", "line_number": 116, "usage_type": "call"}, {"api_name": "spotdl.types.song.Song", "line_number": 116, "usage_type": "name"}, {"api_name": "spotdl.utils.search.QueryError", "line_number": 117, "usage_type": "name"}, {"api_name": 
"spotdl.utils.metadata.embed_metadata", "line_number": 132, "usage_type": "call"}, {"api_name": "spotdl.utils.lrc.generate_lrc", "line_number": 142, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 150, "usage_type": "name"}, {"api_name": "asyncio.gather", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "7284529662", "text": "import pickle\nfrom pathlib import Path\n\nimport hydra\nfrom hydra.core.hydra_config import HydraConfig\nfrom omegaconf import DictConfig, OmegaConf\n\nimport flwr as fl\n\nfrom dataset import prepare_dataset\nfrom client import generate_client_fn\nfrom server import get_on_fit_config, get_evaluate_fn\n\n\n# A decorator for Hydra. This tells hydra to by default load the config in conf/base.yaml\n@hydra.main(config_path=\"conf\", config_name=\"base\", version_base=None)\ndef main(cfg: DictConfig):\n ## 1. Parse config & get experiment output dir\n print(OmegaConf.to_yaml(cfg))\n # Hydra automatically creates a directory for your experiments\n # by default it would be in /outputs//